diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/BuildExecutable.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/BuildExecutable.py
new file mode 100644
index 0000000000000000000000000000000000000000..0190cc86ff1539d0c4b4916c559b0f2163c279ed
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/BuildExecutable.py
@@ -0,0 +1,170 @@
+"""
+Compile a Python script into an executable that embeds CPython.
+Requires CPython to be built as a shared library ('libpythonX.Y').
+
+Basic usage:
+
+    python -m Cython.Build.BuildExecutable [ARGS] somefile.py
+"""
+
+from __future__ import absolute_import
+
+DEBUG = True
+
+import sys
+import os
+
+if sys.version_info < (3, 9):
+    from distutils import sysconfig as _sysconfig
+
+    class sysconfig(object):
+
+        @staticmethod
+        def get_path(name):
+            assert name == 'include'
+            return _sysconfig.get_python_inc()
+
+        get_config_var = staticmethod(_sysconfig.get_config_var)
+else:
+    # sysconfig can be trusted from cpython >= 3.8.7
+    import sysconfig
+
+
+def get_config_var(name, default=''):
+    return sysconfig.get_config_var(name) or default
+
+INCDIR = sysconfig.get_path('include')
+LIBDIR1 = get_config_var('LIBDIR')
+LIBDIR2 = get_config_var('LIBPL')
+PYLIB = get_config_var('LIBRARY')
+PYLIB_DYN = get_config_var('LDLIBRARY')
+if PYLIB_DYN == PYLIB:
+    # no shared library
+    PYLIB_DYN = ''
+else:
+    PYLIB_DYN = os.path.splitext(PYLIB_DYN[3:])[0]  # 'lib(XYZ).so' -> XYZ
+
+CC = get_config_var('CC', os.environ.get('CC', ''))
+CFLAGS = get_config_var('CFLAGS') + ' ' + os.environ.get('CFLAGS', '')
+LINKCC = get_config_var('LINKCC', os.environ.get('LINKCC', CC))
+LINKFORSHARED = get_config_var('LINKFORSHARED')
+LIBS = get_config_var('LIBS')
+SYSLIBS = get_config_var('SYSLIBS')
+EXE_EXT = sysconfig.get_config_var('EXE')
+
+
+def _debug(msg, *args):
+    if DEBUG:
+        if args:
+            msg = msg % args
+        sys.stderr.write(msg + '\n')
+
+
+def dump_config():
+    _debug('INCDIR: %s', INCDIR)
+    _debug('LIBDIR1: %s', LIBDIR1)
+    _debug('LIBDIR2: %s', LIBDIR2)
+    _debug('PYLIB: %s', PYLIB)
+    _debug('PYLIB_DYN: %s', PYLIB_DYN)
+    _debug('CC: %s', CC)
+    _debug('CFLAGS: %s', CFLAGS)
+    _debug('LINKCC: %s', LINKCC)
+    _debug('LINKFORSHARED: %s', LINKFORSHARED)
+    _debug('LIBS: %s', LIBS)
+    _debug('SYSLIBS: %s', SYSLIBS)
+    _debug('EXE_EXT: %s', EXE_EXT)
+
+
+def _parse_args(args):
+    cy_args = []
+    last_arg = None
+    for i, arg in enumerate(args):
+        if arg.startswith('-'):
+            cy_args.append(arg)
+        elif last_arg in ('-X', '--directive'):
+            cy_args.append(arg)
+        else:
+            input_file = arg
+            args = args[i+1:]
+            break
+        last_arg = arg
+    else:
+        raise ValueError('no input file provided')
+
+    return input_file, cy_args, args
+
+
+def runcmd(cmd, shell=True):
+    if shell:
+        cmd = ' '.join(cmd)
+        _debug(cmd)
+    else:
+        _debug(' '.join(cmd))
+
+    import subprocess
+    returncode = subprocess.call(cmd, shell=shell)
+
+    if returncode:
+        sys.exit(returncode)
+
+
+def clink(basename):
+    runcmd([LINKCC, '-o', basename + EXE_EXT, basename+'.o', '-L'+LIBDIR1, '-L'+LIBDIR2]
+           + [PYLIB_DYN and ('-l'+PYLIB_DYN) or os.path.join(LIBDIR1, PYLIB)]
+           + LIBS.split() + SYSLIBS.split() + LINKFORSHARED.split())
+
+
+def ccompile(basename):
+    runcmd([CC, '-c', '-o', basename+'.o', basename+'.c', '-I' + INCDIR] + CFLAGS.split())
+
+
+def cycompile(input_file, options=()):
+    from ..Compiler import Version, CmdLine, Main
+    options, sources = CmdLine.parse_command_line(list(options or ()) + ['--embed', input_file])
+    _debug('Using Cython %s to compile %s', Version.version, input_file)
+    result = Main.compile(sources, options)
+    if result.num_errors > 0:
+        sys.exit(1)
+
+
+def exec_file(program_name, args=()):
+    runcmd([os.path.abspath(program_name)] + list(args), shell=False)
+
+
+def build(input_file, compiler_args=(), force=False):
+    """
+    Build an executable program from a Cython module.
+
+    Returns the name of the executable file.
+    """
+    basename = os.path.splitext(input_file)[0]
+    exe_file = basename + EXE_EXT
+    if not force and os.path.abspath(exe_file) == os.path.abspath(input_file):
+        raise ValueError("Input and output file names are the same, refusing to overwrite")
+    if (not force and os.path.exists(exe_file) and os.path.exists(input_file)
+            and os.path.getmtime(input_file) <= os.path.getmtime(exe_file)):
+        _debug("File is up to date, not regenerating %s", exe_file)
+        return exe_file
+    cycompile(input_file, compiler_args)
+    ccompile(basename)
+    clink(basename)
+    return exe_file
+
+
+def build_and_run(args):
+    """
+    Build an executable program from a Cython module and run it.
+
+    Arguments after the module name will be passed verbatim to the program.
+    """
+    program_name, args = _build(args)
+    exec_file(program_name, args)
+
+
+def _build(args):
+    input_file, cy_args, args = _parse_args(args)
+    program_name = build(input_file, cy_args)
+    return program_name, args
+
+
+if __name__ == '__main__':
+    _build(sys.argv[1:])
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Cythonize.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Cythonize.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4beb21995f7b0ce0fe20e34ac54ee0dbe0a149d
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Cythonize.py
@@ -0,0 +1,255 @@
+from __future__ import absolute_import, print_function
+
+import os
+import shutil
+import tempfile
+
+from .Dependencies import cythonize, extended_iglob
+from ..Utils import is_package_dir
+from ..Compiler import Options
+
+try:
+    import multiprocessing
+    parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
+except ImportError:
+    multiprocessing = None
+    parallel_compiles = 0
+
+
+class _FakePool(object):
+    def map_async(self, func, args):
+        try:
+            from itertools import imap
+        except ImportError:
+            imap = map
+        for _ in imap(func, args):
+            pass
+
+    def close(self):
+        pass
+
+    def terminate(self):
+        pass
+
+    def join(self):
+        pass
+
+
+def find_package_base(path):
+    base_dir, package_path = os.path.split(path)
+    while is_package_dir(base_dir):
+        base_dir, parent = os.path.split(base_dir)
+        package_path = '%s/%s' % (parent, package_path)
+    return base_dir, package_path
+
+
+def cython_compile(path_pattern, options):
+    all_paths = map(os.path.abspath, extended_iglob(path_pattern))
+    _cython_compile_files(all_paths, options)
+
+
+def _cython_compile_files(all_paths, options):
+    pool = None
+    try:
+        for path in all_paths:
+            if options.build_inplace:
+                base_dir = path
+                while not os.path.isdir(base_dir) or is_package_dir(base_dir):
+                    base_dir = os.path.dirname(base_dir)
+            else:
+                base_dir = None
+
+            if os.path.isdir(path):
+                # recursively compiling a package
+                paths = [os.path.join(path, '**', '*.{py,pyx}')]
+            else:
+                # assume it's a file(-like thing)
+                paths = [path]
+
+            ext_modules = cythonize(
+                paths,
+                nthreads=options.parallel,
+                exclude_failures=options.keep_going,
+                exclude=options.excludes,
+                compiler_directives=options.directives,
+                compile_time_env=options.compile_time_env,
+                force=options.force,
+                quiet=options.quiet,
+                depfile=options.depfile,
+                language=options.language,
+                **options.options)
+
+            if ext_modules and options.build:
+                if len(ext_modules) > 1 and options.parallel > 1:
+                    if pool is None:
+                        try:
+                            pool = multiprocessing.Pool(options.parallel)
+                        except OSError:
+                            pool = _FakePool()
+                    pool.map_async(run_distutils, [
+                        (base_dir, [ext]) for ext in ext_modules])
+                else:
+                    run_distutils((base_dir, ext_modules))
+    except:
+        if pool is not None:
+            pool.terminate()
+        raise
+    else:
+        if pool is not None:
+            pool.close()
+            pool.join()
+
+
+def run_distutils(args):
+    try:
+        from distutils.core import setup
+    except ImportError:
+        try:
+            from setuptools import setup
+        except ImportError:
+            raise ImportError("'distutils' is not available. Please install 'setuptools' for binary builds.")
+
+    base_dir, ext_modules = args
+    script_args = ['build_ext', '-i']
+    cwd = os.getcwd()
+    temp_dir = None
+    try:
+        if base_dir:
+            os.chdir(base_dir)
+            temp_dir = tempfile.mkdtemp(dir=base_dir)
+            script_args.extend(['--build-temp', temp_dir])
+        setup(
+            script_name='setup.py',
+            script_args=script_args,
+            ext_modules=ext_modules,
+        )
+    finally:
+        if base_dir:
+            os.chdir(cwd)
+            if temp_dir and os.path.isdir(temp_dir):
+                shutil.rmtree(temp_dir)
+
+
+def create_args_parser():
+    from argparse import ArgumentParser, RawDescriptionHelpFormatter
+    from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction
+
+    parser = ArgumentParser(
+        formatter_class=RawDescriptionHelpFormatter,
+        epilog="""\
+Environment variables:
+  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
+    of modification times and changes.
+  Environment variables accepted by setuptools are supported to configure the C compiler and build:
+  https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
+    )
+
+    parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',
+                        dest='directives', default={}, type=str,
+                        action=ParseDirectivesAction,
+                        help='set a compiler directive')
+    parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',
+                        dest='compile_time_env', default={}, type=str,
+                        action=ParseCompileTimeEnvAction,
+                        help='set a compile time environment variable')
+    parser.add_argument('-s', '--option', metavar='NAME=VALUE',
+                        dest='options', default={}, type=str,
+                        action=ParseOptionsAction,
+                        help='set a cythonize option')
+    parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,
+                        help='use Python 2 syntax mode by default')
+    parser.add_argument('-3', dest='language_level', action='store_const', const=3,
+                        help='use Python 3 syntax mode by default')
+    parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',
+                        help='use Python 3 syntax mode by default')
+    parser.add_argument('-+', '--cplus', dest='language', action='store_const', const='c++', default=None,
+                        help='Compile as C++ rather than C')
+    parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',
+                        help='Produce a colorized HTML version of the source.')
+    parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',
+                        help='Produce a colorized HTML version of the source '
+                             'which includes entire generated C/C++-code.')
+    parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',
+                        action='append', default=[],
+                        help='exclude certain file patterns from the compilation')
+
+    parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,
+                        help='build extension modules using distutils/setuptools')
+    parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,
+                        help='build extension modules in place using distutils/setuptools (implies -b)')
+    parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',
+                        type=int, default=parallel_compiles,
+                        help=('run builds in N parallel jobs (default: %d)' %
+                              (parallel_compiles or 1)))
+    parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,
+                        help='force recompilation')
+    parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,
+                        help='be less verbose during compilation')
+
+    parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,
+                        help='increase Python compatibility by ignoring some compile time errors')
+    parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,
+                        help='compile as much as possible, ignore compilation failures')
+    parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,
+                        help='strip docstrings')
+    parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')
+    parser.add_argument('sources', nargs='*')
+    return parser
+
+
+def parse_args_raw(parser, args):
+    options, unknown = parser.parse_known_args(args)
+    sources = options.sources
+    # if positional arguments were interspersed
+    # some of them are in unknown
+    for option in unknown:
+        if option.startswith('-'):
+            parser.error("unknown option " + option)
+        else:
+            sources.append(option)
+    del options.sources
+    return (options, sources)
+
+
+def parse_args(args):
+    parser = create_args_parser()
+    options, args = parse_args_raw(parser, args)
+
+    if not args:
+        parser.error("no source files provided")
+    if options.build_inplace:
+        options.build = True
+    if multiprocessing is None:
+        options.parallel = 0
+    if options.language_level:
+        assert options.language_level in (2, 3, '3str')
+        options.options['language_level'] = options.language_level
+
+    if options.lenient:
+        # increase Python compatibility by ignoring compile time errors
+        Options.error_on_unknown_names = False
+        Options.error_on_uninitialized = False
+
+    if options.annotate:
+        Options.annotate = options.annotate
+
+    if options.no_docstrings:
+        Options.docstrings = False
+
+    return options, args
+
+
+def main(args=None):
+    options, paths = parse_args(args)
+
+    all_paths = []
+    for path in paths:
+        expanded_path = [os.path.abspath(p) for p in extended_iglob(path)]
+        if not expanded_path:
+            import sys
+            print("{}: No such file or directory: '{}'".format(sys.argv[0], path), file=sys.stderr)
+            sys.exit(1)
+        all_paths.extend(expanded_path)
+    _cython_compile_files(all_paths, options)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Dependencies.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..7de406516c00c5478726518ec4fc99b91f3b7286
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Dependencies.py
@@ -0,0 +1,1380 @@
+from __future__ import absolute_import, print_function
+
+import cython
+from .. import __version__
+
+import collections
+import contextlib
+import hashlib
+import os
+import shutil
+import subprocess
+import re, sys, time
+from glob import iglob
+from io import open as io_open
+from os.path import relpath as _relpath
+import zipfile
+
+try:
+    from collections.abc import Iterable
+except ImportError:
+    from collections import Iterable
+
+try:
+    import gzip
+    gzip_open = gzip.open
+    gzip_ext = '.gz'
+except ImportError:
+    gzip_open = open
+    gzip_ext = ''
+
+try:
+    import zlib
+    zipfile_compression_mode = zipfile.ZIP_DEFLATED
+except ImportError:
+    zipfile_compression_mode = zipfile.ZIP_STORED
+
+try:
+    import pythran
+except:
+    pythran = None
+
+from .. import Utils
+from ..Utils import (cached_function, cached_method, path_exists,
+    safe_makedirs, copy_file_to_dir_if_newer, is_package_dir, write_depfile)
+from ..Compiler import Errors
+from ..Compiler.Main import Context
+from ..Compiler.Options import (CompilationOptions, default_options,
+    get_directive_defaults)
+
+join_path = cached_function(os.path.join)
+copy_once_if_newer = cached_function(copy_file_to_dir_if_newer)
+safe_makedirs_once = cached_function(safe_makedirs)
+
+if sys.version_info[0] < 3:
+    # stupid Py2 distutils enforces str type in list of sources
+    _fs_encoding = sys.getfilesystemencoding()
+    if _fs_encoding is None:
+        _fs_encoding = sys.getdefaultencoding()
+    def encode_filename_in_py2(filename):
+        if not isinstance(filename, bytes):
+            return filename.encode(_fs_encoding)
+        return filename
+else:
+    def encode_filename_in_py2(filename):
+        return filename
+    basestring = str
+
+
+def _make_relative(file_paths, base=None):
+    if not base:
+        base = os.getcwd()
+    if base[-1] != os.path.sep:
+        base += os.path.sep
+    return [_relpath(path, base) if path.startswith(base) else path
+            for path in file_paths]
+
+
+def extended_iglob(pattern):
+    if '{' in pattern:
+        m = re.match('(.*){([^}]+)}(.*)', pattern)
+        if m:
+            before, switch, after = m.groups()
+            for case in switch.split(','):
+                for path in extended_iglob(before + case + after):
+                    yield path
+            return
+
+    # We always accept '/' and also '\' on Windows,
+    # because '/' is generally common for relative paths.
+    if '**/' in pattern or os.sep == '\\' and '**\\' in pattern:
+        seen = set()
+        first, rest = re.split(r'\*\*[%s]' % ('/\\\\' if os.sep == '\\' else '/'), pattern, 1)
+        if first:
+            first = iglob(first + os.sep)
+        else:
+            first = ['']
+        for root in first:
+            for path in extended_iglob(join_path(root, rest)):
+                if path not in seen:
+                    seen.add(path)
+                    yield path
+            for path in extended_iglob(join_path(root, '*', '**', rest)):
+                if path not in seen:
+                    seen.add(path)
+                    yield path
+    else:
+        for path in iglob(pattern):
+            yield path
+
+
+def nonempty(it, error_msg="expected non-empty iterator"):
+    empty = True
+    for value in it:
+        empty = False
+        yield value
+    if empty:
+        raise ValueError(error_msg)
+
+
+@cached_function
+def file_hash(filename):
+    path = os.path.normpath(filename)
+    prefix = ('%d:%s' % (len(path), path)).encode("UTF-8")
+    m = hashlib.sha1(prefix)
+    with open(path, 'rb') as f:
+        data = f.read(65000)
+        while data:
+            m.update(data)
+            data = f.read(65000)
+    return m.hexdigest()
+
+
+def update_pythran_extension(ext):
+    if pythran is None:
+        raise RuntimeError("You first need to install Pythran to use the np_pythran directive.")
+    try:
+        pythran_ext = pythran.config.make_extension(python=True)
+    except TypeError:  # older pythran version only
+        pythran_ext = pythran.config.make_extension()
+
+    ext.include_dirs.extend(pythran_ext['include_dirs'])
+    ext.extra_compile_args.extend(pythran_ext['extra_compile_args'])
+    ext.extra_link_args.extend(pythran_ext['extra_link_args'])
+    ext.define_macros.extend(pythran_ext['define_macros'])
+    ext.undef_macros.extend(pythran_ext['undef_macros'])
+    ext.library_dirs.extend(pythran_ext['library_dirs'])
+    ext.libraries.extend(pythran_ext['libraries'])
+    ext.language = 'c++'
+
+    # These options are not compatible with the way normal Cython extensions work
+    for bad_option in ["-fwhole-program", "-fvisibility=hidden"]:
+        try:
+            ext.extra_compile_args.remove(bad_option)
+        except ValueError:
+            pass
+
+
+def parse_list(s):
+    """
+    >>> parse_list("")
+    []
+    >>> parse_list("a")
+    ['a']
+    >>> parse_list("a b c")
+    ['a', 'b', 'c']
+    >>> parse_list("[a, b, c]")
+    ['a', 'b', 'c']
+    >>> parse_list('a " " b')
+    ['a', ' ', 'b']
+    >>> parse_list('[a, ",a", "a,", ",", ]')
+    ['a', ',a', 'a,', ',']
+    """
+    if len(s) >= 2 and s[0] == '[' and s[-1] == ']':
+        s = s[1:-1]
+        delimiter = ','
+    else:
+        delimiter = ' '
+    s, literals = strip_string_literals(s)
+    def unquote(literal):
+        literal = literal.strip()
+        if literal[0] in "'\"":
+            return literals[literal[1:-1]]
+        else:
+            return literal
+    return [unquote(item) for item in s.split(delimiter) if item.strip()]
+
+
+transitive_str = object()
+transitive_list = object()
+bool_or = object()
+
+distutils_settings = {
+    'name': str,
+    'sources': list,
+    'define_macros': list,
+    'undef_macros': list,
+    'libraries': transitive_list,
+    'library_dirs': transitive_list,
+    'runtime_library_dirs': transitive_list,
+    'include_dirs': transitive_list,
+    'extra_objects': list,
+    'extra_compile_args': transitive_list,
+    'extra_link_args': transitive_list,
+    'export_symbols': list,
+    'depends': transitive_list,
+    'language': transitive_str,
+    'np_pythran': bool_or
+}
+
+
+def _legacy_strtobool(val):
+    # Used to be "distutils.util.strtobool", adapted for deprecation warnings.
+    if val == "True":
+        return True
+    elif val == "False":
+        return False
+
+    import warnings
+    warnings.warn("The 'np_pythran' option requires 'True' or 'False'", category=DeprecationWarning)
+    val = val.lower()
+    if val in ('y', 'yes', 't', 'true', 'on', '1'):
+        return True
+    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+        return False
+    else:
+        raise ValueError("invalid truth value %r" % (val,))
+
+
+@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
+def line_iter(source):
+    if isinstance(source, basestring):
+        start = 0
+        while True:
+            end = source.find('\n', start)
+            if end == -1:
+                yield source[start:]
+                return
+            yield source[start:end]
+            start = end+1
+    else:
+        for line in source:
+            yield line
+
+
+class DistutilsInfo(object):
+
+    def __init__(self, source=None, exn=None):
+        self.values = {}
+        if source is not None:
+            for line in line_iter(source):
+                line = line.lstrip()
+                if not line:
+                    continue
+                if line[0] != '#':
+                    break
+                line = line[1:].lstrip()
+                kind = next((k for k in ("distutils:", "cython:") if line.startswith(k)), None)
+                if kind is not None:
+                    key, _, value = [s.strip() for s in line[len(kind):].partition('=')]
+                    type = distutils_settings.get(key, None)
+                    if line.startswith("cython:") and type is None: continue
+                    if type in (list, transitive_list):
+                        value = parse_list(value)
+                        if key == 'define_macros':
+                            value = [tuple(macro.split('=', 1))
+                                     if '=' in macro else (macro, None)
+                                     for macro in value]
+                    if type is bool_or:
+                        value = _legacy_strtobool(value)
+                    self.values[key] = value
+        elif exn is not None:
+            for key in distutils_settings:
+                if key in ('name', 'sources', 'np_pythran'):
+                    continue
+                value = getattr(exn, key, None)
+                if value:
+                    self.values[key] = value
+
+    def merge(self, other):
+        if other is None:
+            return self
+        for key, value in other.values.items():
+            type = distutils_settings[key]
+            if type is transitive_str and key not in self.values:
+                self.values[key] = value
+            elif type is transitive_list:
+                if key in self.values:
+                    # Change a *copy* of the list (Trac #845)
+                    all = self.values[key][:]
+                    for v in value:
+                        if v not in all:
+                            all.append(v)
+                    value = all
+                self.values[key] = value
+            elif type is bool_or:
+                self.values[key] = self.values.get(key, False) | value
+        return self
+
+    def subs(self, aliases):
+        if aliases is None:
+            return self
+        resolved = DistutilsInfo()
+        for key, value in self.values.items():
+            type = distutils_settings[key]
+            if type in [list, transitive_list]:
+                new_value_list = []
+                for v in value:
+                    if v in aliases:
+                        v = aliases[v]
+                    if isinstance(v, list):
+                        new_value_list += v
+                    else:
+                        new_value_list.append(v)
+                value = new_value_list
+            else:
+                if value in aliases:
+                    value = aliases[value]
+            resolved.values[key] = value
+        return resolved
+
+    def apply(self, extension):
+        for key, value in self.values.items():
+            type = distutils_settings[key]
+            if type in [list, transitive_list]:
+                value = getattr(extension, key) + list(value)
+            setattr(extension, key, value)
+
+
+@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t,
+               single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t,
+               hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t,
+               k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t)
+def strip_string_literals(code, prefix='__Pyx_L'):
+    """
+    Normalizes every string literal to be of the form '__Pyx_Lxxx',
+    returning the normalized code and a mapping of labels to
+    string literals.
+    """
+    new_code = []
+    literals = {}
+    counter = 0
+    start = q = 0
+    in_quote = False
+    hash_mark = single_q = double_q = -1
+    code_len = len(code)
+    quote_type = None
+    quote_len = -1
+
+    while True:
+        if hash_mark < q:
+            hash_mark = code.find('#', q)
+        if single_q < q:
+            single_q = code.find("'", q)
+        if double_q < q:
+            double_q = code.find('"', q)
+        q = min(single_q, double_q)
+        if q == -1:
+            q = max(single_q, double_q)
+
+        # We're done.
+        if q == -1 and hash_mark == -1:
+            new_code.append(code[start:])
+            break
+
+        # Try to close the quote.
+        elif in_quote:
+            if code[q-1] == u'\\':
+                k = 2
+                while q >= k and code[q-k] == u'\\':
+                    k += 1
+                if k % 2 == 0:
+                    q += 1
+                    continue
+            if code[q] == quote_type and (
+                    quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])):
+                counter += 1
+                label = "%s%s_" % (prefix, counter)
+                literals[label] = code[start+quote_len:q]
+                full_quote = code[q:q+quote_len]
+                new_code.append(full_quote)
+                new_code.append(label)
+                new_code.append(full_quote)
+                q += quote_len
+                in_quote = False
+                start = q
+            else:
+                q += 1
+
+        # Process comment.
+        elif -1 != hash_mark and (hash_mark < q or q == -1):
+            new_code.append(code[start:hash_mark+1])
+            end = code.find('\n', hash_mark)
+            counter += 1
+            label = "%s%s_" % (prefix, counter)
+            if end == -1:
+                end_or_none = None
+            else:
+                end_or_none = end
+            literals[label] = code[hash_mark+1:end_or_none]
+            new_code.append(label)
+            if end == -1:
+                break
+            start = q = end
+
+        # Open the quote.
+        else:
+            if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]):
+                quote_len = 3
+            else:
+                quote_len = 1
+            in_quote = True
+            quote_type = code[q]
+            new_code.append(code[start:q])
+            start = q
+            q += quote_len
+
+    return "".join(new_code), literals
+
+
+# We need to allow spaces to allow for conditional compilation like
+# IF ...:
+#     cimport ...
+dependency_regex = re.compile(r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|"
+                              r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|"
+                              r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|"
+                              r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M)
+dependency_after_from_regex = re.compile(
+    r"(?:^\s+\(([0-9a-zA-Z_., ]*)\)[#\n])|"
+    r"(?:^\s+([0-9a-zA-Z_., ]*)[#\n])",
+    re.M)
+
+
+def normalize_existing(base_path, rel_paths):
+    return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))
+
+
+@cached_function
+def normalize_existing0(base_dir, rel_paths):
+    """
+    Given some base directory ``base_dir`` and a list of path names
+    ``rel_paths``, normalize each relative path name ``rel`` by
+    replacing it by ``os.path.join(base, rel)`` if that file exists.
+
+    Return a couple ``(normalized, needed_base)`` where ``normalized``
+    is the list of normalized file names and ``needed_base`` is
+    ``base_dir`` if we actually needed ``base_dir``. If no paths were
+    changed (for example, if all paths were already absolute), then
+    ``needed_base`` is ``None``.
+    """
+    normalized = []
+    needed_base = None
+    for rel in rel_paths:
+        if os.path.isabs(rel):
+            normalized.append(rel)
+            continue
+        path = join_path(base_dir, rel)
+        if path_exists(path):
+            normalized.append(os.path.normpath(path))
+            needed_base = base_dir
+        else:
+            normalized.append(rel)
+    return (normalized, needed_base)
+
+
+def resolve_depends(depends, include_dirs):
+    include_dirs = tuple(include_dirs)
+    resolved = []
+    for depend in depends:
+        path = resolve_depend(depend, include_dirs)
+        if path is not None:
+            resolved.append(path)
+    return resolved
+
+
+@cached_function
+def resolve_depend(depend, include_dirs):
+    if depend[0] == '<' and depend[-1] == '>':
+        return None
+    for dir in include_dirs:
+        path = join_path(dir, depend)
+        if path_exists(path):
+            return os.path.normpath(path)
+    return None
+
+
+@cached_function
+def package(filename):
+    dir = os.path.dirname(os.path.abspath(str(filename)))
+    if dir != filename and is_package_dir(dir):
+        return package(dir) + (os.path.basename(dir),)
+    else:
+        return ()
+
+
+@cached_function
+def fully_qualified_name(filename):
+    module = os.path.splitext(os.path.basename(filename))[0]
+    return '.'.join(package(filename) + (module,))
+
+
+@cached_function
+def parse_dependencies(source_filename):
+    # Actual parsing is way too slow, so we use regular expressions.
+    # The only catch is that we must strip comments and string
+    # literals ahead of time.
+    with Utils.open_source_file(source_filename, error_handling='ignore') as fh:
+        source = fh.read()
+    distutils_info = DistutilsInfo(source)
+    source, literals = strip_string_literals(source)
+    source = source.replace('\\\n', ' ').replace('\t', ' ')
+
+    # TODO: pure mode
+    cimports = []
+    includes = []
+    externs = []
+    for m in dependency_regex.finditer(source):
+        cimport_from, cimport_list, extern, include = m.groups()
+        if cimport_from:
+            cimports.append(cimport_from)
+            m_after_from = dependency_after_from_regex.search(source, pos=m.end())
+            if m_after_from:
+                multiline, one_line = m_after_from.groups()
+                subimports = multiline or one_line
+                cimports.extend("{0}.{1}".format(cimport_from, s.strip())
+                                for s in subimports.split(','))
+
+        elif cimport_list:
+            cimports.extend(x.strip() for x in cimport_list.split(","))
+        elif extern:
+            externs.append(literals[extern])
+        else:
+            includes.append(literals[include])
+    return cimports, includes, externs, distutils_info
+
+
+class DependencyTree(object):
+
+    def __init__(self, context, quiet=False):
+        self.context = context
+        self.quiet = quiet
+        self._transitive_cache = {}
+
+    def parse_dependencies(self, source_filename):
+        if path_exists(source_filename):
+            source_filename = os.path.normpath(source_filename)
+        return parse_dependencies(source_filename)
+
+    @cached_method
+    def included_files(self, filename):
+        # This is messy because included files are textually included, resolving
+        # cimports (but not includes) relative to the including file.
+        all = set()
+        for include in self.parse_dependencies(filename)[1]:
+            include_path = join_path(os.path.dirname(filename), include)
+            if not path_exists(include_path):
+                include_path = self.context.find_include_file(include, source_file_path=filename)
+            if include_path:
+                if '.' + os.path.sep in include_path:
+                    include_path = os.path.normpath(include_path)
+                all.add(include_path)
+                all.update(self.included_files(include_path))
+            elif not self.quiet:
+                print(u"Unable to locate '%s' referenced from '%s'" % (filename, include))
+        return all
+
+    @cached_method
+    def cimports_externs_incdirs(self, filename):
+        # This is really ugly. Nested cimports are resolved with respect to the
+        # includer, but includes are resolved with respect to the includee.
+        cimports, includes, externs = self.parse_dependencies(filename)[:3]
+        cimports = set(cimports)
+        externs = set(externs)
+        incdirs = set()
+        for include in self.included_files(filename):
+            included_cimports, included_externs, included_incdirs = self.cimports_externs_incdirs(include)
+            cimports.update(included_cimports)
+            externs.update(included_externs)
+            incdirs.update(included_incdirs)
+        externs, incdir = normalize_existing(filename, externs)
+        if incdir:
+            incdirs.add(incdir)
+        return tuple(cimports), externs, incdirs
+
+    def cimports(self, filename):
+        return self.cimports_externs_incdirs(filename)[0]
+
+    def package(self, filename):
+        return package(filename)
+
+    def fully_qualified_name(self, filename):
+        return fully_qualified_name(filename)
+
+    @cached_method
+    def find_pxd(self, module, filename=None):
+        is_relative = module[0] == '.'
+        if is_relative and not filename:
+            raise NotImplementedError("New relative imports.")
+        if filename is not None:
+            module_path = module.split('.')
+            if is_relative:
+                module_path.pop(0)  # just explicitly relative
+            package_path = list(self.package(filename))
+            while module_path and not module_path[0]:
+                try:
+                    package_path.pop()
+                except IndexError:
+                    return None  # FIXME: error?
+                module_path.pop(0)
+            relative = '.'.join(package_path + module_path)
+            pxd = self.context.find_pxd_file(relative, source_file_path=filename)
+            if pxd:
+                return pxd
+        if is_relative:
+            return None  # FIXME: error?
+        return self.context.find_pxd_file(module, source_file_path=filename)
+
+    @cached_method
+    def cimported_files(self, filename):
+        filename_root, filename_ext = os.path.splitext(filename)
+        if filename_ext in ('.pyx', '.py') and path_exists(filename_root + '.pxd'):
+            pxd_list = [filename_root + '.pxd']
+        else:
+            pxd_list = []
+        # Cimports generates all possible combinations package.module
+        # when imported as from package cimport module.
+        for module in self.cimports(filename):
+            if module[:7] == 'cython.' or module == 'cython':
+                continue
+            pxd_file = self.find_pxd(module, filename)
+            if pxd_file is not None:
+                pxd_list.append(pxd_file)
+        return tuple(pxd_list)
+
+    @cached_method
+    def immediate_dependencies(self, filename):
+        all_deps = {filename}
+        all_deps.update(self.cimported_files(filename))
+        all_deps.update(self.included_files(filename))
+        return all_deps
+
+    def all_dependencies(self, filename):
+        return self.transitive_merge(filename, self.immediate_dependencies, set.union)
+
+    @cached_method
+    def timestamp(self, filename):
+        return os.path.getmtime(filename)
+
+    def extract_timestamp(self, filename):
+        return self.timestamp(filename), filename
+
+    def newest_dependency(self, filename):
+        return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])
+
+    def transitive_fingerprint(self, filename, module, compilation_options):
+        r"""
+        Return a fingerprint of a cython file that is about to be cythonized.
+
+        Fingerprints are looked up in future compilations. If the fingerprint
+        is found, the cythonization can be skipped. The fingerprint must
+        incorporate everything that has an influence on the generated code.
+        """
+        try:
+            m = hashlib.sha1(__version__.encode('UTF-8'))
+            m.update(file_hash(filename).encode('UTF-8'))
+            for x in sorted(self.all_dependencies(filename)):
+                if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'):
+                    m.update(file_hash(x).encode('UTF-8'))
+            # Include the module attributes that change the compilation result
+            # in the fingerprint. We do not iterate over module.__dict__ and
+            # include almost everything here as users might extend Extension
+            # with arbitrary (random) attributes that would lead to cache
+            # misses.
+            m.update(str((
+                module.language,
+                getattr(module, 'py_limited_api', False),
+                getattr(module, 'np_pythran', False)
+            )).encode('UTF-8'))
+
+            m.update(compilation_options.get_fingerprint().encode('UTF-8'))
+            return m.hexdigest()
+        except IOError:
+            return None
+
+    def distutils_info0(self, filename):
+        info = self.parse_dependencies(filename)[3]
+        kwds = info.values
+        cimports, externs, incdirs = self.cimports_externs_incdirs(filename)
+        basedir = os.getcwd()
+        # Add dependencies on "cdef extern from ..." files
+        if externs:
+            externs = _make_relative(externs, basedir)
+            if 'depends' in kwds:
+                kwds['depends'] = list(set(kwds['depends']).union(externs))
+            else:
+                kwds['depends'] = list(externs)
+        # Add include_dirs to ensure that the C compiler will find the
+        # "cdef extern from ..." files
+        if incdirs:
+            include_dirs = list(kwds.get('include_dirs', []))
+            for inc in _make_relative(incdirs, basedir):
+                if inc not in include_dirs:
+                    include_dirs.append(inc)
+            kwds['include_dirs'] = include_dirs
+        return info
+
+    def distutils_info(self, filename, aliases=None, base=None):
+        return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
+                .subs(aliases)
+                .merge(base))
+
+    def transitive_merge(self, node, extract, merge):
+        try:
+            seen = self._transitive_cache[extract, merge]
+        except KeyError:
+            seen = self._transitive_cache[extract, merge] = {}
+        return self.transitive_merge_helper(
+            node, extract, merge, seen, {}, self.cimported_files)[0]
+
+    def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
+        if node in seen:
+            return seen[node], None
+        deps = extract(node)
+        if node in stack:
+            return deps, node
+        try:
+            stack[node] = len(stack)
+            loop = None
+            for next in outgoing(node):
+                sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)
+                if sub_loop is not None:
+                    if loop is not None and stack[loop] < stack[sub_loop]:
+                        pass
+                    else:
+                        loop = sub_loop
+                deps = merge(deps, sub_deps)
+            if loop == node:
+                loop = None
+            if loop is None:
+                seen[node] = deps
+            return deps, loop
+        finally:
+            del stack[node]
+
+
+_dep_tree = None
+
+def create_dependency_tree(ctx=None, quiet=False):
+    global _dep_tree
+    if _dep_tree is None:
+        if ctx is None:
+            ctx = Context(["."], get_directive_defaults(),
+                          options=CompilationOptions(default_options))
+        _dep_tree = DependencyTree(ctx, quiet=quiet)
+    return _dep_tree
+
+
+# If this changes, change also docs/src/reference/compilation.rst
+# which mentions this function
+def default_create_extension(template, kwds):
+    if 'depends' in kwds:
+        include_dirs = kwds.get('include_dirs', []) + ["."]
+        depends = resolve_depends(kwds['depends'], include_dirs)
+        kwds['depends'] = sorted(set(depends + template.depends))
+
+    t = template.__class__
+    ext = t(**kwds)
+    metadata = dict(distutils=kwds, module_name=kwds['name'])
+    return (ext, metadata)
+
+
+# This may be useful for advanced users?
+def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,
+                          exclude_failures=False):
+    if language is not None:
+        print('Warning: passing language={0!r} to cythonize() is deprecated. '
+              'Instead, put "# distutils: language={0}" in your .pyx or .pxd file(s)'.format(language))
+    if exclude is None:
+        exclude = []
+    if patterns is None:
+        return [], {}
+    elif isinstance(patterns, basestring) or not isinstance(patterns, Iterable):
+        patterns = [patterns]
+
+    from distutils.extension import Extension
+    if 'setuptools' in sys.modules:
+        # Support setuptools Extension instances as well.
+        extension_classes = (
+            Extension,  # should normally be the same as 'setuptools.extension._Extension'
+            sys.modules['setuptools.extension']._Extension,
+            sys.modules['setuptools'].Extension,
+        )
+    else:
+        extension_classes = (Extension,)
+
+    explicit_modules = {m.name for m in patterns if isinstance(m, extension_classes)}
+    deps = create_dependency_tree(ctx, quiet=quiet)
+
+    to_exclude = set()
+    if not isinstance(exclude, list):
+        exclude = [exclude]
+    for pattern in exclude:
+        to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))
+
+    module_list = []
+    module_metadata = {}
+
+    # if no create_extension() function is defined, use a simple
+    # default function.
+    create_extension = ctx.options.create_extension or default_create_extension
+
+    seen = set()
+    for pattern in patterns:
+        if not isinstance(pattern, extension_classes):
+            pattern = encode_filename_in_py2(pattern)
+        if isinstance(pattern, str):
+            filepattern = pattern
+            template = Extension(pattern, [])  # Fake Extension without sources
+            name = '*'
+            base = None
+            ext_language = language
+        elif isinstance(pattern, extension_classes):
+            cython_sources = [s for s in pattern.sources
+                              if os.path.splitext(s)[1] in ('.py', '.pyx')]
+            if cython_sources:
+                filepattern = cython_sources[0]
+                if len(cython_sources) > 1:
+                    print(u"Warning: Multiple cython sources found for extension '%s': %s\n"
+                          u"See https://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
+                          u"for sharing declarations among Cython files." % (pattern.name, cython_sources))
+            else:
+                # ignore non-cython modules
+                module_list.append(pattern)
+                continue
+            template = pattern
+            name = template.name
+            base = DistutilsInfo(exn=template)
+            ext_language = None  # do not override whatever the Extension says
+        else:
+            msg = str("pattern is not of type str nor subclass of Extension (%s)"
+                      " but of type %s and class %s" % (repr(Extension),
+                                                        type(pattern),
+                                                        pattern.__class__))
+            raise TypeError(msg)
+
+        for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
+            if os.path.abspath(file) in to_exclude:
+                continue
+            module_name = deps.fully_qualified_name(file)
+            if '*' in name:
+                if module_name in explicit_modules:
+                    continue
+            elif name:
+                module_name = name
+
+            Utils.raise_error_if_module_name_forbidden(module_name)
+
+            if module_name not in seen:
+                try:
+                    kwds = deps.distutils_info(file, aliases, base).values
+                except Exception:
+                    if exclude_failures:
+                        continue
+                    raise
+                if base is not None:
+                    for key, value in base.values.items():
+                        if key not in kwds:
+                            kwds[key] = value
+
+                kwds['name'] = module_name
+
+                sources = [file] + [m for m in template.sources if m != filepattern]
+                if 'sources' in kwds:
+                    # allow users to add .c files etc.
+                    for source in kwds['sources']:
+                        source = encode_filename_in_py2(source)
+                        if source not in sources:
+                            sources.append(source)
+                kwds['sources'] = sources
+
+                if ext_language and 'language' not in kwds:
+                    kwds['language'] = ext_language
+
+                np_pythran = kwds.pop('np_pythran', False)
+
+                # Create the new extension
+                m, metadata = create_extension(template, kwds)
+                m.np_pythran = np_pythran or getattr(m, 'np_pythran', False)
+                if m.np_pythran:
+                    update_pythran_extension(m)
+                module_list.append(m)
+
+                # Store metadata (this will be written as JSON in the
+                # generated C file but otherwise has no purpose)
+                module_metadata[module_name] = metadata
+
+                if file not in m.sources:
+                    # Old setuptools unconditionally replaces .pyx with .c/.cpp
+                    target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c')
+                    try:
+                        m.sources.remove(target_file)
+                    except ValueError:
+                        # never seen this in the wild, but probably better to warn about this unexpected case
+                        print(u"Warning: Cython source file not found in sources list, adding %s" % file)
+                    m.sources.insert(0, file)
+                seen.add(name)
+    return module_list, module_metadata
+
+
+# This is the user-exposed entry point.
+def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=None, language=None,
+              exclude_failures=False, show_all_warnings=False, **options):
+    """
+    Compile a set of source modules into C/C++ files and return a list of distutils
+    Extension objects for them.
+
+    :param module_list: As module list, pass either a glob pattern, a list of glob
+                        patterns or a list of Extension objects. The latter
+                        allows you to configure the extensions separately
+                        through the normal distutils options.
+                        You can also pass Extension objects that have
+                        glob patterns as their sources. Then, cythonize
+                        will resolve the pattern and create a
+                        copy of the Extension for every matching file.
+
+    :param exclude: When passing glob patterns as ``module_list``, you can exclude certain
+                    module names explicitly by passing them into the ``exclude`` option.
+
+    :param nthreads: The number of concurrent builds for parallel compilation
+                     (requires the ``multiprocessing`` module).
+
+    :param aliases: If you want to use compiler directives like ``# distutils: ...`` but
+                    can only know at compile time (when running the ``setup.py``) which values
+                    to use, you can use aliases and pass a dictionary mapping those aliases
+                    to Python strings when calling :func:`cythonize`. As an example, say you
+                    want to use the compiler
+                    directive ``# distutils: include_dirs = ../static_libs/include/``
+                    but this path isn't always fixed and you want to find it when running
+                    the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``,
+                    find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python
+                    variable called ``foo`` as a string, and then call
+                    ``cythonize(..., aliases={'MY_HEADERS': foo})``.
+
+    :param quiet: If True, Cython won't print error, warning, or status messages during the
+                  compilation.
+
+    :param force: Forces the recompilation of the Cython modules, even if the timestamps
+                  don't indicate that a recompilation is necessary.
+
+    :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this
+                     will be determined at a per-file level based on compiler directives. This
+                     affects only modules found based on file names. Extension instances passed
+                     into :func:`cythonize` will not be changed. It is recommended to use the
+                     compiler directive ``# distutils: language = c++`` rather than this option.
+
+    :param exclude_failures: For a broad 'try to compile' mode that ignores compilation
+                             failures and simply excludes the failed extensions,
+                             pass ``exclude_failures=True``. Note that this only
+                             really makes sense for compiling ``.py`` files which can also
+                             be used without compilation.
+
+    :param show_all_warnings: By default, not all Cython warnings are printed.
+                              Set to true to show all warnings.
+
+    :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py``
+                     files compiled. The HTML file gives an indication
+                     of how much Python interaction there is in
+                     each of the source code lines, compared to plain C code.
+                     It also allows you to see the C/C++ code
+                     generated for each line of Cython code. This report is invaluable when
+                     optimizing a function for speed,
+                     and for determining when to :ref:`release the GIL <nogil>`:
+                     in general, a ``nogil`` block may contain only "white" code.
+                     See examples in :ref:`determining_where_to_add_types` or
+                     :ref:`primes`.
+
+    :param annotate-fullc: If ``True`` will produce a colorized HTML version of
+                           the source which includes entire generated C/C++-code.
+
+    :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this:
+                                ``compiler_directives={'embedsignature': True}``.
+                                See :ref:`compiler-directives`.
+
+    :param depfile: produce depfiles for the sources if True.
+    """
+    if exclude is None:
+        exclude = []
+    if 'include_path' not in options:
+        options['include_path'] = ['.']
+    if 'common_utility_include_dir' in options:
+        safe_makedirs(options['common_utility_include_dir'])
+
+    depfile = options.pop('depfile', None)
+
+    if pythran is None:
+        pythran_options = None
+    else:
+        pythran_options = CompilationOptions(**options)
+        pythran_options.cplus = True
+        pythran_options.np_pythran = True
+
+    if force is None:
+        force = os.environ.get("CYTHON_FORCE_REGEN") == "1"  # allow global overrides for build systems
+
+    c_options = CompilationOptions(**options)
+    cpp_options = CompilationOptions(**options); cpp_options.cplus = True
+    ctx = Context.from_options(c_options)
+    options = c_options
+    module_list, module_metadata = create_extension_list(
+        module_list,
+        exclude=exclude,
+        ctx=ctx,
+        quiet=quiet,
+        exclude_failures=exclude_failures,
+        language=language,
+        aliases=aliases)
+
+    fix_windows_unicode_modules(module_list)
+
+    deps = create_dependency_tree(ctx, quiet=quiet)
+    build_dir = getattr(options, 'build_dir', None)
+
+    def copy_to_build_dir(filepath, root=os.getcwd()):
+        filepath_abs = os.path.abspath(filepath)
+        if os.path.isabs(filepath):
+            filepath = filepath_abs
+        if filepath_abs.startswith(root):
+            # distutil extension depends are relative to cwd
+            mod_dir = join_path(build_dir,
+                                os.path.dirname(_relpath(filepath, root)))
+            copy_once_if_newer(filepath_abs, mod_dir)
+
+    modules_by_cfile = collections.defaultdict(list)
+    to_compile = []
+    for m in module_list:
+        if build_dir:
+            for dep in m.depends:
+                copy_to_build_dir(dep)
+
+        cy_sources = [
+            source for source in m.sources
+            if os.path.splitext(source)[1] in ('.pyx', '.py')]
+        if len(cy_sources) == 1:
+            # normal "special" case: believe the Extension module name to allow user overrides
+            full_module_name = m.name
+        else:
+            # infer FQMN from source files
+            full_module_name = None
+
+        new_sources = []
+        for source in m.sources:
+            base, ext = os.path.splitext(source)
+            if ext in ('.pyx', '.py'):
+                if m.np_pythran:
+                    c_file = base + '.cpp'
+                    options = pythran_options
+                elif m.language == 'c++':
+                    c_file = base + '.cpp'
+                    options = cpp_options
+                else:
+                    c_file = base + '.c'
+                    options = c_options
+
+                # setup for out of place build directory if enabled
+                if build_dir:
+                    if os.path.isabs(c_file):
+                        c_file = os.path.splitdrive(c_file)[1]
+                        c_file = c_file.split(os.sep, 1)[1]
+                    c_file = os.path.join(build_dir, c_file)
+                    dir = os.path.dirname(c_file)
+                    safe_makedirs_once(dir)
+
+                # write out the depfile, if requested
+                if depfile:
+                    dependencies = deps.all_dependencies(source)
+                    write_depfile(c_file, source, dependencies)
+
+                # Missing files and those generated by other Cython versions should always be recreated.
+                if Utils.file_generated_by_this_cython(c_file):
+                    c_timestamp = os.path.getmtime(c_file)
+                else:
+                    c_timestamp = -1
+
+                # Priority goes first to modified files, second to direct
+                # dependents, and finally to indirect dependents.
+                if c_timestamp < deps.timestamp(source):
+                    dep_timestamp, dep = deps.timestamp(source), source
+                    priority = 0
+                else:
+                    dep_timestamp, dep = deps.newest_dependency(source)
+                    priority = 2 - (dep in deps.immediate_dependencies(source))
+                if force or c_timestamp < dep_timestamp:
+                    if not quiet and not force:
+                        if source == dep:
+                            print(u"Compiling %s because it changed." % Utils.decode_filename(source))
+                        else:
+                            print(u"Compiling %s because it depends on %s." % (
+                                Utils.decode_filename(source),
+                                Utils.decode_filename(dep),
+                            ))
+                    if not force and options.cache:
+                        fingerprint = deps.transitive_fingerprint(source, m, options)
+                    else:
+                        fingerprint = None
+                    to_compile.append((
+                        priority, source, c_file, fingerprint, quiet,
+                        options, not exclude_failures, module_metadata.get(m.name),
+                        full_module_name, show_all_warnings))
+                new_sources.append(c_file)
+                modules_by_cfile[c_file].append(m)
+            else:
+                new_sources.append(source)
+                if build_dir:
+                    copy_to_build_dir(source)
+        m.sources = new_sources
+
+    if options.cache:
+        if not os.path.exists(options.cache):
+            os.makedirs(options.cache)
+    to_compile.sort()
+    # Drop "priority" component of "to_compile" entries and add a
+    # simple progress indicator.
+    N = len(to_compile)
+    progress_fmt = "[{0:%d}/{1}] " % len(str(N))
+    for i in range(N):
+        progress = progress_fmt.format(i+1, N)
+        to_compile[i] = to_compile[i][1:] + (progress,)
+
+    if N <= 1:
+        nthreads = 0
+    if nthreads:
+        import multiprocessing
+        pool = multiprocessing.Pool(
+            nthreads, initializer=_init_multiprocessing_helper)
+        # This is a bit more involved than it should be, because KeyboardInterrupts
+        # break the multiprocessing workers when using a normal pool.map().
+        # See, for example:
+        # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
+        try:
+            result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
+            pool.close()
+            while not result.ready():
+                try:
+                    result.get(99999)  # seconds
+                except multiprocessing.TimeoutError:
+                    pass
+        except KeyboardInterrupt:
+            pool.terminate()
+            raise
+        pool.join()
+    else:
+        for args in to_compile:
+            cythonize_one(*args)
+
+    if exclude_failures:
+        failed_modules = set()
+        for c_file, modules in modules_by_cfile.items():
+            if not os.path.exists(c_file):
+                failed_modules.update(modules)
+            elif os.path.getsize(c_file) < 200:
+                f = io_open(c_file, 'r', encoding='iso8859-1')
+                try:
+                    if f.read(len('#error ')) == '#error ':
+                        # dead compilation result
+                        failed_modules.update(modules)
+                finally:
+                    f.close()
+        if failed_modules:
+            for module in failed_modules:
+                module_list.remove(module)
+            print(u"Failed compilations: %s" % ', '.join(sorted([
+                module.name for module in failed_modules])))
+
+    if options.cache:
+        cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
+    # cythonize() is often followed by the (non-Python-buffered)
+    # compiler output, flush now to avoid interleaving output.
+    sys.stdout.flush()
+    return module_list
+
+
+def fix_windows_unicode_modules(module_list):
+    # Hack around a distutils 3.[5678] bug on Windows for unicode module names.
+    # https://bugs.python.org/issue39432
+    if sys.platform != "win32":
+        return
+    if sys.version_info < (3, 5) or sys.version_info >= (3, 8, 2):
+        return
+
+    def make_filtered_list(ignored_symbol, old_entries):
+        class FilteredExportSymbols(list):
+            # export_symbols for unicode filename cause link errors on Windows
+            # Cython doesn't need them (it already defines PyInit with the correct linkage)
+            # so use this class as a temporary fix to stop them from being generated
+            def __contains__(self, val):
+                # so distutils doesn't "helpfully" add PyInit_<name>
+                return val == ignored_symbol or list.__contains__(self, val)
+
+        filtered_list = FilteredExportSymbols(old_entries)
+        if old_entries:
+            filtered_list.extend(name for name in old_entries if name != ignored_symbol)
+        return filtered_list
+
+    for m in module_list:
+        # TODO: use m.name.isascii() in Py3.7+
+        try:
+            m.name.encode("ascii")
+            continue
+        except UnicodeEncodeError:
+            pass
+        m.export_symbols = make_filtered_list(
+            "PyInit_" + m.name.rsplit(".", 1)[-1],
+            m.export_symbols,
+        )
+
+
+if os.environ.get('XML_RESULTS'):
+    compile_result_dir = os.environ['XML_RESULTS']
+    def record_results(func):
+        def with_record(*args):
+            t = time.time()
+            success = True
+            try:
+                try:
+                    func(*args)
+                except:
+                    success = False
+            finally:
+                t = time.time() - t
+                module = fully_qualified_name(args[0])
+                name = "cythonize." + module
+                failures = 1 - success
+                if success:
+                    failure_item = ""
+                else:
+                    failure_item = "failure"
+                output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
+                output.write("""
+                    <?xml version="1.0" ?>
+                    <testsuite name="%(name)s" errors="0" failures="%(failures)s" tests="1" time="%(t)s">
+                    <testcase classname="%(name)s" name="cythonize">
+                    %(failure_item)s
+                    </testcase>
+                    </testsuite>
+                """.strip() % locals())
+                output.close()
+        return with_record
+else:
+    def record_results(func):
+        return func
+
+
+# TODO: Share context? Issue: pyx processing leaks into pxd module
+@record_results
+def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None,
+                  raise_on_failure=True, embedded_metadata=None,
+                  full_module_name=None, show_all_warnings=False,
+                  progress=""):
+    from ..Compiler.Main import compile_single, default_options
+    from ..Compiler.Errors import CompileError, PyrexError
+
+    if fingerprint:
+        if not os.path.exists(options.cache):
+            safe_makedirs(options.cache)
+        # Cython-generated c files are highly compressible.
+        # (E.g. a compression ratio of about 10 for Sage).
+        fingerprint_file_base = join_path(
+            options.cache, "%s-%s" % (os.path.basename(c_file), fingerprint))
+        gz_fingerprint_file = fingerprint_file_base + gzip_ext
+        zip_fingerprint_file = fingerprint_file_base + '.zip'
+        if os.path.exists(gz_fingerprint_file) or os.path.exists(zip_fingerprint_file):
+            if not quiet:
+                print(u"%sFound compiled %s in cache" % (progress, pyx_file))
+            if os.path.exists(gz_fingerprint_file):
+                os.utime(gz_fingerprint_file, None)
+                with contextlib.closing(gzip_open(gz_fingerprint_file, 'rb')) as g:
+                    with contextlib.closing(open(c_file, 'wb')) as f:
+                        shutil.copyfileobj(g, f)
+            else:
+                os.utime(zip_fingerprint_file, None)
+                dirname = os.path.dirname(c_file)
+                with contextlib.closing(zipfile.ZipFile(zip_fingerprint_file)) as z:
+                    for artifact in z.namelist():
+                        z.extract(artifact, os.path.join(dirname, artifact))
+            return
+    if not quiet:
+        print(u"%sCythonizing %s" % (progress, Utils.decode_filename(pyx_file)))
+    if options is None:
+        options = CompilationOptions(default_options)
+    options.output_file = c_file
+    options.embedded_metadata = embedded_metadata
+
+    old_warning_level = Errors.LEVEL
+    if show_all_warnings:
+        Errors.LEVEL = 0
+
+    any_failures = 0
+    try:
+        result = compile_single(pyx_file, options, full_module_name=full_module_name)
+        if result.num_errors > 0:
+            any_failures = 1
+    except (EnvironmentError, PyrexError) as e:
+        sys.stderr.write('%s\n' % e)
+        any_failures = 1
+        # XXX
+        import traceback
+        traceback.print_exc()
+    except Exception:
+        if raise_on_failure:
+            raise
+        import traceback
+        traceback.print_exc()
+        any_failures = 1
+    finally:
+        if show_all_warnings:
+            Errors.LEVEL = old_warning_level
+
+    if any_failures:
+        if raise_on_failure:
+            raise CompileError(None, pyx_file)
+        elif os.path.exists(c_file):
+            os.remove(c_file)
+    elif fingerprint:
+        artifacts = list(filter(None, [
+            getattr(result, attr, None)
+            for attr in ('c_file', 'h_file', 'api_file', 'i_file')]))
+        if len(artifacts) == 1:
+            fingerprint_file = gz_fingerprint_file
+            with contextlib.closing(open(c_file, 'rb')) as f:
+                with contextlib.closing(gzip_open(fingerprint_file + '.tmp', 'wb')) as g:
+                    shutil.copyfileobj(f, g)
+        else:
+            fingerprint_file = zip_fingerprint_file
+            with contextlib.closing(zipfile.ZipFile(
+                    fingerprint_file + '.tmp', 'w', zipfile_compression_mode)) as zip:
+                for artifact in artifacts:
+                    zip.write(artifact, os.path.basename(artifact))
+        os.rename(fingerprint_file + '.tmp', fingerprint_file)
+
+
+def cythonize_one_helper(m):
+    import traceback
+    try:
+        return cythonize_one(*m)
+    except Exception:
+        traceback.print_exc()
+        raise
+
+
+def _init_multiprocessing_helper():
+    # KeyboardInterrupt kills workers, so don't let them get it
+    import signal
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+
+def cleanup_cache(cache, target_size, ratio=.85):
+    try:
+        p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
+        stdout, _ = p.communicate()
+        res = p.wait()
+        if res == 0:
+            total_size = 1024 * int(stdout.strip().split()[0])
+            if total_size < target_size:
+                return
+    except (OSError, ValueError):
+        pass
+    total_size = 0
+    all = []
+    for file in os.listdir(cache):
+        path = join_path(cache, file)
+        s = os.stat(path)
+        total_size += s.st_size
+        all.append((s.st_atime, s.st_size, path))
+    if total_size > target_size:
+        for time, size, file in reversed(sorted(all)):
+            os.unlink(file)
+            total_size -= size
+            if total_size < target_size * ratio:
+                break
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Distutils.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Distutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3efcc0d7b5101f5b5fbacfaa47c9afe760dbaaa6
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Distutils.py
@@ -0,0 +1 @@
+from Cython.Distutils.build_ext import build_ext
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Inline.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Inline.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe62d4ca9ea41d72ca1b8a1eb0963baac1bc521e
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Inline.py
@@ -0,0 +1,372 @@
+from __future__ import absolute_import
+
+import hashlib
+import inspect
+import os
+import re
+import sys
+
+from distutils.core import Distribution, Extension
+from distutils.command.build_ext import build_ext
+
+import Cython
+from ..Compiler.Main import Context
+from ..Compiler.Options import (default_options, CompilationOptions,
+    get_directive_defaults)
+
+from ..Compiler.Visitor import CythonTransform, EnvTransform
+from ..Compiler.ParseTreeTransforms import SkipDeclarations
+from ..Compiler.TreeFragment import parse_from_strings
+from ..Compiler.StringEncoding import _unicode
+from .Dependencies import strip_string_literals, cythonize, cached_function
+from ..Compiler import Pipeline
+from ..Utils import get_cython_cache_dir
+import cython as cython_module
+
+
+IS_PY3 = sys.version_info >= (3,)
+
+# A utility function to convert user-supplied ASCII strings to unicode.
+if not IS_PY3: + def to_unicode(s): + if isinstance(s, bytes): + return s.decode('ascii') + else: + return s +else: + to_unicode = lambda x: x + + +if sys.version_info < (3, 5): + import imp + def load_dynamic(name, module_path): + return imp.load_dynamic(name, module_path) +else: + import importlib.util + from importlib.machinery import ExtensionFileLoader + + def load_dynamic(name, path): + spec = importlib.util.spec_from_file_location(name, loader=ExtensionFileLoader(name, path)) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +class UnboundSymbols(EnvTransform, SkipDeclarations): + def __init__(self): + super(EnvTransform, self).__init__(context=None) + self.unbound = set() + def visit_NameNode(self, node): + if not self.current_env().lookup(node.name): + self.unbound.add(node.name) + return node + def __call__(self, node): + super(UnboundSymbols, self).__call__(node) + return self.unbound + + +@cached_function +def unbound_symbols(code, context=None): + code = to_unicode(code) + if context is None: + context = Context([], get_directive_defaults(), + options=CompilationOptions(default_options)) + from ..Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform + tree = parse_from_strings('(tree fragment)', code) + for phase in Pipeline.create_pipeline(context, 'pyx'): + if phase is None: + continue + tree = phase(tree) + if isinstance(phase, AnalyseDeclarationsTransform): + break + try: + import builtins + except ImportError: + import __builtin__ as builtins + return tuple(UnboundSymbols()(tree) - set(dir(builtins))) + + +def unsafe_type(arg, context=None): + py_type = type(arg) + if py_type is int: + return 'long' + else: + return safe_type(arg, context) + + +def safe_type(arg, context=None): + py_type = type(arg) + if py_type in (list, tuple, dict, str): + return py_type.__name__ + elif py_type is complex: + return 'double complex' + elif py_type is float: + return 'double' + elif py_type is bool: + return 'bint' + elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray): + return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim) + else: + for base_type in py_type.__mro__: + if base_type.__module__ in ('__builtin__', 'builtins'): + return 'object' + module = context.find_module(base_type.__module__, need_pxd=False) + if module: + entry = module.lookup(base_type.__name__) + if entry.is_type: + return '%s.%s' % (base_type.__module__, base_type.__name__) + return 'object' + + +def _get_build_extension(): + dist = Distribution() + # Ensure the build respects distutils configuration by parsing + # the configuration files + config_files = dist.find_config_files() + dist.parse_config_files(config_files) + build_extension = build_ext(dist) + build_extension.finalize_options() + return build_extension + + +@cached_function +def _create_context(cython_include_dirs): + return Context( + list(cython_include_dirs), + get_directive_defaults(), + options=CompilationOptions(default_options) + ) + + +_cython_inline_cache = {} +_cython_inline_default_context = _create_context(('.',)) + + +def _populate_unbound(kwds, unbound_symbols, locals=None, globals=None): + for symbol in unbound_symbols: + if symbol not in kwds: + if locals is None or globals is None: + calling_frame = inspect.currentframe().f_back.f_back.f_back + if locals is None: + locals = calling_frame.f_locals + if globals is None: + globals = calling_frame.f_globals + if not isinstance(locals, dict): + # FrameLocalsProxy is stricter than 
dict on how it looks up keys + # and this means our "EncodedStrings" don't match the keys in locals. + # Therefore copy to a dict. + locals = dict(locals) + if symbol in locals: + kwds[symbol] = locals[symbol] + elif symbol in globals: + kwds[symbol] = globals[symbol] + else: + print("Couldn't find %r" % symbol) + + +def _inline_key(orig_code, arg_sigs, language_level): + key = orig_code, arg_sigs, sys.version_info, sys.executable, language_level, Cython.__version__ + return hashlib.sha1(_unicode(key).encode('utf-8')).hexdigest() + + +def cython_inline(code, get_type=unsafe_type, + lib_dir=os.path.join(get_cython_cache_dir(), 'inline'), + cython_include_dirs=None, cython_compiler_directives=None, + force=False, quiet=False, locals=None, globals=None, language_level=None, **kwds): + + if get_type is None: + get_type = lambda x: 'object' + ctx = _create_context(tuple(cython_include_dirs)) if cython_include_dirs else _cython_inline_default_context + + cython_compiler_directives = dict(cython_compiler_directives) if cython_compiler_directives else {} + if language_level is None and 'language_level' not in cython_compiler_directives: + language_level = '3str' + if language_level is not None: + cython_compiler_directives['language_level'] = language_level + + key_hash = None + + # Fast path if this has been called in this session. + _unbound_symbols = _cython_inline_cache.get(code) + if _unbound_symbols is not None: + _populate_unbound(kwds, _unbound_symbols, locals, globals) + args = sorted(kwds.items()) + arg_sigs = tuple([(get_type(value, ctx), arg) for arg, value in args]) + key_hash = _inline_key(code, arg_sigs, language_level) + invoke = _cython_inline_cache.get((code, arg_sigs, key_hash)) + if invoke is not None: + arg_list = [arg[1] for arg in args] + return invoke(*arg_list) + + orig_code = code + code = to_unicode(code) + code, literals = strip_string_literals(code) + code = strip_common_indent(code) + if locals is None: + locals = inspect.currentframe().f_back.f_back.f_locals + if globals is None: + globals = inspect.currentframe().f_back.f_back.f_globals + try: + _cython_inline_cache[orig_code] = _unbound_symbols = unbound_symbols(code) + _populate_unbound(kwds, _unbound_symbols, locals, globals) + except AssertionError: + if not quiet: + # Parsing from strings not fully supported (e.g. cimports). 
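+            # (For instance, a code string that cimports another module can
+            # end up here; compilation still proceeds, only the automatic
+            # lookup of unbound names in locals()/globals() is skipped.)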
+ print("Could not parse code as a string (to extract unbound symbols).") + + cimports = [] + for name, arg in list(kwds.items()): + if arg is cython_module: + cimports.append('\ncimport cython as %s' % name) + del kwds[name] + arg_names = sorted(kwds) + arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names]) + if key_hash is None: + key_hash = _inline_key(orig_code, arg_sigs, language_level) + module_name = "_cython_inline_" + key_hash + + if module_name in sys.modules: + module = sys.modules[module_name] + + else: + build_extension = None + if cython_inline.so_ext is None: + # Figure out and cache current extension suffix + build_extension = _get_build_extension() + cython_inline.so_ext = build_extension.get_ext_filename('') + + lib_dir = os.path.abspath(lib_dir) + module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext) + + if not os.path.exists(lib_dir): + os.makedirs(lib_dir) + if force or not os.path.isfile(module_path): + cflags = [] + define_macros = [] + c_include_dirs = [] + qualified = re.compile(r'([.\w]+)[.]') + for type, _ in arg_sigs: + m = qualified.match(type) + if m: + cimports.append('\ncimport %s' % m.groups()[0]) + # one special case + if m.groups()[0] == 'numpy': + import numpy + c_include_dirs.append(numpy.get_include()) + define_macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")) + # cflags.append('-Wno-unused') + module_body, func_body = extract_func_code(code) + params = ', '.join(['%s %s' % a for a in arg_sigs]) + module_code = """ +%(module_body)s +%(cimports)s +def __invoke(%(params)s): +%(func_body)s + return locals() + """ % {'cimports': '\n'.join(cimports), + 'module_body': module_body, + 'params': params, + 'func_body': func_body } + for key, value in literals.items(): + module_code = module_code.replace(key, value) + pyx_file = os.path.join(lib_dir, module_name + '.pyx') + fh = open(pyx_file, 'w') + try: + fh.write(module_code) + finally: + fh.close() + extension = Extension( + name=module_name, + sources=[pyx_file], + include_dirs=c_include_dirs or None, + extra_compile_args=cflags or None, + define_macros=define_macros or None, + ) + if build_extension is None: + build_extension = _get_build_extension() + build_extension.extensions = cythonize( + [extension], + include_path=cython_include_dirs or ['.'], + compiler_directives=cython_compiler_directives, + quiet=quiet) + build_extension.build_temp = os.path.dirname(pyx_file) + build_extension.build_lib = lib_dir + build_extension.run() + + if sys.platform == 'win32' and sys.version_info >= (3, 8): + with os.add_dll_directory(os.path.abspath(lib_dir)): + module = load_dynamic(module_name, module_path) + else: + module = load_dynamic(module_name, module_path) + + _cython_inline_cache[orig_code, arg_sigs, key_hash] = module.__invoke + arg_list = [kwds[arg] for arg in arg_names] + return module.__invoke(*arg_list) + + +# Cached suffix used by cython_inline above. 
None should get +# overridden with actual value upon the first cython_inline invocation +cython_inline.so_ext = None + +_find_non_space = re.compile('[^ ]').search + + +def strip_common_indent(code): + min_indent = None + lines = code.splitlines() + for line in lines: + match = _find_non_space(line) + if not match: + continue # blank + indent = match.start() + if line[indent] == '#': + continue # comment + if min_indent is None or min_indent > indent: + min_indent = indent + for ix, line in enumerate(lines): + match = _find_non_space(line) + if not match or not line or line[indent:indent+1] == '#': + continue + lines[ix] = line[min_indent:] + return '\n'.join(lines) + + +module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))') +def extract_func_code(code): + module = [] + function = [] + current = function + code = code.replace('\t', ' ') + lines = code.split('\n') + for line in lines: + if not line.startswith(' '): + if module_statement.match(line): + current = module + else: + current = function + current.append(line) + return '\n'.join(module), ' ' + '\n '.join(function) + + +def get_body(source): + ix = source.index(':') + if source[:5] == 'lambda': + return "return %s" % source[ix+1:] + else: + return source[ix+1:] + + +# Lots to be done here... It would be especially cool if compiled functions +# could invoke each other quickly. +class RuntimeCompiledFunction(object): + + def __init__(self, f): + self._f = f + self._body = get_body(inspect.getsource(f)) + + def __call__(self, *args, **kwds): + all = inspect.getcallargs(self._f, *args, **kwds) + if IS_PY3: + return cython_inline(self._body, locals=self._f.__globals__, globals=self._f.__globals__, **all) + else: + return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/IpythonMagic.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/IpythonMagic.py new file mode 100644 index 0000000000000000000000000000000000000000..315862e3660cfc33b1272195ea1a794723ec0769 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/IpythonMagic.py @@ -0,0 +1,572 @@ +# -*- coding: utf-8 -*- +""" +===================== +Cython related magics +===================== + +Magic command interface for interactive work with Cython + +.. note:: + + The ``Cython`` package needs to be installed separately. It + can be obtained using ``easy_install`` or ``pip``. + +Usage +===== + +To enable the magics below, execute ``%load_ext cython``. + +``%%cython`` + +{CYTHON_DOC} + +``%%cython_inline`` + +{CYTHON_INLINE_DOC} + +``%%cython_pyximport`` + +{CYTHON_PYXIMPORT_DOC} + +Author: +* Brian Granger + +Code moved from IPython and adapted by: +* Martín Gaitán + +Parts of this code were taken from Cython.inline. +""" +#----------------------------------------------------------------------------- +# Copyright (C) 2010-2011, IPython Development Team. +# +# Distributed under the terms of the Modified BSD License. +# +# The full license is in the file ipython-COPYING.rst, distributed with this software. 
+#----------------------------------------------------------------------------- + +from __future__ import absolute_import, print_function + +import io +import os +import re +import sys +import time +import copy +import distutils.log +import textwrap + +IO_ENCODING = sys.getfilesystemencoding() +IS_PY2 = sys.version_info[0] < 3 + +import hashlib +from distutils.core import Distribution, Extension +from distutils.command.build_ext import build_ext + +from IPython.core import display +from IPython.core import magic_arguments +from IPython.core.magic import Magics, magics_class, cell_magic +try: + from IPython.paths import get_ipython_cache_dir +except ImportError: + # older IPython version + from IPython.utils.path import get_ipython_cache_dir +from IPython.utils.text import dedent + +from ..Shadow import __version__ as cython_version +from ..Compiler.Errors import CompileError +from .Inline import cython_inline, load_dynamic +from .Dependencies import cythonize +from ..Utils import captured_fd, print_captured + + +PGO_CONFIG = { + 'gcc': { + 'gen': ['-fprofile-generate', '-fprofile-dir={TEMPDIR}'], + 'use': ['-fprofile-use', '-fprofile-correction', '-fprofile-dir={TEMPDIR}'], + }, + # blind copy from 'configure' script in CPython 3.7 + 'icc': { + 'gen': ['-prof-gen'], + 'use': ['-prof-use'], + } +} +PGO_CONFIG['mingw32'] = PGO_CONFIG['gcc'] + + +if IS_PY2: + def encode_fs(name): + return name if isinstance(name, bytes) else name.encode(IO_ENCODING) +else: + def encode_fs(name): + return name + + +@magics_class +class CythonMagics(Magics): + + def __init__(self, shell): + super(CythonMagics, self).__init__(shell) + self._reloads = {} + self._code_cache = {} + self._pyximport_installed = False + + def _import_all(self, module): + mdict = module.__dict__ + if '__all__' in mdict: + keys = mdict['__all__'] + else: + keys = [k for k in mdict if not k.startswith('_')] + + for k in keys: + try: + self.shell.push({k: mdict[k]}) + except KeyError: + msg = "'module' object has no attribute '%s'" % k + raise AttributeError(msg) + + @cell_magic + def cython_inline(self, line, cell): + """Compile and run a Cython code cell using Cython.inline. + + This magic simply passes the body of the cell to Cython.inline + and returns the result. If the variables `a` and `b` are defined + in the user's namespace, here is a simple example that returns + their sum:: + + %%cython_inline + return a+b + + For most purposes, we recommend the usage of the `%%cython` magic. + """ + locs = self.shell.user_global_ns + globs = self.shell.user_ns + return cython_inline(cell, locals=locs, globals=globs) + + @cell_magic + def cython_pyximport(self, line, cell): + """Compile and import a Cython code cell using pyximport. + + The contents of the cell are written to a `.pyx` file in the current + working directory, which is then imported using `pyximport`. This + magic requires a module name to be passed:: + + %%cython_pyximport modulename + def f(x): + return 2.0*x + + The compiled module is then imported and all of its symbols are + injected into the user's namespace. For most purposes, we recommend + the usage of the `%%cython` magic. 
+ """ + module_name = line.strip() + if not module_name: + raise ValueError('module name must be given') + fname = module_name + '.pyx' + with io.open(fname, 'w', encoding='utf-8') as f: + f.write(cell) + if 'pyximport' not in sys.modules or not self._pyximport_installed: + import pyximport + pyximport.install() + self._pyximport_installed = True + if module_name in self._reloads: + module = self._reloads[module_name] + # Note: reloading extension modules is not actually supported + # (requires PEP-489 reinitialisation support). + # Don't know why this should ever have worked as it reads here. + # All we really need to do is to update the globals below. + #reload(module) + else: + __import__(module_name) + module = sys.modules[module_name] + self._reloads[module_name] = module + self._import_all(module) + + @magic_arguments.magic_arguments() + @magic_arguments.argument( + '-a', '--annotate', action='store_const', const='default', dest='annotate', + help="Produce a colorized HTML version of the source." + ) + @magic_arguments.argument( + '--annotate-fullc', action='store_const', const='fullc', dest='annotate', + help="Produce a colorized HTML version of the source " + "which includes entire generated C/C++-code." + ) + @magic_arguments.argument( + '-+', '--cplus', action='store_true', default=False, + help="Output a C++ rather than C file." + ) + @magic_arguments.argument( + '-3', dest='language_level', action='store_const', const=3, default=None, + help="Select Python 3 syntax." + ) + @magic_arguments.argument( + '-2', dest='language_level', action='store_const', const=2, default=None, + help="Select Python 2 syntax." + ) + @magic_arguments.argument( + '-f', '--force', action='store_true', default=False, + help="Force the compilation of a new module, even if the source has been " + "previously compiled." + ) + @magic_arguments.argument( + '-c', '--compile-args', action='append', default=[], + help="Extra flags to pass to compiler via the `extra_compile_args` " + "Extension flag (can be specified multiple times)." + ) + @magic_arguments.argument( + '--link-args', action='append', default=[], + help="Extra flags to pass to linker via the `extra_link_args` " + "Extension flag (can be specified multiple times)." + ) + @magic_arguments.argument( + '-l', '--lib', action='append', default=[], + help="Add a library to link the extension against (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '-n', '--name', + help="Specify a name for the Cython module." + ) + @magic_arguments.argument( + '-L', dest='library_dirs', metavar='dir', action='append', default=[], + help="Add a path to the list of library directories (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '-I', '--include', action='append', default=[], + help="Add a path to the list of include directories (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '-S', '--src', action='append', default=[], + help="Add a path to the list of src files (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '--pgo', dest='pgo', action='store_true', default=False, + help=("Enable profile guided optimisation in the C compiler. 
" + "Compiles the cell twice and executes it in between to generate a runtime profile.") + ) + @magic_arguments.argument( + '--verbose', dest='quiet', action='store_false', default=True, + help=("Print debug information like generated .c/.cpp file location " + "and exact gcc/g++ command invoked.") + ) + @cell_magic + def cython(self, line, cell): + """Compile and import everything from a Cython code cell. + + The contents of the cell are written to a `.pyx` file in the + directory `IPYTHONDIR/cython` using a filename with the hash of the + code. This file is then cythonized and compiled. The resulting module + is imported and all of its symbols are injected into the user's + namespace. The usage is similar to that of `%%cython_pyximport` but + you don't have to pass a module name:: + + %%cython + def f(x): + return 2.0*x + + To compile OpenMP codes, pass the required `--compile-args` + and `--link-args`. For example with gcc:: + + %%cython --compile-args=-fopenmp --link-args=-fopenmp + ... + + To enable profile guided optimisation, pass the ``--pgo`` option. + Note that the cell itself needs to take care of establishing a suitable + profile when executed. This can be done by implementing the functions to + optimise, and then calling them directly in the same cell on some realistic + training data like this:: + + %%cython --pgo + def critical_function(data): + for item in data: + ... + + # execute function several times to build profile + from somewhere import some_typical_data + for _ in range(100): + critical_function(some_typical_data) + + In Python 3.5 and later, you can distinguish between the profile and + non-profile runs as follows:: + + if "_pgo_" in __name__: + ... # execute critical code here + """ + args = magic_arguments.parse_argstring(self.cython, line) + code = cell if cell.endswith('\n') else cell + '\n' + lib_dir = os.path.join(get_ipython_cache_dir(), 'cython') + key = (code, line, sys.version_info, sys.executable, cython_version) + + if not os.path.exists(lib_dir): + os.makedirs(lib_dir) + + if args.pgo: + key += ('pgo',) + if args.force: + # Force a new module name by adding the current time to the + # key which is hashed to determine the module name. + key += (time.time(),) + + if args.name: + module_name = str(args.name) # no-op in Py3 + else: + module_name = "_cython_magic_" + hashlib.sha1(str(key).encode('utf-8')).hexdigest() + html_file = os.path.join(lib_dir, module_name + '.html') + module_path = os.path.join(lib_dir, module_name + self.so_ext) + + have_module = os.path.isfile(module_path) + need_cythonize = args.pgo or not have_module + + if args.annotate: + if not os.path.isfile(html_file): + need_cythonize = True + + extension = None + if need_cythonize: + extensions = self._cythonize(module_name, code, lib_dir, args, quiet=args.quiet) + if extensions is None: + # Compilation failed and printed error message + return None + assert len(extensions) == 1 + extension = extensions[0] + self._code_cache[key] = module_name + + if args.pgo: + self._profile_pgo_wrapper(extension, lib_dir) + + def print_compiler_output(stdout, stderr, where): + # On windows, errors are printed to stdout, we redirect both to sys.stderr. 
+ print_captured(stdout, where, u"Content of stdout:\n") + print_captured(stderr, where, u"Content of stderr:\n") + + get_stderr = get_stdout = None + try: + with captured_fd(1) as get_stdout: + with captured_fd(2) as get_stderr: + self._build_extension( + extension, lib_dir, pgo_step_name='use' if args.pgo else None, quiet=args.quiet) + except (distutils.errors.CompileError, distutils.errors.LinkError): + # Build failed, print error message from compiler/linker + print_compiler_output(get_stdout(), get_stderr(), sys.stderr) + return None + + # Build seems ok, but we might still want to show any warnings that occurred + print_compiler_output(get_stdout(), get_stderr(), sys.stdout) + + module = load_dynamic(module_name, module_path) + self._import_all(module) + + if args.annotate: + try: + with io.open(html_file, encoding='utf-8') as f: + annotated_html = f.read() + except IOError as e: + # File could not be opened. Most likely the user has a version + # of Cython before 0.15.1 (when `cythonize` learned the + # `force` keyword argument) and has already compiled this + # exact source without annotation. + print('Cython completed successfully but the annotated ' + 'source could not be read.', file=sys.stderr) + print(e, file=sys.stderr) + else: + return display.HTML(self.clean_annotated_html(annotated_html)) + + def _profile_pgo_wrapper(self, extension, lib_dir): + """ + Generate a .c file for a separate extension module that calls the + module init function of the original module. This makes sure that the + PGO profiler sees the correct .o file of the final module, but it still + allows us to import the module under a different name for profiling, + before recompiling it into the PGO optimised module. Overwriting and + reimporting the same shared library is not portable. + """ + extension = copy.copy(extension) # shallow copy, do not modify sources in place! + module_name = extension.name + pgo_module_name = '_pgo_' + module_name + pgo_wrapper_c_file = os.path.join(lib_dir, pgo_module_name + '.c') + with io.open(pgo_wrapper_c_file, 'w', encoding='utf-8') as f: + f.write(textwrap.dedent(u""" + #include "Python.h" + #if PY_MAJOR_VERSION < 3 + extern PyMODINIT_FUNC init%(module_name)s(void); + PyMODINIT_FUNC init%(pgo_module_name)s(void); /*proto*/ + PyMODINIT_FUNC init%(pgo_module_name)s(void) { + PyObject *sys_modules; + init%(module_name)s(); if (PyErr_Occurred()) return; + sys_modules = PyImport_GetModuleDict(); /* borrowed, no exception, "never" fails */ + if (sys_modules) { + PyObject *module = PyDict_GetItemString(sys_modules, "%(module_name)s"); if (!module) return; + PyDict_SetItemString(sys_modules, "%(pgo_module_name)s", module); + Py_DECREF(module); + } + } + #else + extern PyMODINIT_FUNC PyInit_%(module_name)s(void); + PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void); /*proto*/ + PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void) { + return PyInit_%(module_name)s(); + } + #endif + """ % {'module_name': module_name, 'pgo_module_name': pgo_module_name})) + + extension.sources = extension.sources + [pgo_wrapper_c_file] # do not modify in place! 
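+        # Building the wrapper under its own module name lets the profiling
+        # run import it without clashing with the final, PGO-optimised module.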
+ extension.name = pgo_module_name + + self._build_extension(extension, lib_dir, pgo_step_name='gen') + + # import and execute module code to generate profile + so_module_path = os.path.join(lib_dir, pgo_module_name + self.so_ext) + load_dynamic(pgo_module_name, so_module_path) + + def _cythonize(self, module_name, code, lib_dir, args, quiet=True): + pyx_file = os.path.join(lib_dir, module_name + '.pyx') + pyx_file = encode_fs(pyx_file) + + c_include_dirs = args.include + c_src_files = list(map(str, args.src)) + if 'numpy' in code: + import numpy + c_include_dirs.append(numpy.get_include()) + with io.open(pyx_file, 'w', encoding='utf-8') as f: + f.write(code) + extension = Extension( + name=module_name, + sources=[pyx_file] + c_src_files, + include_dirs=c_include_dirs, + library_dirs=args.library_dirs, + extra_compile_args=args.compile_args, + extra_link_args=args.link_args, + libraries=args.lib, + language='c++' if args.cplus else 'c', + ) + try: + opts = dict( + quiet=quiet, + annotate=args.annotate, + force=True, + language_level=min(3, sys.version_info[0]), + ) + if args.language_level is not None: + assert args.language_level in (2, 3) + opts['language_level'] = args.language_level + return cythonize([extension], **opts) + except CompileError: + return None + + def _build_extension(self, extension, lib_dir, temp_dir=None, pgo_step_name=None, quiet=True): + build_extension = self._get_build_extension( + extension, lib_dir=lib_dir, temp_dir=temp_dir, pgo_step_name=pgo_step_name) + old_threshold = None + try: + if not quiet: + old_threshold = distutils.log.set_threshold(distutils.log.DEBUG) + build_extension.run() + finally: + if not quiet and old_threshold is not None: + distutils.log.set_threshold(old_threshold) + + def _add_pgo_flags(self, build_extension, step_name, temp_dir): + compiler_type = build_extension.compiler.compiler_type + if compiler_type == 'unix': + compiler_cmd = build_extension.compiler.compiler_so + # TODO: we could try to call "[cmd] --version" for better insights + if not compiler_cmd: + pass + elif 'clang' in compiler_cmd or 'clang' in compiler_cmd[0]: + compiler_type = 'clang' + elif 'icc' in compiler_cmd or 'icc' in compiler_cmd[0]: + compiler_type = 'icc' + elif 'gcc' in compiler_cmd or 'gcc' in compiler_cmd[0]: + compiler_type = 'gcc' + elif 'g++' in compiler_cmd or 'g++' in compiler_cmd[0]: + compiler_type = 'gcc' + config = PGO_CONFIG.get(compiler_type) + orig_flags = [] + if config and step_name in config: + flags = [f.format(TEMPDIR=temp_dir) for f in config[step_name]] + for extension in build_extension.extensions: + orig_flags.append((extension.extra_compile_args, extension.extra_link_args)) + extension.extra_compile_args = extension.extra_compile_args + flags + extension.extra_link_args = extension.extra_link_args + flags + else: + print("No PGO %s configuration known for C compiler type '%s'" % (step_name, compiler_type), + file=sys.stderr) + return orig_flags + + @property + def so_ext(self): + """The extension suffix for compiled modules.""" + try: + return self._so_ext + except AttributeError: + self._so_ext = self._get_build_extension().get_ext_filename('') + return self._so_ext + + def _clear_distutils_mkpath_cache(self): + """clear distutils mkpath cache + + prevents distutils from skipping re-creation of dirs that have been removed + """ + try: + from distutils.dir_util import _path_created + except ImportError: + pass + else: + _path_created.clear() + + def _get_build_extension(self, extension=None, lib_dir=None, temp_dir=None, + 
pgo_step_name=None, _build_ext=build_ext): + self._clear_distutils_mkpath_cache() + dist = Distribution() + config_files = dist.find_config_files() + try: + config_files.remove('setup.cfg') + except ValueError: + pass + dist.parse_config_files(config_files) + + if not temp_dir: + temp_dir = lib_dir + add_pgo_flags = self._add_pgo_flags + + if pgo_step_name: + base_build_ext = _build_ext + class _build_ext(_build_ext): + def build_extensions(self): + add_pgo_flags(self, pgo_step_name, temp_dir) + base_build_ext.build_extensions(self) + + build_extension = _build_ext(dist) + build_extension.finalize_options() + if temp_dir: + temp_dir = encode_fs(temp_dir) + build_extension.build_temp = temp_dir + if lib_dir: + lib_dir = encode_fs(lib_dir) + build_extension.build_lib = lib_dir + if extension is not None: + build_extension.extensions = [extension] + return build_extension + + @staticmethod + def clean_annotated_html(html): + """Clean up the annotated HTML source. + + Strips the link to the generated C or C++ file, which we do not + present to the user. + """ + r = re.compile('
<p>Raw output: <a href="(.*)">(.*)</a>')
+        html = '\n'.join(l for l in html.splitlines() if not r.match(l))
+        return html
+
+__doc__ = __doc__.format(
+    # rST doesn't see the -+ flag as part of an option list, so we
+    # hide it from the module-level docstring.
+    CYTHON_DOC=dedent(CythonMagics.cython.__doc__
+                      .replace('-+, --cplus', '--cplus ')),
+    CYTHON_INLINE_DOC=dedent(CythonMagics.cython_inline.__doc__),
+    CYTHON_PYXIMPORT_DOC=dedent(CythonMagics.cython_pyximport.__doc__),
+)
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestCythonizeArgsParser.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestCythonizeArgsParser.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5a682dd6440800dad3cc3e120a910f381f1e83b
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestCythonizeArgsParser.py
@@ -0,0 +1,482 @@
+from Cython.Build.Cythonize import (
+    create_args_parser, parse_args_raw, parse_args,
+    parallel_compiles
+)
+
+from Cython.Compiler import Options
+from Cython.Compiler.Tests.Utils import backup_Options, restore_Options, check_global_options
+
+from unittest import TestCase
+
+import sys
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO  # doesn't accept 'str' in Py2
+
+
+class TestCythonizeArgsParser(TestCase):
+
+    def setUp(self):
+        TestCase.setUp(self)
+        self.parse_args = lambda x, parser=create_args_parser() : parse_args_raw(parser, x)
+
+
+    def are_default(self, options, skip):
+        # empty containers
+        empty_containers = ['directives', 'compile_time_env', 'options', 'excludes']
+        are_none = ['language_level', 'annotate', 'build', 'build_inplace', 'force', 'quiet', 'lenient', 'keep_going', 'no_docstrings']
+        for opt_name in empty_containers:
+            if len(getattr(options, opt_name))!=0 and (opt_name not in skip):
+                self.assertEqual(opt_name,"", msg="For option "+opt_name)
+                return False
+        for opt_name in are_none:
+            if (getattr(options, opt_name) is not None) and (opt_name not in skip):
+                self.assertEqual(opt_name,"", msg="For option "+opt_name)
+                return False
+        if options.parallel!=parallel_compiles and ('parallel' not in skip):
+            return False
+        return True
+
+    # testing directives:
+    def test_directive_short(self):
+        options, args = self.parse_args(['-X', 'cdivision=True'])
+        self.assertFalse(args)
+        self.assertTrue(self.are_default(options, ['directives']))
+        self.assertEqual(options.directives['cdivision'], True)
+
+    def test_directive_long(self):
+        options, args = self.parse_args(['--directive', 'cdivision=True'])
+        self.assertFalse(args)
+        self.assertTrue(self.are_default(options, ['directives']))
+        self.assertEqual(options.directives['cdivision'], True)
+
+    def test_directive_multiple(self):
+        options, args = self.parse_args(['-X', 'cdivision=True', '-X', 'c_string_type=bytes'])
+        self.assertFalse(args)
+        self.assertTrue(self.are_default(options, ['directives']))
+        self.assertEqual(options.directives['cdivision'], True)
+        self.assertEqual(options.directives['c_string_type'], 'bytes')
+
+    def test_directive_multiple_v2(self):
+        options, args = self.parse_args(['-X', 'cdivision=True,c_string_type=bytes'])
+        self.assertFalse(args)
+        self.assertTrue(self.are_default(options, ['directives']))
+        self.assertEqual(options.directives['cdivision'], True)
+        self.assertEqual(options.directives['c_string_type'], 'bytes')
+
+    def test_directive_value_yes(self):
+        options, args = self.parse_args(['-X', 'cdivision=YeS'])
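+        # Boolean directive values are matched case-insensitively, so
+        # 'YeS' is expected to parse as True (checked below).
+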
self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], True) + + def test_directive_value_no(self): + options, args = self.parse_args(['-X', 'cdivision=no']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], False) + + def test_directive_value_invalid(self): + with self.assertRaises(ValueError) as context: + options, args = self.parse_args(['-X', 'cdivision=sadfasd']) + + def test_directive_key_invalid(self): + with self.assertRaises(ValueError) as context: + options, args = self.parse_args(['-X', 'abracadabra']) + + def test_directive_no_value(self): + with self.assertRaises(ValueError) as context: + options, args = self.parse_args(['-X', 'cdivision']) + + def test_directives_types(self): + directives = { + 'auto_pickle': True, + 'c_string_type': 'bytearray', + 'c_string_type': 'bytes', + 'c_string_type': 'str', + 'c_string_type': 'bytearray', + 'c_string_type': 'unicode', + 'c_string_encoding' : 'ascii', + 'language_level' : 2, + 'language_level' : 3, + 'language_level' : '3str', + 'set_initial_path' : 'my_initial_path', + } + for key, value in directives.items(): + cmd = '{key}={value}'.format(key=key, value=str(value)) + options, args = self.parse_args(['-X', cmd]) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives']), msg = "Error for option: "+cmd) + self.assertEqual(options.directives[key], value, msg = "Error for option: "+cmd) + + def test_directives_wrong(self): + directives = { + 'auto_pickle': 42, # for bool type + 'auto_pickle': 'NONONO', # for bool type + 'c_string_type': 'bites', + #'c_string_encoding' : 'a', + #'language_level' : 4, + } + for key, value in directives.items(): + cmd = '{key}={value}'.format(key=key, value=str(value)) + with self.assertRaises(ValueError, msg = "Error for option: "+cmd) as context: + options, args = self.parse_args(['-X', cmd]) + + def test_compile_time_env_short(self): + options, args = self.parse_args(['-E', 'MYSIZE=10']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + + def test_compile_time_env_long(self): + options, args = self.parse_args(['--compile-time-env', 'MYSIZE=10']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + + def test_compile_time_env_multiple(self): + options, args = self.parse_args(['-E', 'MYSIZE=10', '-E', 'ARRSIZE=11']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + self.assertEqual(options.compile_time_env['ARRSIZE'], 11) + + def test_compile_time_env_multiple_v2(self): + options, args = self.parse_args(['-E', 'MYSIZE=10,ARRSIZE=11']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + self.assertEqual(options.compile_time_env['ARRSIZE'], 11) + + #testing options + def test_option_short(self): + options, args = self.parse_args(['-s', 'docstrings=True']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_long(self): + options, args = self.parse_args(['--option', 'docstrings=True']) + self.assertFalse(args) + 
self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_multiple(self): + options, args = self.parse_args(['-s', 'docstrings=True', '-s', 'buffer_max_dims=8']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + self.assertEqual(options.options['buffer_max_dims'], True) # really? + + def test_option_multiple_v2(self): + options, args = self.parse_args(['-s', 'docstrings=True,buffer_max_dims=8']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + self.assertEqual(options.options['buffer_max_dims'], True) # really? + + def test_option_value_yes(self): + options, args = self.parse_args(['-s', 'docstrings=YeS']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_4242(self): + options, args = self.parse_args(['-s', 'docstrings=4242']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_0(self): + options, args = self.parse_args(['-s', 'docstrings=0']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], False) + + def test_option_value_emptystr(self): + options, args = self.parse_args(['-s', 'docstrings=']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_a_str(self): + options, args = self.parse_args(['-s', 'docstrings=BB']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_no(self): + options, args = self.parse_args(['-s', 'docstrings=nO']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], False) + + def test_option_no_value(self): + options, args = self.parse_args(['-s', 'docstrings']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_any_key(self): + options, args = self.parse_args(['-s', 'abracadabra']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['abracadabra'], True) + + def test_language_level_2(self): + options, args = self.parse_args(['-2']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['language_level'])) + self.assertEqual(options.language_level, 2) + + def test_language_level_3(self): + options, args = self.parse_args(['-3']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['language_level'])) + self.assertEqual(options.language_level, 3) + + def test_language_level_3str(self): + options, args = self.parse_args(['--3str']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['language_level'])) + self.assertEqual(options.language_level, '3str') + + def test_annotate_short(self): + options, args = self.parse_args(['-a']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'default') + + def test_annotate_long(self): 
+ options, args = self.parse_args(['--annotate']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'default') + + def test_annotate_fullc(self): + options, args = self.parse_args(['--annotate-fullc']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'fullc') + + def test_annotate_and_positional(self): + options, args = self.parse_args(['-a', 'foo.pyx']) + self.assertEqual(args, ['foo.pyx']) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'default') + + def test_annotate_and_optional(self): + options, args = self.parse_args(['-a', '--3str']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate', 'language_level'])) + self.assertEqual(options.annotate, 'default') + self.assertEqual(options.language_level, '3str') + + def test_exclude_short(self): + options, args = self.parse_args(['-x', '*.pyx']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['excludes'])) + self.assertTrue('*.pyx' in options.excludes) + + def test_exclude_long(self): + options, args = self.parse_args(['--exclude', '*.pyx']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['excludes'])) + self.assertTrue('*.pyx' in options.excludes) + + def test_exclude_multiple(self): + options, args = self.parse_args(['--exclude', '*.pyx', '--exclude', '*.py', ]) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['excludes'])) + self.assertEqual(options.excludes, ['*.pyx', '*.py']) + + def test_build_short(self): + options, args = self.parse_args(['-b']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build'])) + self.assertEqual(options.build, True) + + def test_build_long(self): + options, args = self.parse_args(['--build']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build'])) + self.assertEqual(options.build, True) + + def test_inplace_short(self): + options, args = self.parse_args(['-i']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build_inplace'])) + self.assertEqual(options.build_inplace, True) + + def test_inplace_long(self): + options, args = self.parse_args(['--inplace']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build_inplace'])) + self.assertEqual(options.build_inplace, True) + + def test_parallel_short(self): + options, args = self.parse_args(['-j', '42']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['parallel'])) + self.assertEqual(options.parallel, 42) + + def test_parallel_long(self): + options, args = self.parse_args(['--parallel', '42']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['parallel'])) + self.assertEqual(options.parallel, 42) + + def test_force_short(self): + options, args = self.parse_args(['-f']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['force'])) + self.assertEqual(options.force, True) + + def test_force_long(self): + options, args = self.parse_args(['--force']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['force'])) + self.assertEqual(options.force, True) + + def test_quite_short(self): + options, args = self.parse_args(['-q']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['quiet'])) + self.assertEqual(options.quiet, True) + + def test_quite_long(self): + options, args = self.parse_args(['--quiet']) + 
self.assertFalse(args) + self.assertTrue(self.are_default(options, ['quiet'])) + self.assertEqual(options.quiet, True) + + def test_lenient_long(self): + options, args = self.parse_args(['--lenient']) + self.assertTrue(self.are_default(options, ['lenient'])) + self.assertFalse(args) + self.assertEqual(options.lenient, True) + + def test_keep_going_short(self): + options, args = self.parse_args(['-k']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['keep_going'])) + self.assertEqual(options.keep_going, True) + + def test_keep_going_long(self): + options, args = self.parse_args(['--keep-going']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['keep_going'])) + self.assertEqual(options.keep_going, True) + + def test_no_docstrings_long(self): + options, args = self.parse_args(['--no-docstrings']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['no_docstrings'])) + self.assertEqual(options.no_docstrings, True) + + def test_file_name(self): + options, args = self.parse_args(['file1.pyx', 'file2.pyx']) + self.assertEqual(len(args), 2) + self.assertEqual(args[0], 'file1.pyx') + self.assertEqual(args[1], 'file2.pyx') + self.assertTrue(self.are_default(options, [])) + + def test_option_first(self): + options, args = self.parse_args(['-i', 'file.pyx']) + self.assertEqual(args, ['file.pyx']) + self.assertEqual(options.build_inplace, True) + self.assertTrue(self.are_default(options, ['build_inplace'])) + + def test_file_inbetween(self): + options, args = self.parse_args(['-i', 'file.pyx', '-a']) + self.assertEqual(args, ['file.pyx']) + self.assertEqual(options.build_inplace, True) + self.assertEqual(options.annotate, 'default') + self.assertTrue(self.are_default(options, ['build_inplace', 'annotate'])) + + def test_option_trailing(self): + options, args = self.parse_args(['file.pyx', '-i']) + self.assertEqual(args, ['file.pyx']) + self.assertEqual(options.build_inplace, True) + self.assertTrue(self.are_default(options, ['build_inplace'])) + + def test_interspersed_positional(self): + options, sources = self.parse_args([ + 'file1.pyx', '-a', + 'file2.pyx' + ]) + self.assertEqual(sources, ['file1.pyx', 'file2.pyx']) + self.assertEqual(options.annotate, 'default') + self.assertTrue(self.are_default(options, ['annotate'])) + + def test_interspersed_positional2(self): + options, sources = self.parse_args([ + 'file1.pyx', '-a', + 'file2.pyx', '-a', 'file3.pyx' + ]) + self.assertEqual(sources, ['file1.pyx', 'file2.pyx', 'file3.pyx']) + self.assertEqual(options.annotate, 'default') + self.assertTrue(self.are_default(options, ['annotate'])) + + def test_interspersed_positional3(self): + options, sources = self.parse_args([ + '-f', 'f1', 'f2', '-a', + 'f3', 'f4', '-a', 'f5' + ]) + self.assertEqual(sources, ['f1', 'f2', 'f3', 'f4', 'f5']) + self.assertEqual(options.annotate, 'default') + self.assertEqual(options.force, True) + self.assertTrue(self.are_default(options, ['annotate', 'force'])) + + def test_wrong_option(self): + old_stderr = sys.stderr + stderr = sys.stderr = StringIO() + try: + self.assertRaises(SystemExit, self.parse_args, + ['--unknown-option'] + ) + finally: + sys.stderr = old_stderr + self.assertTrue(stderr.getvalue()) + + +class TestParseArgs(TestCase): + def setUp(self): + self._options_backup = backup_Options() + + def tearDown(self): + restore_Options(self._options_backup) + + def check_default_global_options(self, white_list=[]): + self.assertEqual(check_global_options(self._options_backup, white_list), "") + + def 
test_build_set_for_inplace(self): + options, args = parse_args(['foo.pyx', '-i']) + self.assertEqual(options.build, True) + self.check_default_global_options() + + def test_lenient(self): + options, sources = parse_args(['foo.pyx', '--lenient']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.error_on_unknown_names, False) + self.assertEqual(Options.error_on_uninitialized, False) + self.check_default_global_options(['error_on_unknown_names', 'error_on_uninitialized']) + + def test_annotate(self): + options, sources = parse_args(['foo.pyx', '--annotate']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.annotate, 'default') + self.check_default_global_options(['annotate']) + + def test_annotate_fullc(self): + options, sources = parse_args(['foo.pyx', '--annotate-fullc']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.annotate, 'fullc') + self.check_default_global_options(['annotate']) + + def test_no_docstrings(self): + options, sources = parse_args(['foo.pyx', '--no-docstrings']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.docstrings, False) + self.check_default_global_options(['docstrings']) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestInline.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestInline.py new file mode 100644 index 0000000000000000000000000000000000000000..53346137052b7ea6d3f02a5407f2df266868c109 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestInline.py @@ -0,0 +1,112 @@ +import os +import tempfile +import unittest +from Cython.Shadow import inline +from Cython.Build.Inline import safe_type +from Cython.TestUtils import CythonTest + +try: + import numpy + has_numpy = True +except: + has_numpy = False + +test_kwds = dict(force=True, quiet=True) + +global_value = 100 + +class TestInline(CythonTest): + def setUp(self): + CythonTest.setUp(self) + self._call_kwds = dict(test_kwds) + if os.path.isdir('TEST_TMP'): + lib_dir = os.path.join('TEST_TMP','inline') + else: + lib_dir = tempfile.mkdtemp(prefix='cython_inline_') + self._call_kwds['lib_dir'] = lib_dir + + def test_simple(self): + self.assertEqual(inline("return 1+2", **self._call_kwds), 3) + + def test_types(self): + self.assertEqual(inline(""" + cimport cython + return cython.typeof(a), cython.typeof(b) + """, a=1.0, b=[], **self._call_kwds), ('double', 'list object')) + + def test_locals(self): + a = 1 + b = 2 + self.assertEqual(inline("return a+b", **self._call_kwds), 3) + + def test_globals(self): + self.assertEqual(inline("return global_value + 1", **self._call_kwds), global_value + 1) + + def test_no_return(self): + self.assertEqual(inline(""" + a = 1 + cdef double b = 2 + cdef c = [] + """, **self._call_kwds), dict(a=1, b=2.0, c=[])) + + def test_def_node(self): + foo = inline("def foo(x): return x * x", **self._call_kwds)['foo'] + self.assertEqual(foo(7), 49) + + def test_class_ref(self): + class Type(object): + pass + tp = inline("Type")['Type'] + self.assertEqual(tp, Type) + + def test_pure(self): + import cython as cy + b = inline(""" + b = cy.declare(float, a) + c = cy.declare(cy.pointer(cy.float), &b) + return b + """, a=3, **self._call_kwds) + self.assertEqual(type(b), float) + + def test_compiler_directives(self): + self.assertEqual( + inline('return sum(x)', + x=[1, 2, 3], + cython_compiler_directives={'boundscheck': False}), + 6 + ) + + def test_lang_version(self): + # 
GH-3419. Caching for inline code didn't always respect compiler directives. + inline_divcode = "def f(int a, int b): return a/b" + self.assertEqual( + inline(inline_divcode, language_level=2)['f'](5,2), + 2 + ) + self.assertEqual( + inline(inline_divcode, language_level=3)['f'](5,2), + 2.5 + ) + self.assertEqual( + inline(inline_divcode, language_level=2)['f'](5,2), + 2 + ) + + def test_repeated_use(self): + inline_mulcode = "def f(int a, int b): return a * b" + self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10) + self.assertEqual(inline(inline_mulcode)['f'](5, 3), 15) + self.assertEqual(inline(inline_mulcode)['f'](6, 2), 12) + self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10) + + f = inline(inline_mulcode)['f'] + self.assertEqual(f(5, 2), 10) + self.assertEqual(f(5, 3), 15) + + @unittest.skipIf(not has_numpy, "NumPy is not available") + def test_numpy(self): + import numpy + a = numpy.ndarray((10, 20)) + a[0,0] = 10 + self.assertEqual(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]') + self.assertEqual(inline("return a[0,0]", a=a, **self._call_kwds), 10.0) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestRecythonize.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestRecythonize.py new file mode 100644 index 0000000000000000000000000000000000000000..eb87018cb8770832852d50a210afbdec45d2fd36 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/TestRecythonize.py @@ -0,0 +1,212 @@ +import shutil +import os +import tempfile +import time + +import Cython.Build.Dependencies +import Cython.Utils +from Cython.TestUtils import CythonTest + + +def fresh_cythonize(*args, **kwargs): + Cython.Utils.clear_function_caches() + Cython.Build.Dependencies._dep_tree = None # discard method caches + Cython.Build.Dependencies.cythonize(*args, **kwargs) + +class TestRecythonize(CythonTest): + + def setUp(self): + CythonTest.setUp(self) + self.temp_dir = ( + tempfile.mkdtemp( + prefix='recythonize-test', + dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None + ) + ) + + def tearDown(self): + CythonTest.tearDown(self) + shutil.rmtree(self.temp_dir) + + def test_recythonize_pyx_on_pxd_change(self): + + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_pyx = os.path.join(src_dir, 'a.pyx') + a_c = os.path.join(src_dir, 'a.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_pyx, 'w') as f: + f.write('value = 1\n') + + + # The dependencies for "a.pyx" are "a.pxd" and "a.pyx". + self.assertEqual({a_pxd, a_pyx}, dep_tree.all_dependencies(a_pyx)) + + # Cythonize to create a.c + fresh_cythonize(a_pyx) + + # Sleep to address coarse time-stamp precision. 
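+        # (On filesystems with one-second mtime resolution the rewritten
+        # a.pxd could otherwise end up with the same timestamp as a.c and
+        # the change would go unnoticed.)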
+ time.sleep(1) + + with open(a_c) as f: + a_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize(a_pyx) + + with open(a_c) as f: + a_c_contents2 = f.read() + + self.assertTrue("__pyx_v_1a_value = 1;" in a_c_contents1) + self.assertFalse("__pyx_v_1a_value = 1;" in a_c_contents2) + self.assertTrue("__pyx_v_1a_value = 1.0;" in a_c_contents2) + self.assertFalse("__pyx_v_1a_value = 1.0;" in a_c_contents1) + + + def test_recythonize_py_on_pxd_change(self): + + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_py = os.path.join(src_dir, 'a.py') + a_c = os.path.join(src_dir, 'a.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_py, 'w') as f: + f.write('value = 1\n') + + + # The dependencies for "a.py" are "a.pxd" and "a.py". + self.assertEqual({a_pxd, a_py}, dep_tree.all_dependencies(a_py)) + + # Cythonize to create a.c + fresh_cythonize(a_py) + + # Sleep to address coarse time-stamp precision. + time.sleep(1) + + with open(a_c) as f: + a_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize(a_py) + + with open(a_c) as f: + a_c_contents2 = f.read() + + + self.assertTrue("__pyx_v_1a_value = 1;" in a_c_contents1) + self.assertFalse("__pyx_v_1a_value = 1;" in a_c_contents2) + self.assertTrue("__pyx_v_1a_value = 1.0;" in a_c_contents2) + self.assertFalse("__pyx_v_1a_value = 1.0;" in a_c_contents1) + + def test_recythonize_pyx_on_dep_pxd_change(self): + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_pyx = os.path.join(src_dir, 'a.pyx') + b_pyx = os.path.join(src_dir, 'b.pyx') + b_c = os.path.join(src_dir, 'b.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_pyx, 'w') as f: + f.write('value = 1\n') + + with open(b_pyx, 'w') as f: + f.write('cimport a\n' + 'a.value = 2\n') + + + # The dependencies for "b.pyx" are "a.pxd" and "b.pyx". + self.assertEqual({a_pxd, b_pyx}, dep_tree.all_dependencies(b_pyx)) + + + # Cythonize to create b.c + fresh_cythonize([a_pyx, b_pyx]) + + # Sleep to address coarse time-stamp precision. + time.sleep(1) + + with open(b_c) as f: + b_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize([a_pyx, b_pyx]) + + with open(b_c) as f: + b_c_contents2 = f.read() + + + + self.assertTrue("__pyx_v_1a_value = 2;" in b_c_contents1) + self.assertFalse("__pyx_v_1a_value = 2;" in b_c_contents2) + self.assertTrue("__pyx_v_1a_value = 2.0;" in b_c_contents2) + self.assertFalse("__pyx_v_1a_value = 2.0;" in b_c_contents1) + + + + def test_recythonize_py_on_dep_pxd_change(self): + + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_pyx = os.path.join(src_dir, 'a.pyx') + b_pxd = os.path.join(src_dir, 'b.pxd') + b_py = os.path.join(src_dir, 'b.py') + b_c = os.path.join(src_dir, 'b.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_pyx, 'w') as f: + f.write('value = 1\n') + + with open(b_pxd, 'w') as f: + f.write('cimport a\n') + + with open(b_py, 'w') as f: + f.write('a.value = 2\n') + + + # The dependencies for b.py are "a.pxd", "b.pxd" and "b.py". 
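+        # (b.py picks up its matching b.pxd implicitly by name, and b.pxd
+        # cimports a, which pulls in a.pxd transitively.)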
+ self.assertEqual({a_pxd, b_pxd, b_py}, dep_tree.all_dependencies(b_py)) + + + # Cythonize to create b.c + fresh_cythonize([a_pyx, b_py]) + + # Sleep to address coarse time-stamp precision. + time.sleep(1) + + with open(b_c) as f: + b_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize([a_pyx, b_py]) + + with open(b_c) as f: + b_c_contents2 = f.read() + + self.assertTrue("__pyx_v_1a_value = 2;" in b_c_contents1) + self.assertFalse("__pyx_v_1a_value = 2;" in b_c_contents2) + self.assertTrue("__pyx_v_1a_value = 2.0;" in b_c_contents2) + self.assertFalse("__pyx_v_1a_value = 2.0;" in b_c_contents1) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/Tests/__init__.py @@ -0,0 +1 @@ +# empty file diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4775fc3a94d0ff945d4ff3cb7a656d4f5fbe8a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__init__.py @@ -0,0 +1,14 @@ +from .Dependencies import cythonize + +import sys +if sys.version_info < (3, 7): + from .Distutils import build_ext +del sys + + +def __getattr__(name): + if name == 'build_ext': + # Lazy import, fails if distutils is not available (in Python 3.12+). 
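# ----- editor's aside (illustrative sketch, not part of the vendored file) -----
# The lazy import performed just below relies on PEP 562 (module-level
# __getattr__, Python 3.7+): `Cython.Build.build_ext` keeps working while
# the distutils import is deferred until first access. A standalone
# demonstration of the same hook (module name 'lazy_demo' is made up):
import sys
import types

def _make_lazy_module():
    # Build a throwaway module whose attribute is computed on first access.
    mod = types.ModuleType('lazy_demo')
    def module_getattr(name):
        if name == 'answer':
            return 42
        raise AttributeError(name)
    mod.__getattr__ = module_getattr  # PEP 562 hook lives in the module dict
    sys.modules['lazy_demo'] = mod
    return mod
# after _make_lazy_module(): `import lazy_demo; lazy_demo.answer` -> 42
# ----- end aside -----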
+ from .Distutils import build_ext + return build_ext + raise AttributeError("module '%s' has no attribute '%s'" % (__name__, name)) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/BuildExecutable.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/BuildExecutable.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62ec2281b2607f9a2317df71000c84290d6e5a54 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/BuildExecutable.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Cythonize.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Cythonize.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0427bde8c37f18a7f06d6cde14ba59771d7db788 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Cythonize.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Dependencies.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Dependencies.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35062dff24898d9cf11e4a2cf8277f599ec052ce Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Dependencies.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Distutils.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Distutils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06390f419b6c67db7981de9081e2eb227eb936a0 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Distutils.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Inline.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Inline.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bde1ac4126e98b5afd8ff91a3fafb7f7e482f4e Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/Inline.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/IpythonMagic.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/IpythonMagic.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cf4ede400a9a07f8412d01d6627fcf77579f50b Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/IpythonMagic.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8e56c2fe561c7e0b2e8a0b0414cde0c32abcf31 Binary files /dev/null 
and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Build/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/AutoDocTransforms.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/AutoDocTransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c74aab7b7c1ae1315ef3848732126423258ab5e6 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/AutoDocTransforms.py @@ -0,0 +1,318 @@ +from __future__ import absolute_import, print_function + +from .Visitor import CythonTransform +from .StringEncoding import EncodedString +from . import Options +from . import PyrexTypes +from ..CodeWriter import ExpressionWriter +from .Errors import warning + + +class AnnotationWriter(ExpressionWriter): + """ + A Cython code writer for Python expressions in argument/variable annotations. + """ + def __init__(self, description=None): + """description is optional. If specified it is used in + warning messages for the nodes that don't convert to string properly. + If not specified then no messages are generated. + """ + ExpressionWriter.__init__(self) + self.description = description + self.incomplete = False + + def visit_Node(self, node): + self.put(u"") + self.incomplete = True + if self.description: + warning(node.pos, + "Failed to convert code to string representation in {0}".format( + self.description), level=1) + + def visit_LambdaNode(self, node): + # XXX Should we do better? + self.put("") + self.incomplete = True + if self.description: + warning(node.pos, + "Failed to convert lambda to string representation in {0}".format( + self.description), level=1) + + def visit_UnicodeNode(self, node): + # Discard Unicode prefix in annotations. Any tool looking at them + # would probably expect Py3 string semantics. 
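# ----- editor's aside (generic illustration, not the Cython API) -----
# AnnotationWriter and ExpressionWriter follow name-based visitor dispatch:
# a visit_<NodeClassName> method if one exists, otherwise a generic
# fallback like visit_Node above. A minimal standalone analogue:
class MiniVisitor(object):
    def visit(self, node):
        handler = getattr(self, 'visit_' + type(node).__name__, self.visit_default)
        return handler(node)

    def visit_default(self, node):
        return '<unrepresentable>'

    def visit_int(self, node):
        return str(node)
# MiniVisitor().visit(3) -> '3'; MiniVisitor().visit(object()) -> '<unrepresentable>'
# ----- end aside -----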
+ self.emit_string(node, "") + + def visit_AnnotationNode(self, node): + self.put(node.string.unicode_value) + + +class EmbedSignature(CythonTransform): + + def __init__(self, context): + super(EmbedSignature, self).__init__(context) + self.class_name = None + self.class_node = None + + def _fmt_expr(self, node): + writer = ExpressionWriter() + result = writer.write(node) + # print(type(node).__name__, '-->', result) + return result + + def _fmt_annotation(self, node): + writer = AnnotationWriter() + result = writer.write(node) + # print(type(node).__name__, '-->', result) + return result + + def _setup_format(self): + signature_format = self.current_directives['embedsignature.format'] + self.is_format_c = signature_format == 'c' + self.is_format_python = signature_format == 'python' + self.is_format_clinic = signature_format == 'clinic' + + def _fmt_arg(self, arg): + arg_doc = arg.name + annotation = None + defaultval = None + if arg.is_self_arg: + if self.is_format_clinic: + arg_doc = '$self' + elif arg.is_type_arg: + if self.is_format_clinic: + arg_doc = '$type' + elif self.is_format_c: + if arg.type is not PyrexTypes.py_object_type: + arg_doc = arg.type.declaration_code(arg.name, for_display=1) + elif self.is_format_python: + if not arg.annotation: + annotation = self._fmt_type(arg.type) + if arg.annotation: + if not self.is_format_clinic: + annotation = self._fmt_annotation(arg.annotation) + if arg.default: + defaultval = self._fmt_expr(arg.default) + if annotation: + arg_doc = arg_doc + (': %s' % annotation) + if defaultval: + arg_doc = arg_doc + (' = %s' % defaultval) + elif defaultval: + arg_doc = arg_doc + ('=%s' % defaultval) + return arg_doc + + def _fmt_star_arg(self, arg): + arg_doc = arg.name + if arg.annotation: + if not self.is_format_clinic: + annotation = self._fmt_annotation(arg.annotation) + arg_doc = arg_doc + (': %s' % annotation) + return arg_doc + + def _fmt_arglist(self, args, + npoargs=0, npargs=0, pargs=None, + nkargs=0, kargs=None, + hide_self=False): + arglist = [] + for arg in args: + if not hide_self or not arg.entry.is_self_arg: + arg_doc = self._fmt_arg(arg) + arglist.append(arg_doc) + if pargs: + arg_doc = self._fmt_star_arg(pargs) + arglist.insert(npargs + npoargs, '*%s' % arg_doc) + elif nkargs: + arglist.insert(npargs + npoargs, '*') + if npoargs: + arglist.insert(npoargs, '/') + if kargs: + arg_doc = self._fmt_star_arg(kargs) + arglist.append('**%s' % arg_doc) + return arglist + + def _fmt_type(self, type): + if type is PyrexTypes.py_object_type: + return None + elif self.is_format_c: + code = type.declaration_code("", for_display=1) + return code + elif self.is_format_python: + annotation = None + if type.is_string: + annotation = self.current_directives['c_string_type'] + elif type.is_numeric: + annotation = type.py_type_name() + if annotation is None: + code = type.declaration_code('', for_display=1) + annotation = code.replace(' ', '_').replace('*', 'p') + return annotation + return None + + def _fmt_signature(self, cls_name, func_name, args, + npoargs=0, npargs=0, pargs=None, + nkargs=0, kargs=None, + return_expr=None, return_type=None, + hide_self=False): + arglist = self._fmt_arglist( + args, npoargs, npargs, pargs, nkargs, kargs, + hide_self=hide_self, + ) + arglist_doc = ', '.join(arglist) + func_doc = '%s(%s)' % (func_name, arglist_doc) + if self.is_format_c and cls_name: + func_doc = '%s.%s' % (cls_name, func_doc) + if not self.is_format_clinic: + ret_doc = None + if return_expr: + ret_doc = self._fmt_annotation(return_expr) + elif 
return_type: + ret_doc = self._fmt_type(return_type) + if ret_doc: + func_doc = '%s -> %s' % (func_doc, ret_doc) + return func_doc + + def _embed_signature(self, signature, node_doc): + if self.is_format_clinic and self.current_directives['binding']: + return node_doc + if node_doc: + if self.is_format_clinic: + docfmt = "%s\n--\n\n%s" + else: + docfmt = "%s\n%s" + return docfmt % (signature, node_doc) + else: + if self.is_format_clinic: + docfmt = "%s\n--\n\n" + else: + docfmt = "%s" + return docfmt % signature + + def __call__(self, node): + if not Options.docstrings: + return node + else: + return super(EmbedSignature, self).__call__(node) + + def visit_ClassDefNode(self, node): + oldname = self.class_name + oldclass = self.class_node + self.class_node = node + try: + # PyClassDefNode + self.class_name = node.name + except AttributeError: + # CClassDefNode + self.class_name = node.class_name + self.visitchildren(node) + self.class_name = oldname + self.class_node = oldclass + return node + + def visit_LambdaNode(self, node): + # lambda expressions do not have a signature or inner functions + return node + + def visit_DefNode(self, node): + if not self.current_directives['embedsignature']: + return node + self._setup_format() + + is_constructor = False + hide_self = False + if node.entry.is_special: + is_constructor = self.class_node and node.name == '__init__' + if not is_constructor: + return node + class_name = None + func_name = node.name + if self.is_format_c: + func_name = self.class_name + hide_self = True + else: + class_name, func_name = self.class_name, node.name + + npoargs = getattr(node, 'num_posonly_args', 0) + nkargs = getattr(node, 'num_kwonly_args', 0) + npargs = len(node.args) - nkargs - npoargs + signature = self._fmt_signature( + class_name, func_name, node.args, + npoargs, npargs, node.star_arg, + nkargs, node.starstar_arg, + return_expr=node.return_type_annotation, + return_type=None, hide_self=hide_self) + if signature: + if is_constructor and self.is_format_c: + doc_holder = self.class_node.entry.type.scope + else: + doc_holder = node.entry + if doc_holder.doc is not None: + old_doc = doc_holder.doc + elif not is_constructor and getattr(node, 'py_func', None) is not None: + old_doc = node.py_func.entry.doc + else: + old_doc = None + new_doc = self._embed_signature(signature, old_doc) + doc_holder.doc = EncodedString(new_doc) + if not is_constructor and getattr(node, 'py_func', None) is not None: + node.py_func.entry.doc = EncodedString(new_doc) + return node + + def visit_CFuncDefNode(self, node): + if not node.overridable: # not cpdef FOO(...): + return node + if not self.current_directives['embedsignature']: + return node + self._setup_format() + + signature = self._fmt_signature( + self.class_name, node.declarator.base.name, + node.declarator.args, + return_type=node.return_type) + if signature: + if node.entry.doc is not None: + old_doc = node.entry.doc + elif getattr(node, 'py_func', None) is not None: + old_doc = node.py_func.entry.doc + else: + old_doc = None + new_doc = self._embed_signature(signature, old_doc) + node.entry.doc = EncodedString(new_doc) + py_func = getattr(node, 'py_func', None) + if py_func is not None: + py_func.entry.doc = EncodedString(new_doc) + return node + + def visit_PropertyNode(self, node): + if not self.current_directives['embedsignature']: + return node + self._setup_format() + + entry = node.entry + body = node.body + prop_name = entry.name + type_name = None + if entry.visibility == 'public': + if self.is_format_c: + # property
synthesised from a cdef public attribute + type_name = entry.type.declaration_code("", for_display=1) + if not entry.type.is_pyobject: + type_name = "'%s'" % type_name + elif entry.type.is_extension_type: + type_name = entry.type.module_name + '.' + type_name + elif self.is_format_python: + type_name = self._fmt_type(entry.type) + if type_name is None: + for stat in body.stats: + if stat.name != '__get__': + continue + if self.is_format_c: + prop_name = '%s.%s' % (self.class_name, prop_name) + ret_annotation = stat.return_type_annotation + if ret_annotation: + type_name = self._fmt_annotation(ret_annotation) + if type_name is not None : + signature = '%s: %s' % (prop_name, type_name) + new_doc = self._embed_signature(signature, entry.doc) + if not self.is_format_clinic: + entry.doc = EncodedString(new_doc) + return node diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/CmdLine.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/CmdLine.py new file mode 100644 index 0000000000000000000000000000000000000000..776636c3234fb04cbd2d3a50ae092de9e2c900ec --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/CmdLine.py @@ -0,0 +1,251 @@ +# +# Cython - Command Line Parsing +# + +from __future__ import absolute_import + +import sys +import os +from argparse import ArgumentParser, Action, SUPPRESS +from . import Options + + +if sys.version_info < (3, 3): + # TODO: This workaround can be removed in Cython 3.1 + FileNotFoundError = IOError + + +class ParseDirectivesAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + old_directives = dict(getattr(namespace, self.dest, + Options.get_directive_defaults())) + directives = Options.parse_directive_list( + values, relaxed_bool=True, current_settings=old_directives) + setattr(namespace, self.dest, directives) + + +class ParseOptionsAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + options = dict(getattr(namespace, self.dest, {})) + for opt in values.split(','): + if '=' in opt: + n, v = opt.split('=', 1) + v = v.lower() not in ('false', 'f', '0', 'no') + else: + n, v = opt, True + options[n] = v + setattr(namespace, self.dest, options) + + +class ParseCompileTimeEnvAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + old_env = dict(getattr(namespace, self.dest, {})) + new_env = Options.parse_compile_time_env(values, current_settings=old_env) + setattr(namespace, self.dest, new_env) + + +class ActivateAllWarningsAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + directives = getattr(namespace, 'compiler_directives', {}) + directives.update(Options.extra_warnings) + namespace.compiler_directives = directives + + +class SetLenientAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.error_on_unknown_names = False + namespace.error_on_uninitialized = False + + +class SetGDBDebugAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.gdb_debug = True + namespace.output_dir = os.curdir + + +class SetGDBDebugOutputAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.gdb_debug = True + namespace.output_dir = values + + +class SetAnnotateCoverageAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.annotate = True + 
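# ----- editor's aside (illustrative sketch, not part of the vendored file) -----
# The Action subclasses in this module fold several namespace side-effects
# into a single flag, as SetAnnotateCoverageAction does with `annotate`
# and `annotate_coverage_xml`. A generic standalone analogue (the option
# and attribute names below are invented for illustration):
import argparse

class EnableProfilingAction(argparse.Action):
    # hypothetical flag: turns profiling on and records the output path
    def __call__(self, parser, namespace, values, option_string=None):
        namespace.profile = True
        namespace.profile_output = values

_demo = argparse.ArgumentParser()
_demo.add_argument('--profile-out', action=EnableProfilingAction, type=str)
# _demo.parse_args(['--profile-out', 'prof.txt'])
#     -> namespace with profile=True, profile_output='prof.txt'
# ----- end aside -----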
namespace.annotate_coverage_xml = values + + +def create_cython_argparser(): + description = "Cython (https://cython.org/) is a compiler for code written in the "\ + "Cython language. Cython is based on Pyrex by Greg Ewing." + + parser = ArgumentParser(description=description, argument_default=SUPPRESS) + + parser.add_argument("-V", "--version", dest='show_version', action='store_const', const=1, + help='Display version number of cython compiler') + parser.add_argument("-l", "--create-listing", dest='use_listing_file', action='store_const', const=1, + help='Write error messages to a listing file') + parser.add_argument("-I", "--include-dir", dest='include_path', action='append', + help='Search for include files in named directory ' + '(multiple include directories are allowed).') + parser.add_argument("-o", "--output-file", dest='output_file', action='store', type=str, + help='Specify name of generated C file') + parser.add_argument("-t", "--timestamps", dest='timestamps', action='store_const', const=1, + help='Only compile newer source files') + parser.add_argument("-f", "--force", dest='timestamps', action='store_const', const=0, + help='Compile all source files (overrides implied -t)') + parser.add_argument("-v", "--verbose", dest='verbose', action='count', + help='Be verbose, print file names on multiple compilation') + parser.add_argument("-p", "--embed-positions", dest='embed_pos_in_docstring', action='store_const', const=1, + help='If specified, the positions in Cython files of each ' + 'function definition is embedded in its docstring.') + parser.add_argument("--cleanup", dest='generate_cleanup_code', action='store', type=int, + help='Release interned objects on python exit, for memory debugging. ' + 'Level indicates aggressiveness, default 0 releases nothing.') + parser.add_argument("-w", "--working", dest='working_path', action='store', type=str, + help='Sets the working directory for Cython (the directory modules are searched from)') + parser.add_argument("--gdb", action=SetGDBDebugAction, nargs=0, + help='Output debug information for cygdb') + parser.add_argument("--gdb-outdir", action=SetGDBDebugOutputAction, type=str, + help='Specify gdb debug information output directory. Implies --gdb.') + parser.add_argument("-D", "--no-docstrings", dest='docstrings', action='store_false', + help='Strip docstrings from the compiled module.') + parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate', + help='Produce a colorized HTML version of the source.') + parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate', + help='Produce a colorized HTML version of the source ' + 'which includes entire generated C/C++-code.') + parser.add_argument("--annotate-coverage", dest='annotate_coverage_xml', action=SetAnnotateCoverageAction, type=str, + help='Annotate and include coverage information from cov.xml.') + parser.add_argument("--line-directives", dest='emit_linenums', action='store_true', + help='Produce #line directives pointing to the .pyx source') + parser.add_argument("-+", "--cplus", dest='cplus', action='store_const', const=1, + help='Output a C++ rather than C file.') + parser.add_argument('--embed', action='store_const', const='main', + help='Generate a main() function that embeds the Python interpreter. 
' + 'Pass --embed= for a name other than main().') + parser.add_argument('-2', dest='language_level', action='store_const', const=2, + help='Compile based on Python-2 syntax and code semantics.') + parser.add_argument('-3', dest='language_level', action='store_const', const=3, + help='Compile based on Python-3 syntax and code semantics.') + parser.add_argument('--3str', dest='language_level', action='store_const', const='3str', + help='Compile based on Python-3 syntax and code semantics without ' + 'assuming unicode by default for string literals under Python 2.') + parser.add_argument("--lenient", action=SetLenientAction, nargs=0, + help='Change some compile time errors to runtime errors to ' + 'improve Python compatibility') + parser.add_argument("--capi-reexport-cincludes", dest='capi_reexport_cincludes', action='store_true', + help='Add cincluded headers to any auto-generated header files.') + parser.add_argument("--fast-fail", dest='fast_fail', action='store_true', + help='Abort the compilation on the first error') + parser.add_argument("-Werror", "--warning-errors", dest='warning_errors', action='store_true', + help='Make all warnings into errors') + parser.add_argument("-Wextra", "--warning-extra", action=ActivateAllWarningsAction, nargs=0, + help='Enable extra warnings') + + parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...', + dest='compiler_directives', type=str, + action=ParseDirectivesAction, + help='Overrides a compiler directive') + parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...', + dest='compile_time_env', type=str, + action=ParseCompileTimeEnvAction, + help='Provides compile time env like DEF would do.') + parser.add_argument("--module-name", + dest='module_name', type=str, action='store', + help='Fully qualified module name. If not given, is ' + 'deduced from the import path if source file is in ' + 'a package, or equals the filename otherwise.') + parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources') + parser.add_argument('sources', nargs='*', default=[]) + + # TODO: add help + parser.add_argument("-z", "--pre-import", dest='pre_import', action='store', type=str, help=SUPPRESS) + parser.add_argument("--convert-range", dest='convert_range', action='store_true', help=SUPPRESS) + parser.add_argument("--no-c-in-traceback", dest='c_line_in_traceback', action='store_false', help=SUPPRESS) + parser.add_argument("--cimport-from-pyx", dest='cimport_from_pyx', action='store_true', help=SUPPRESS) + parser.add_argument("--old-style-globals", dest='old_style_globals', action='store_true', help=SUPPRESS) + + # debug stuff: + from . 
import DebugFlags + for name in vars(DebugFlags): + if name.startswith("debug"): + option_name = name.replace('_', '-') + parser.add_argument("--" + option_name, action='store_true', help=SUPPRESS) + + return parser + + +def parse_command_line_raw(parser, args): + # special handling for --embed and --embed=xxxx as they aren't correctly parsed + def filter_out_embed_options(args): + with_embed, without_embed = [], [] + for x in args: + if x == '--embed' or x.startswith('--embed='): + with_embed.append(x) + else: + without_embed.append(x) + return with_embed, without_embed + + with_embed, args_without_embed = filter_out_embed_options(args) + + arguments, unknown = parser.parse_known_args(args_without_embed) + + sources = arguments.sources + del arguments.sources + + # unknown can be either debug, embed or input files or really unknown + for option in unknown: + if option.startswith('-'): + parser.error("unknown option " + option) + else: + sources.append(option) + + # embed-stuff must be handled extra: + for x in with_embed: + if x == '--embed': + name = 'main' # default value + else: + name = x[len('--embed='):] + setattr(arguments, 'embed', name) + + return arguments, sources + + +def parse_command_line(args): + parser = create_cython_argparser() + arguments, sources = parse_command_line_raw(parser, args) + + work_dir = getattr(arguments, 'working_path', '') + for source in sources: + if work_dir and not os.path.isabs(source): + source = os.path.join(work_dir, source) + if not os.path.exists(source): + import errno + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), source) + + options = Options.CompilationOptions(Options.default_options) + for name, value in vars(arguments).items(): + if name.startswith('debug'): + from . import DebugFlags + if name in dir(DebugFlags): + setattr(DebugFlags, name, value) + else: + parser.error("Unknown debug flag: %s\n" % name) + elif hasattr(Options, name): + setattr(Options, name, value) + else: + setattr(options, name, value) + + if options.use_listing_file and len(sources) > 1: + parser.error("cython: Only one source file allowed when using -o\n") + if len(sources) == 0 and not options.show_version: + parser.error("cython: Need at least one source file\n") + if Options.embed and len(sources) > 1: + parser.error("cython: Only one source file allowed when using --embed\n") + if options.module_name: + if options.timestamps: + parser.error("cython: Cannot use --module-name with --timestamps\n") + if len(sources) > 1: + parser.error("cython: Only one source file allowed when using --module-name\n") + return options, sources diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Dataclass.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Dataclass.py new file mode 100644 index 0000000000000000000000000000000000000000..1b41bf9e6a0c709e80c995548170fa6707e6bdbd --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Dataclass.py @@ -0,0 +1,839 @@ +# functions to transform a c class into a dataclass + +from collections import OrderedDict +from textwrap import dedent +import operator + +from . import ExprNodes +from . import Nodes +from . import PyrexTypes +from . import Builtin +from . 
import Naming +from .Errors import error, warning +from .Code import UtilityCode, TempitaUtilityCode, PyxCodeWriter +from .Visitor import VisitorTransform +from .StringEncoding import EncodedString +from .TreeFragment import TreeFragment +from .ParseTreeTransforms import NormalizeTree, SkipDeclarations +from .Options import copy_inherited_directives + +_dataclass_loader_utilitycode = None + +def make_dataclasses_module_callnode(pos): + global _dataclass_loader_utilitycode + if not _dataclass_loader_utilitycode: + python_utility_code = UtilityCode.load_cached("Dataclasses_fallback", "Dataclasses.py") + python_utility_code = EncodedString(python_utility_code.impl) + _dataclass_loader_utilitycode = TempitaUtilityCode.load( + "SpecificModuleLoader", "Dataclasses.c", + context={'cname': "dataclasses", 'py_code': python_utility_code.as_c_string_literal()}) + return ExprNodes.PythonCapiCallNode( + pos, "__Pyx_Load_dataclasses_Module", + PyrexTypes.CFuncType(PyrexTypes.py_object_type, []), + utility_code=_dataclass_loader_utilitycode, + args=[], + ) + +def make_dataclass_call_helper(pos, callable, kwds): + utility_code = UtilityCode.load_cached("DataclassesCallHelper", "Dataclasses.c") + func_type = PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("callable", PyrexTypes.py_object_type, None), + PyrexTypes.CFuncTypeArg("kwds", PyrexTypes.py_object_type, None) + ], + ) + return ExprNodes.PythonCapiCallNode( + pos, + function_name="__Pyx_DataclassesCallHelper", + func_type=func_type, + utility_code=utility_code, + args=[callable, kwds], + ) + + +class RemoveAssignmentsToNames(VisitorTransform, SkipDeclarations): + """ + Cython (and Python) normally treats + + class A: + x = 1 + + as generating a class attribute. However for dataclasses the `= 1` should be interpreted as + a default value to initialize an instance attribute with. + This transform therefore removes the `x=1` assignment so that the class attribute isn't + generated, while recording what it has removed so that it can be used in the initialization. + """ + def __init__(self, names): + super(RemoveAssignmentsToNames, self).__init__() + self.names = names + self.removed_assignments = {} + + def visit_CClassNode(self, node): + self.visitchildren(node) + return node + + def visit_PyClassNode(self, node): + return node # go no further + + def visit_FuncDefNode(self, node): + return node # go no further + + def visit_SingleAssignmentNode(self, node): + if node.lhs.is_name and node.lhs.name in self.names: + if node.lhs.name in self.removed_assignments: + warning(node.pos, ("Multiple assignments for '%s' in dataclass; " + "using most recent") % node.lhs.name, 1) + self.removed_assignments[node.lhs.name] = node.rhs + return [] + return node + + # I believe cascaded assignment is always a syntax error with annotations + # so there's no need to define visit_CascadedAssignmentNode + + def visit_Node(self, node): + self.visitchildren(node) + return node + + +class TemplateCode(object): + """ + Adds the ability to keep track of placeholder argument names to PyxCodeWriter. + + Also adds extra_stats which are nodes bundled at the end when this + is converted to a tree. 
+ """ + _placeholder_count = 0 + + def __init__(self, writer=None, placeholders=None, extra_stats=None): + self.writer = PyxCodeWriter() if writer is None else writer + self.placeholders = {} if placeholders is None else placeholders + self.extra_stats = [] if extra_stats is None else extra_stats + + def add_code_line(self, code_line): + self.writer.putln(code_line) + + def add_code_lines(self, code_lines): + for line in code_lines: + self.writer.putln(line) + + def reset(self): + # don't attempt to reset placeholders - it really doesn't matter if + # we have unused placeholders + self.writer.reset() + + def empty(self): + return self.writer.empty() + + def indenter(self): + return self.writer.indenter() + + def new_placeholder(self, field_names, value): + name = self._new_placeholder_name(field_names) + self.placeholders[name] = value + return name + + def add_extra_statements(self, statements): + if self.extra_stats is None: + assert False, "Can only use add_extra_statements on top-level writer" + self.extra_stats.extend(statements) + + def _new_placeholder_name(self, field_names): + while True: + name = "DATACLASS_PLACEHOLDER_%d" % self._placeholder_count + if (name not in self.placeholders + and name not in field_names): + # make sure name isn't already used and doesn't + # conflict with a variable name (which is unlikely but possible) + break + self._placeholder_count += 1 + return name + + def generate_tree(self, level='c_class'): + stat_list_node = TreeFragment( + self.writer.getvalue(), + level=level, + pipeline=[NormalizeTree(None)], + ).substitute(self.placeholders) + + stat_list_node.stats += self.extra_stats + return stat_list_node + + def insertion_point(self): + new_writer = self.writer.insertion_point() + return TemplateCode( + writer=new_writer, + placeholders=self.placeholders, + extra_stats=self.extra_stats + ) + + +class _MISSING_TYPE(object): + pass +MISSING = _MISSING_TYPE() + + +class Field(object): + """ + Field is based on the dataclasses.field class from the standard library module. + It is used internally during the generation of Cython dataclasses to keep track + of the settings for individual attributes. + + Attributes of this class are stored as nodes so they can be used in code construction + more readily (i.e. we store BoolNode rather than bool) + """ + default = MISSING + default_factory = MISSING + private = False + + literal_keys = ("repr", "hash", "init", "compare", "metadata") + + # default values are defined by the CPython dataclasses.field + def __init__(self, pos, default=MISSING, default_factory=MISSING, + repr=None, hash=None, init=None, + compare=None, metadata=None, + is_initvar=False, is_classvar=False, + **additional_kwds): + if default is not MISSING: + self.default = default + if default_factory is not MISSING: + self.default_factory = default_factory + self.repr = repr or ExprNodes.BoolNode(pos, value=True) + self.hash = hash or ExprNodes.NoneNode(pos) + self.init = init or ExprNodes.BoolNode(pos, value=True) + self.compare = compare or ExprNodes.BoolNode(pos, value=True) + self.metadata = metadata or ExprNodes.NoneNode(pos) + self.is_initvar = is_initvar + self.is_classvar = is_classvar + + for k, v in additional_kwds.items(): + # There should not be any additional keywords! 
+ error(v.pos, "cython.dataclasses.field() got an unexpected keyword argument '%s'" % k) + + for field_name in self.literal_keys: + field_value = getattr(self, field_name) + if not field_value.is_literal: + error(field_value.pos, + "cython.dataclasses.field parameter '%s' must be a literal value" % field_name) + + def iterate_record_node_arguments(self): + for key in (self.literal_keys + ('default', 'default_factory')): + value = getattr(self, key) + if value is not MISSING: + yield key, value + + +def process_class_get_fields(node): + var_entries = node.scope.var_entries + # order of definition is used in the dataclass + var_entries = sorted(var_entries, key=operator.attrgetter('pos')) + var_names = [entry.name for entry in var_entries] + + # don't treat `x = 1` as an assignment of a class attribute within the dataclass + transform = RemoveAssignmentsToNames(var_names) + transform(node) + default_value_assignments = transform.removed_assignments + + base_type = node.base_type + fields = OrderedDict() + while base_type: + if base_type.is_external or not base_type.scope.implemented: + warning(node.pos, "Cannot reliably handle Cython dataclasses with base types " + "in external modules since it is not possible to tell what fields they have", 2) + if base_type.dataclass_fields: + fields = base_type.dataclass_fields.copy() + break + base_type = base_type.base_type + + for entry in var_entries: + name = entry.name + is_initvar = entry.declared_with_pytyping_modifier("dataclasses.InitVar") + # TODO - classvars aren't included in "var_entries" so are missed here + # and thus this code is never triggered + is_classvar = entry.declared_with_pytyping_modifier("typing.ClassVar") + if name in default_value_assignments: + assignment = default_value_assignments[name] + if (isinstance(assignment, ExprNodes.CallNode) and ( + assignment.function.as_cython_attribute() == "dataclasses.field" or + Builtin.exprnode_to_known_standard_library_name( + assignment.function, node.scope) == "dataclasses.field")): + # I believe most of this is well-enforced when it's treated as a directive + # but it doesn't hurt to make sure + valid_general_call = (isinstance(assignment, ExprNodes.GeneralCallNode) + and isinstance(assignment.positional_args, ExprNodes.TupleNode) + and not assignment.positional_args.args + and (assignment.keyword_args is None or isinstance(assignment.keyword_args, ExprNodes.DictNode))) + valid_simple_call = (isinstance(assignment, ExprNodes.SimpleCallNode) and not assignment.args) + if not (valid_general_call or valid_simple_call): + error(assignment.pos, "Call to 'cython.dataclasses.field' must only consist " + "of compile-time keyword arguments") + continue + keyword_args = assignment.keyword_args.as_python_dict() if valid_general_call and assignment.keyword_args else {} + if 'default' in keyword_args and 'default_factory' in keyword_args: + error(assignment.pos, "cannot specify both default and default_factory") + continue + field = Field(node.pos, **keyword_args) + else: + if assignment.type in [Builtin.list_type, Builtin.dict_type, Builtin.set_type]: + # The standard library module generates a TypeError at runtime + # in this situation. 
+ # Error message is copied from CPython + error(assignment.pos, "mutable default for field {1} is not allowed: " + "use default_factory".format(assignment.type.name, name)) + + field = Field(node.pos, default=assignment) + else: + field = Field(node.pos) + field.is_initvar = is_initvar + field.is_classvar = is_classvar + if entry.visibility == "private": + field.private = True + fields[name] = field + node.entry.type.dataclass_fields = fields + return fields + + +def handle_cclass_dataclass(node, dataclass_args, analyse_decs_transform): + # default argument values from https://docs.python.org/3/library/dataclasses.html + kwargs = dict(init=True, repr=True, eq=True, + order=False, unsafe_hash=False, + frozen=False, kw_only=False) + if dataclass_args is not None: + if dataclass_args[0]: + error(node.pos, "cython.dataclasses.dataclass takes no positional arguments") + for k, v in dataclass_args[1].items(): + if k not in kwargs: + error(node.pos, + "cython.dataclasses.dataclass() got an unexpected keyword argument '%s'" % k) + if not isinstance(v, ExprNodes.BoolNode): + error(node.pos, + "Arguments passed to cython.dataclasses.dataclass must be True or False") + kwargs[k] = v.value + + kw_only = kwargs['kw_only'] + + fields = process_class_get_fields(node) + + dataclass_module = make_dataclasses_module_callnode(node.pos) + + # create __dataclass_params__ attribute. I try to use the exact + # `_DataclassParams` class defined in the standard library module if at all possible + # for maximum duck-typing compatibility. + dataclass_params_func = ExprNodes.AttributeNode(node.pos, obj=dataclass_module, + attribute=EncodedString("_DataclassParams")) + dataclass_params_keywords = ExprNodes.DictNode.from_pairs( + node.pos, + [ (ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)), + ExprNodes.BoolNode(node.pos, value=v)) + for k, v in kwargs.items() ] + + [ (ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)), + ExprNodes.BoolNode(node.pos, value=v)) + for k, v in [('kw_only', kw_only), ('match_args', False), + ('slots', False), ('weakref_slot', False)] + ]) + dataclass_params = make_dataclass_call_helper( + node.pos, dataclass_params_func, dataclass_params_keywords) + dataclass_params_assignment = Nodes.SingleAssignmentNode( + node.pos, + lhs = ExprNodes.NameNode(node.pos, name=EncodedString("__dataclass_params__")), + rhs = dataclass_params) + + dataclass_fields_stats = _set_up_dataclass_fields(node, fields, dataclass_module) + + stats = Nodes.StatListNode(node.pos, + stats=[dataclass_params_assignment] + dataclass_fields_stats) + + code = TemplateCode() + generate_init_code(code, kwargs['init'], node, fields, kw_only) + generate_repr_code(code, kwargs['repr'], node, fields) + generate_eq_code(code, kwargs['eq'], node, fields) + generate_order_code(code, kwargs['order'], node, fields) + generate_hash_code(code, kwargs['unsafe_hash'], kwargs['eq'], kwargs['frozen'], node, fields) + + stats.stats += code.generate_tree().stats + + # turn off annotation typing, so all arguments to __init__ are accepted as + # generic objects and thus can accept _HAS_DEFAULT_FACTORY. 
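# ----- editor's aside (illustrative sketch, not part of the vendored file) -----
# The mutable-default error above mirrors the stdlib dataclasses module,
# where `x: list = []` raises at class creation and default_factory is the
# sanctioned spelling. The runnable stdlib counterpart:
import dataclasses

@dataclasses.dataclass
class _DemoConfig:
    # a bare `items: list = []` here would raise ValueError
    items: list = dataclasses.field(default_factory=list)
# ----- end aside -----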
+ # Type conversion comes later + comp_directives = Nodes.CompilerDirectivesNode(node.pos, + directives=copy_inherited_directives(node.scope.directives, annotation_typing=False), + body=stats) + + comp_directives.analyse_declarations(node.scope) + # probably already in this scope, but it doesn't hurt to make sure + analyse_decs_transform.enter_scope(node, node.scope) + analyse_decs_transform.visit(comp_directives) + analyse_decs_transform.exit_scope() + + node.body.stats.append(comp_directives) + + +def generate_init_code(code, init, node, fields, kw_only): + """ + Notes on CPython generated "__init__": + * Implemented in `_init_fn`. + * The use of the `dataclasses._HAS_DEFAULT_FACTORY` sentinel value as + the default argument for fields that need constructing with a factory + function is copied from the CPython implementation. (`None` isn't + suitable because it could also be a value for the user to pass.) + There's no real reason why it needs importing from the dataclasses module + though - it could equally be a value generated by Cython when the module loads. + * seen_default and the associated error message are copied directly from Python + * Call to user-defined __post_init__ function (if it exists) is copied from + CPython. + + Cython behaviour deviates a little here (to be decided if this is right...) + Because the class variable from the assignment does not exist Cython fields will + return None (or whatever their type default is) if not initialized while Python + dataclasses will fall back to looking up the class variable. + """ + if not init or node.scope.lookup_here("__init__"): + return + + # selfname behaviour copied from the cpython module + selfname = "__dataclass_self__" if "self" in fields else "self" + args = [selfname] + + if kw_only: + args.append("*") + + function_start_point = code.insertion_point() + code = code.insertion_point() + + # create a temp to get _HAS_DEFAULT_FACTORY + dataclass_module = make_dataclasses_module_callnode(node.pos) + has_default_factory = ExprNodes.AttributeNode( + node.pos, + obj=dataclass_module, + attribute=EncodedString("_HAS_DEFAULT_FACTORY") + ) + + default_factory_placeholder = code.new_placeholder(fields, has_default_factory) + + seen_default = False + for name, field in fields.items(): + entry = node.scope.lookup(name) + if entry.annotation: + annotation = u": %s" % entry.annotation.string.value + else: + annotation = u"" + assignment = u'' + if field.default is not MISSING or field.default_factory is not MISSING: + seen_default = True + if field.default_factory is not MISSING: + ph_name = default_factory_placeholder + else: + ph_name = code.new_placeholder(fields, field.default) # 'default' should be a node + assignment = u" = %s" % ph_name + elif seen_default and not kw_only and field.init.value: + error(entry.pos, ("non-default argument '%s' follows default argument " + "in dataclass __init__") % name) + code.reset() + return + + if field.init.value: + args.append(u"%s%s%s" % (name, annotation, assignment)) + + if field.is_initvar: + continue + elif field.default_factory is MISSING: + if field.init.value: + code.add_code_line(u" %s.%s = %s" % (selfname, name, name)) + elif assignment: + # not an argument to the function, but is still initialized + code.add_code_line(u" %s.%s%s" % (selfname, name, assignment)) + else: + ph_name = code.new_placeholder(fields, field.default_factory) + if field.init.value: + # close to: + # def __init__(self, name=_PLACEHOLDER_VALUE): + # self.name = name_default_factory() if name is 
_PLACEHOLDER_VALUE else name + code.add_code_line(u" %s.%s = %s() if %s is %s else %s" % ( + selfname, name, ph_name, name, default_factory_placeholder, name)) + else: + # still need to use the default factory to initialize + code.add_code_line(u" %s.%s = %s()" % ( + selfname, name, ph_name)) + + if node.scope.lookup("__post_init__"): + post_init_vars = ", ".join(name for name, field in fields.items() + if field.is_initvar) + code.add_code_line(" %s.__post_init__(%s)" % (selfname, post_init_vars)) + + if code.empty(): + code.add_code_line(" pass") + + args = u", ".join(args) + function_start_point.add_code_line(u"def __init__(%s):" % args) + + +def generate_repr_code(code, repr, node, fields): + """ + The core of the CPython implementation is just: + ['return self.__class__.__qualname__ + f"(' + + ', '.join([f"{f.name}={{self.{f.name}!r}}" + for f in fields]) + + ')"'], + + The only notable difference here is self.__class__.__qualname__ -> type(self).__name__ + which is because Cython currently supports Python 2. + + However, it also has some guards for recursive repr invocations. In the standard + library implementation they're done with a wrapper decorator that captures a set + (with the set keyed by id and thread). Here we create a set as a thread local + variable and key only by id. + """ + if not repr or node.scope.lookup("__repr__"): + return + + # The recursive guard is likely a little costly, so skip it if possible. + # is_gc_simple defines where it can contain recursive objects + needs_recursive_guard = False + for name in fields.keys(): + entry = node.scope.lookup(name) + type_ = entry.type + if type_.is_memoryviewslice: + type_ = type_.dtype + if not type_.is_pyobject: + continue # no GC + if not type_.is_gc_simple: + needs_recursive_guard = True + break + + if needs_recursive_guard: + code.add_code_line("__pyx_recursive_repr_guard = __import__('threading').local()") + code.add_code_line("__pyx_recursive_repr_guard.running = set()") + code.add_code_line("def __repr__(self):") + if needs_recursive_guard: + code.add_code_line(" key = id(self)") + code.add_code_line(" guard_set = self.__pyx_recursive_repr_guard.running") + code.add_code_line(" if key in guard_set: return '...'") + code.add_code_line(" guard_set.add(key)") + code.add_code_line(" try:") + strs = [u"%s={self.%s!r}" % (name, name) + for name, field in fields.items() + if field.repr.value and not field.is_initvar] + format_string = u", ".join(strs) + + code.add_code_line(u' name = getattr(type(self), "__qualname__", type(self).__name__)') + code.add_code_line(u" return f'{name}(%s)'" % format_string) + if needs_recursive_guard: + code.add_code_line(" finally:") + code.add_code_line(" guard_set.remove(key)") + + +def generate_cmp_code(code, op, funcname, node, fields): + if node.scope.lookup_here(funcname): + return + + names = [name for name, field in fields.items() if (field.compare.value and not field.is_initvar)] + + code.add_code_lines([ + "def %s(self, other):" % funcname, + " if other.__class__ is not self.__class__:" + " return NotImplemented", + # + " cdef %s other_cast" % node.class_name, + " other_cast = <%s>other" % node.class_name, + ]) + + # The Python implementation of dataclasses.py does a tuple comparison + # (roughly): + # return self._attributes_to_tuple() {op} other._attributes_to_tuple() + # + # For the Cython implementation a tuple comparison isn't an option because + # not all attributes can be converted to Python objects and stored in a tuple + # + # TODO - better diagnostics of whether the types 
support comparison before + # generating the code. Plus, do we want to convert C structs to dicts and + # compare them that way (I think not, but it might be in demand)? + checks = [] + op_without_equals = op.replace('=', '') + + for name in names: + if op != '==': + # tuple comparison rules - early elements take precedence + code.add_code_line(" if self.%s %s other_cast.%s: return True" % ( + name, op_without_equals, name)) + code.add_code_line(" if self.%s != other_cast.%s: return False" % ( + name, name)) + if "=" in op: + code.add_code_line(" return True") # "() == ()" is True + else: + code.add_code_line(" return False") + + +def generate_eq_code(code, eq, node, fields): + if not eq: + return + generate_cmp_code(code, "==", "__eq__", node, fields) + + +def generate_order_code(code, order, node, fields): + if not order: + return + + for op, name in [("<", "__lt__"), + ("<=", "__le__"), + (">", "__gt__"), + (">=", "__ge__")]: + generate_cmp_code(code, op, name, node, fields) + + +def generate_hash_code(code, unsafe_hash, eq, frozen, node, fields): + """ + Copied from CPython implementation - the intention is to follow this as far as + is possible: + # +------------------- unsafe_hash= parameter + # | +----------- eq= parameter + # | | +--- frozen= parameter + # | | | + # v v v | | | + # | no | yes | <--- class has explicitly defined __hash__ + # +=======+=======+=======+========+========+ + # | False | False | False | | | No __eq__, use the base class __hash__ + # +-------+-------+-------+--------+--------+ + # | False | False | True | | | No __eq__, use the base class __hash__ + # +-------+-------+-------+--------+--------+ + # | False | True | False | None | | <-- the default, not hashable + # +-------+-------+-------+--------+--------+ + # | False | True | True | add | | Frozen, so hashable, allows override + # +-------+-------+-------+--------+--------+ + # | True | False | False | add | raise | Has no __eq__, but hashable + # +-------+-------+-------+--------+--------+ + # | True | False | True | add | raise | Has no __eq__, but hashable + # +-------+-------+-------+--------+--------+ + # | True | True | False | add | raise | Not frozen, but hashable + # +-------+-------+-------+--------+--------+ + # | True | True | True | add | raise | Frozen, so hashable + # +=======+=======+=======+========+========+ + # For boxes that are blank, __hash__ is untouched and therefore + # inherited from the base class. If the base is object, then + # id-based hashing is used. + + The Python implementation creates a tuple of all the fields, then hashes them. + This implementation creates a tuple of all the hashes of all the fields and hashes that. + The reason for this slight difference is to avoid to-Python conversions for anything + that Cython knows how to hash directly (It doesn't look like this currently applies to + anything though...). 
+ """ + + hash_entry = node.scope.lookup_here("__hash__") + if hash_entry: + # TODO ideally assignment of __hash__ to None shouldn't trigger this + # but difficult to get the right information here + if unsafe_hash: + # error message taken from CPython dataclasses module + error(node.pos, "Cannot overwrite attribute __hash__ in class %s" % node.class_name) + return + + if not unsafe_hash: + if not eq: + return + if not frozen: + code.add_extra_statements([ + Nodes.SingleAssignmentNode( + node.pos, + lhs=ExprNodes.NameNode(node.pos, name=EncodedString("__hash__")), + rhs=ExprNodes.NoneNode(node.pos), + ) + ]) + return + + names = [ + name for name, field in fields.items() + if not field.is_initvar and ( + field.compare.value if field.hash.value is None else field.hash.value) + ] + + # make a tuple of the hashes + hash_tuple_items = u", ".join(u"self.%s" % name for name in names) + if hash_tuple_items: + hash_tuple_items += u"," # ensure that one arg form is a tuple + + # if we're here we want to generate a hash + code.add_code_lines([ + "def __hash__(self):", + " return hash((%s))" % hash_tuple_items, + ]) + + +def get_field_type(pos, entry): + """ + sets the .type attribute for a field + + Returns the annotation if possible (since this is what the dataclasses + module does). If not (for example, attributes defined with cdef) then + it creates a string fallback. + """ + if entry.annotation: + # Right now it doesn't look like cdef classes generate an + # __annotations__ dict, therefore it's safe to just return + # entry.annotation + # (TODO: remove .string if we ditch PEP563) + return entry.annotation.string + # If they do in future then we may need to look up into that + # to duplicating the node. The code below should do this: + #class_name_node = ExprNodes.NameNode(pos, name=entry.scope.name) + #annotations = ExprNodes.AttributeNode( + # pos, obj=class_name_node, + # attribute=EncodedString("__annotations__") + #) + #return ExprNodes.IndexNode( + # pos, base=annotations, + # index=ExprNodes.StringNode(pos, value=entry.name) + #) + else: + # it's slightly unclear what the best option is here - we could + # try to return PyType_Type. This case should only happen with + # attributes defined with cdef so Cython is free to make it's own + # decision + s = EncodedString(entry.type.declaration_code("", for_display=1)) + return ExprNodes.StringNode(pos, value=s) + + +class FieldRecordNode(ExprNodes.ExprNode): + """ + __dataclass_fields__ contains a bunch of field objects recording how each field + of the dataclass was initialized (mainly corresponding to the arguments passed to + the "field" function). This node is used for the attributes of these field objects. + + If possible, coerces `arg` to a Python object. + Otherwise, generates a sensible backup string. + """ + subexprs = ['arg'] + + def __init__(self, pos, arg): + super(FieldRecordNode, self).__init__(pos, arg=arg) + + def analyse_types(self, env): + self.arg.analyse_types(env) + self.type = self.arg.type + return self + + def coerce_to_pyobject(self, env): + if self.arg.type.can_coerce_to_pyobject(env): + return self.arg.coerce_to_pyobject(env) + else: + # A string representation of the code that gave the field seems like a reasonable + # fallback. This'll mostly happen for "default" and "default_factory" where the + # type may be a C-type that can't be converted to Python. 
+ return self._make_string() + + def _make_string(self): + from .AutoDocTransforms import AnnotationWriter + writer = AnnotationWriter(description="Dataclass field") + string = writer.write(self.arg) + return ExprNodes.StringNode(self.pos, value=EncodedString(string)) + + def generate_evaluation_code(self, code): + return self.arg.generate_evaluation_code(code) + + +def _set_up_dataclass_fields(node, fields, dataclass_module): + # For defaults and default_factories containing things like lambda, + # they're already declared in the class scope, and it creates a big + # problem if multiple copies are floating around in both the __init__ + # function, and in the __dataclass_fields__ structure. + # Therefore, create module-level constants holding these values and + # pass those around instead + # + # If possible we use the `Field` class defined in the standard library + # module so that the information stored here is as close to a regular + # dataclass as is possible. + variables_assignment_stats = [] + for name, field in fields.items(): + if field.private: + continue # doesn't appear in the public interface + for attrname in [ "default", "default_factory" ]: + field_default = getattr(field, attrname) + if field_default is MISSING or field_default.is_literal or field_default.is_name: + # some simple cases where we don't need to set up + # the variable as a module-level constant + continue + global_scope = node.scope.global_scope() + module_field_name = global_scope.mangle( + global_scope.mangle(Naming.dataclass_field_default_cname, node.class_name), + name) + # create an entry in the global scope for this variable to live + field_node = ExprNodes.NameNode(field_default.pos, name=EncodedString(module_field_name)) + field_node.entry = global_scope.declare_var( + field_node.name, type=field_default.type or PyrexTypes.unspecified_type, + pos=field_default.pos, cname=field_node.name, is_cdef=True, + # TODO: do we need to set 'pytyping_modifiers' here? 
+ ) + # replace the field so that future users just receive the namenode + setattr(field, attrname, field_node) + + variables_assignment_stats.append( + Nodes.SingleAssignmentNode(field_default.pos, lhs=field_node, rhs=field_default)) + + placeholders = {} + field_func = ExprNodes.AttributeNode(node.pos, obj=dataclass_module, + attribute=EncodedString("field")) + dc_fields = ExprNodes.DictNode(node.pos, key_value_pairs=[]) + dc_fields_namevalue_assignments = [] + + for name, field in fields.items(): + if field.private: + continue # doesn't appear in the public interface + type_placeholder_name = "PLACEHOLDER_%s" % name + placeholders[type_placeholder_name] = get_field_type( + node.pos, node.scope.entries[name] + ) + + # defining these make the fields introspect more like a Python dataclass + field_type_placeholder_name = "PLACEHOLDER_FIELD_TYPE_%s" % name + if field.is_initvar: + placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode( + node.pos, obj=dataclass_module, + attribute=EncodedString("_FIELD_INITVAR") + ) + elif field.is_classvar: + # TODO - currently this isn't triggered + placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode( + node.pos, obj=dataclass_module, + attribute=EncodedString("_FIELD_CLASSVAR") + ) + else: + placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode( + node.pos, obj=dataclass_module, + attribute=EncodedString("_FIELD") + ) + + dc_field_keywords = ExprNodes.DictNode.from_pairs( + node.pos, + [(ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)), + FieldRecordNode(node.pos, arg=v)) + for k, v in field.iterate_record_node_arguments()] + + ) + dc_field_call = make_dataclass_call_helper( + node.pos, field_func, dc_field_keywords + ) + dc_fields.key_value_pairs.append( + ExprNodes.DictItemNode( + node.pos, + key=ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(name)), + value=dc_field_call)) + dc_fields_namevalue_assignments.append( + dedent(u"""\ + __dataclass_fields__[{0!r}].name = {0!r} + __dataclass_fields__[{0!r}].type = {1} + __dataclass_fields__[{0!r}]._field_type = {2} + """).format(name, type_placeholder_name, field_type_placeholder_name)) + + dataclass_fields_assignment = \ + Nodes.SingleAssignmentNode(node.pos, + lhs = ExprNodes.NameNode(node.pos, + name=EncodedString("__dataclass_fields__")), + rhs = dc_fields) + + dc_fields_namevalue_assignments = u"\n".join(dc_fields_namevalue_assignments) + dc_fields_namevalue_assignments = TreeFragment(dc_fields_namevalue_assignments, + level="c_class", + pipeline=[NormalizeTree(None)]) + dc_fields_namevalue_assignments = dc_fields_namevalue_assignments.substitute(placeholders) + + return (variables_assignment_stats + + [dataclass_fields_assignment] + + dc_fields_namevalue_assignments.stats) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/DebugFlags.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/DebugFlags.py new file mode 100644 index 0000000000000000000000000000000000000000..e830ab1849cf506ec10ab38ebd850a0a398c0431 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/DebugFlags.py @@ -0,0 +1,21 @@ +# Can be enabled at the command line with --debug-xxx. + +debug_disposal_code = 0 +debug_temp_alloc = 0 +debug_coercion = 0 + +# Write comments into the C code that show where temporary variables +# are allocated and released. +debug_temp_code_comments = 0 + +# Write a call trace of the code generation phase into the C code. 
+debug_trace_code_generation = 0 + +# Do not replace exceptions with user-friendly error messages. +debug_no_exception_intercept = 0 + +# Print a message each time a new stage in the pipeline is entered. +debug_verbose_pipeline = 0 + +# Raise an exception when an error is encountered. +debug_exception_on_error = 0 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Errors.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Errors.py new file mode 100644 index 0000000000000000000000000000000000000000..f3be0fd8b0216f57c46914f8fc421cb181582590 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Errors.py @@ -0,0 +1,300 @@ +# +# Errors +# + +from __future__ import absolute_import + +try: + from __builtin__ import basestring as any_string_type +except ImportError: + any_string_type = (bytes, str) + +import sys +from contextlib import contextmanager + +try: + from threading import local as _threadlocal +except ImportError: + class _threadlocal(object): pass + +threadlocal = _threadlocal() + +from ..Utils import open_new_file +from . import DebugFlags +from . import Options + + +class PyrexError(Exception): + pass + + +class PyrexWarning(Exception): + pass + +class CannotSpecialize(PyrexError): + pass + +def context(position): + source = position[0] + assert not (isinstance(source, any_string_type)), ( + "Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source) + try: + F = source.get_lines() + except UnicodeDecodeError: + # file has an encoding problem + s = u"[unprintable code]\n" + else: + s = u''.join(F[max(0, position[1]-6):position[1]]) + s = u'...\n%s%s^\n' % (s, u' '*(position[2])) + s = u'%s\n%s%s\n' % (u'-'*60, s, u'-'*60) + return s + +def format_position(position): + if position: + return u"%s:%d:%d: " % (position[0].get_error_description(), + position[1], position[2]) + return u'' + +def format_error(message, position): + if position: + pos_str = format_position(position) + cont = context(position) + message = u'\nError compiling Cython file:\n%s\n%s%s' % (cont, pos_str, message or u'') + return message + +class CompileError(PyrexError): + + def __init__(self, position = None, message = u""): + self.position = position + self.message_only = message + self.formatted_message = format_error(message, position) + self.reported = False + Exception.__init__(self, self.formatted_message) + # Python Exception subclass pickling is broken, + # see https://bugs.python.org/issue1692335 + self.args = (position, message) + + def __str__(self): + return self.formatted_message + +class CompileWarning(PyrexWarning): + + def __init__(self, position = None, message = ""): + self.position = position + Exception.__init__(self, format_position(position) + message) + +class InternalError(Exception): + # If this is ever raised, there is a bug in the compiler. + + def __init__(self, message): + self.message_only = message + Exception.__init__(self, u"Internal compiler error: %s" + % message) + +class AbortError(Exception): + # Throw this to stop the compilation immediately. 
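The `context()` helper earlier in this Errors.py listing builds the caret snippet shown in compile errors: up to five lines before the error line, the line itself, and a caret under the column. A standalone re-implementation of that logic, assuming the 1-based line and 0-based column indices it uses:

```python
# Standalone rendering of the error-context snippet built by context().
def make_context(source_lines, line, col):
    snippet = u''.join(source_lines[max(0, line - 6):line])  # includes the error line
    snippet = u'...\n%s%s^\n' % (snippet, u' ' * col)
    return u'%s\n%s%s\n' % (u'-' * 60, snippet, u'-' * 60)

print(make_context([u'cdef int x\n', u'x = "abc"\n'], 2, 4))
```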
+ + def __init__(self, message): + self.message_only = message + Exception.__init__(self, u"Abort error: %s" % message) + +class CompilerCrash(CompileError): + # raised when an unexpected exception occurs in a transform + def __init__(self, pos, context, message, cause, stacktrace=None): + if message: + message = u'\n' + message + else: + message = u'\n' + self.message_only = message + if context: + message = u"Compiler crash in %s%s" % (context, message) + if stacktrace: + import traceback + message += ( + u'\n\nCompiler crash traceback from this point on:\n' + + u''.join(traceback.format_tb(stacktrace))) + if cause: + if not stacktrace: + message += u'\n' + message += u'%s: %s' % (cause.__class__.__name__, cause) + CompileError.__init__(self, pos, message) + # Python Exception subclass pickling is broken, + # see https://bugs.python.org/issue1692335 + self.args = (pos, context, message, cause, stacktrace) + +class NoElementTreeInstalledException(PyrexError): + """raised when the user enabled options.gdb_debug but no ElementTree + implementation was found + """ + +def open_listing_file(path, echo_to_stderr=True): + # Begin a new error listing. If path is None, no file + # is opened, the error counter is just reset. + if path is not None: + threadlocal.cython_errors_listing_file = open_new_file(path) + else: + threadlocal.cython_errors_listing_file = None + if echo_to_stderr: + threadlocal.cython_errors_echo_file = sys.stderr + else: + threadlocal.cython_errors_echo_file = None + threadlocal.cython_errors_count = 0 + +def close_listing_file(): + if threadlocal.cython_errors_listing_file: + threadlocal.cython_errors_listing_file.close() + threadlocal.cython_errors_listing_file = None + +def report_error(err, use_stack=True): + error_stack = threadlocal.cython_errors_stack + if error_stack and use_stack: + error_stack[-1].append(err) + else: + # See Main.py for why dual reporting occurs. Quick fix for now. 
+ if err.reported: return + err.reported = True + try: line = u"%s\n" % err + except UnicodeEncodeError: + # Python <= 2.5 does this for non-ASCII Unicode exceptions + line = format_error(getattr(err, 'message_only', "[unprintable exception message]"), + getattr(err, 'position', None)) + u'\n' + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + try: listing_file.write(line) + except UnicodeEncodeError: + listing_file.write(line.encode('ASCII', 'replace')) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + try: echo_file.write(line) + except UnicodeEncodeError: + echo_file.write(line.encode('ASCII', 'replace')) + threadlocal.cython_errors_count += 1 + if Options.fast_fail: + raise AbortError("fatal errors") + +def error(position, message): + #print("Errors.error:", repr(position), repr(message)) ### + if position is None: + raise InternalError(message) + err = CompileError(position, message) + if DebugFlags.debug_exception_on_error: raise Exception(err) # debug + report_error(err) + return err + + +LEVEL = 1 # warn about all errors level 1 or higher + +def _write_file_encode(file, line): + try: + file.write(line) + except UnicodeEncodeError: + file.write(line.encode('ascii', 'replace')) + + +def performance_hint(position, message, env): + if not env.directives['show_performance_hints']: + return + warn = CompileWarning(position, message) + line = "performance hint: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + return warn + + +def message(position, message, level=1): + if level < LEVEL: + return + warn = CompileWarning(position, message) + line = u"note: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + return warn + + +def warning(position, message, level=0): + if level < LEVEL: + return + if Options.warning_errors and position: + return error(position, message) + warn = CompileWarning(position, message) + line = u"warning: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + return warn + + +def warn_once(position, message, level=0): + if level < LEVEL: + return + warn_once_seen = threadlocal.cython_errors_warn_once_seen + if message in warn_once_seen: + return + warn = CompileWarning(position, message) + line = u"warning: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + warn_once_seen.add(message) + return warn + + +# These functions can be used to momentarily suppress errors. 
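A short usage sketch of the suppression helpers defined just below (assumes `init_thread()` has already run for the current thread, as it must before any error reporting):

```python
# Collect errors raised while compiling a speculative branch instead of
# reporting them; ignore=True drops them on exit.
from Cython.Compiler import Errors

Errors.init_thread()
with Errors.local_errors(ignore=True) as held:
    pass  # calls to Errors.error(pos, msg) here would land in `held`
print(len(held))  # 0 -- nothing was reported inside the block
```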
+ +def hold_errors(): + errors = [] + threadlocal.cython_errors_stack.append(errors) + return errors + + +def release_errors(ignore=False): + held_errors = threadlocal.cython_errors_stack.pop() + if not ignore: + for err in held_errors: + report_error(err) + + +def held_errors(): + return threadlocal.cython_errors_stack[-1] + + +# same as context manager: + +@contextmanager +def local_errors(ignore=False): + errors = hold_errors() + try: + yield errors + finally: + release_errors(ignore=ignore) + + +# Keep all global state in thread local storage to support parallel cythonisation in distutils. + +def init_thread(): + threadlocal.cython_errors_count = 0 + threadlocal.cython_errors_listing_file = None + threadlocal.cython_errors_echo_file = None + threadlocal.cython_errors_warn_once_seen = set() + threadlocal.cython_errors_stack = [] + +def reset(): + threadlocal.cython_errors_warn_once_seen.clear() + del threadlocal.cython_errors_stack[:] + +def get_errors_count(): + return threadlocal.cython_errors_count diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/FusedNode.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/FusedNode.py new file mode 100644 index 0000000000000000000000000000000000000000..2ce4142f1cb4ebed9a7e5fc7bc52e200efc9006a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/FusedNode.py @@ -0,0 +1,1015 @@ +from __future__ import absolute_import + +import copy + +from . import (ExprNodes, PyrexTypes, MemoryView, + ParseTreeTransforms, StringEncoding, Errors, + Naming) +from .ExprNodes import CloneNode, ProxyNode, TupleNode +from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode +from ..Utils import OrderedSet +from .Errors import error, CannotSpecialize + + +class FusedCFuncDefNode(StatListNode): + """ + This node replaces a function with fused arguments. It deep-copies the + function for every permutation of fused types, and allocates a new local + scope for it. It keeps track of the original function in self.node, and + the entry of the original function in the symbol table is given the + 'fused_cfunction' attribute which points back to us. + Then when a function lookup occurs (to e.g. call it), the call can be + dispatched to the right function. + + node FuncDefNode the original function + nodes [FuncDefNode] list of copies of node with different specific types + py_func DefNode the fused python function subscriptable from + Python space + __signatures__ A DictNode mapping signature specialization strings + to PyCFunction nodes + resulting_fused_function PyCFunction for the fused DefNode that delegates + to specializations + fused_func_assignment Assignment of the fused function to the function name + defaults_tuple TupleNode of defaults (letting PyCFunctionNode build + defaults would result in many different tuples) + specialized_pycfuncs List of synthesized pycfunction nodes for the + specializations + code_object CodeObjectNode shared by all specializations and the + fused function + + fused_compound_types All fused (compound) types (e.g. 
floating[:]) + """ + + __signatures__ = None + resulting_fused_function = None + fused_func_assignment = None + defaults_tuple = None + decorators = None + + child_attrs = StatListNode.child_attrs + [ + '__signatures__', 'resulting_fused_function', 'fused_func_assignment'] + + def __init__(self, node, env): + super(FusedCFuncDefNode, self).__init__(node.pos) + + self.nodes = [] + self.node = node + + is_def = isinstance(self.node, DefNode) + if is_def: + # self.node.decorators = [] + self.copy_def(env) + else: + self.copy_cdef(env) + + # Perform some sanity checks. If anything fails, it's a bug + for n in self.nodes: + assert not n.entry.type.is_fused + assert not n.local_scope.return_type.is_fused + if node.return_type.is_fused: + assert not n.return_type.is_fused + + if not is_def and n.cfunc_declarator.optional_arg_count: + assert n.type.op_arg_struct + + node.entry.fused_cfunction = self + # Copy the nodes as AnalyseDeclarationsTransform will prepend + # self.py_func to self.stats, as we only want specialized + # CFuncDefNodes in self.nodes + self.stats = self.nodes[:] + + def copy_def(self, env): + """ + Create a copy of the original def or lambda function for specialized + versions. + """ + fused_compound_types = PyrexTypes.unique( + [arg.type for arg in self.node.args if arg.type.is_fused]) + fused_types = self._get_fused_base_types(fused_compound_types) + permutations = PyrexTypes.get_all_specialized_permutations(fused_types) + + self.fused_compound_types = fused_compound_types + + if self.node.entry in env.pyfunc_entries: + env.pyfunc_entries.remove(self.node.entry) + + for cname, fused_to_specific in permutations: + copied_node = copy.deepcopy(self.node) + # keep signature object identity for special casing in DefNode.analyse_declarations() + copied_node.entry.signature = self.node.entry.signature + + self._specialize_function_args(copied_node.args, fused_to_specific) + copied_node.return_type = self.node.return_type.specialize( + fused_to_specific) + + copied_node.analyse_declarations(env) + # copied_node.is_staticmethod = self.node.is_staticmethod + # copied_node.is_classmethod = self.node.is_classmethod + self.create_new_local_scope(copied_node, env, fused_to_specific) + self.specialize_copied_def(copied_node, cname, self.node.entry, + fused_to_specific, fused_compound_types) + + PyrexTypes.specialize_entry(copied_node.entry, cname) + copied_node.entry.used = True + env.entries[copied_node.entry.name] = copied_node.entry + + if not self.replace_fused_typechecks(copied_node): + break + + self.orig_py_func = self.node + self.py_func = self.make_fused_cpdef(self.node, env, is_def=True) + + def copy_cdef(self, env): + """ + Create a copy of the original c(p)def function for all specialized + versions. + """ + permutations = self.node.type.get_all_specialized_permutations() + # print 'Node %s has %d specializations:' % (self.node.entry.name, + # len(permutations)) + # import pprint; pprint.pprint([d for cname, d in permutations]) + + # Prevent copying of the python function + self.orig_py_func = orig_py_func = self.node.py_func + self.node.py_func = None + if orig_py_func: + env.pyfunc_entries.remove(orig_py_func.entry) + + fused_types = self.node.type.get_fused_types() + self.fused_compound_types = fused_types + + new_cfunc_entries = [] + for cname, fused_to_specific in permutations: + copied_node = copy.deepcopy(self.node) + + # Make the types in our CFuncType specific. 
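Conceptually, the `get_all_specialized_permutations()` calls used in `copy_def`/`copy_cdef` above expand the cross product of each fused base type's member types, and each permutation stamps out one specialized copy of the function. A plain-Python toy model with made-up type names (not the real PyrexTypes API):

```python
# Toy model of fused-type permutation expansion: each permutation yields a
# (cname, fused_to_specific) pair used to create one specialization.
import itertools

fused = {'floating': ['float', 'double'], 'integral': ['int', 'long']}

def permutations(fused):
    names = sorted(fused)
    for combo in itertools.product(*(fused[n] for n in names)):
        yield '__'.join(combo), dict(zip(names, combo))

for cname, f2s in permutations(fused):
    print(cname, f2s)
# float__int {'floating': 'float', 'integral': 'int'}, float__long {...}, ...
```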
+ try: + type = copied_node.type.specialize(fused_to_specific) + except CannotSpecialize: + # unlike for the argument types, specializing the return type can fail + error(copied_node.pos, "Return type is a fused type that cannot " + "be determined from the function arguments") + self.py_func = None # this is just to let the compiler exit gracefully + return + entry = copied_node.entry + type.specialize_entry(entry, cname) + + # Reuse existing Entries (e.g. from .pxd files). + for i, orig_entry in enumerate(env.cfunc_entries): + if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type): + copied_node.entry = env.cfunc_entries[i] + if not copied_node.entry.func_cname: + copied_node.entry.func_cname = entry.func_cname + entry = copied_node.entry + type = entry.type + break + else: + new_cfunc_entries.append(entry) + + copied_node.type = type + entry.type, type.entry = type, entry + + entry.used = (entry.used or + self.node.entry.defined_in_pxd or + env.is_c_class_scope or + entry.is_cmethod) + + if self.node.cfunc_declarator.optional_arg_count: + self.node.cfunc_declarator.declare_optional_arg_struct( + type, env, fused_cname=cname) + + copied_node.return_type = type.return_type + self.create_new_local_scope(copied_node, env, fused_to_specific) + + # Make the argument types in the CFuncDeclarator specific + self._specialize_function_args(copied_node.cfunc_declarator.args, + fused_to_specific) + + # If a cpdef, declare all specialized cpdefs (this + # also calls analyse_declarations) + copied_node.declare_cpdef_wrapper(env) + if copied_node.py_func: + env.pyfunc_entries.remove(copied_node.py_func.entry) + + self.specialize_copied_def( + copied_node.py_func, cname, self.node.entry.as_variable, + fused_to_specific, fused_types) + + if not self.replace_fused_typechecks(copied_node): + break + + # replace old entry with new entries + if self.node.entry in env.cfunc_entries: + cindex = env.cfunc_entries.index(self.node.entry) + env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries + else: + env.cfunc_entries.extend(new_cfunc_entries) + + if orig_py_func: + self.py_func = self.make_fused_cpdef(orig_py_func, env, + is_def=False) + else: + self.py_func = orig_py_func + + def _get_fused_base_types(self, fused_compound_types): + """ + Get a list of unique basic fused types, from a list of + (possibly) compound fused types. + """ + base_types = [] + seen = set() + for fused_type in fused_compound_types: + fused_type.get_fused_types(result=base_types, seen=seen) + return base_types + + def _specialize_function_args(self, args, fused_to_specific): + for arg in args: + if arg.type.is_fused: + arg.type = arg.type.specialize(fused_to_specific) + if arg.type.is_memoryviewslice: + arg.type.validate_memslice_dtype(arg.pos) + if arg.annotation: + # TODO might be nice if annotations were specialized instead? + # (Or might be hard to do reliably) + arg.annotation.untyped = True + + def create_new_local_scope(self, node, env, f2s): + """ + Create a new local scope for the copied node and append it to + self.nodes. A new local scope is needed because the arguments with the + fused types are already in the local scope, and we need the specialized + entries created after analyse_declarations on each specialized version + of the (CFunc)DefNode. 
+ f2s is a dict mapping each fused type to its specialized version + """ + node.create_local_scope(env) + node.local_scope.fused_to_specific = f2s + + # This is copied from the original function, set it to false to + # stop recursion + node.has_fused_arguments = False + self.nodes.append(node) + + def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types): + """Specialize the copy of a DefNode given the copied node, + the specialization cname and the original DefNode entry""" + fused_types = self._get_fused_base_types(fused_compound_types) + type_strings = [ + PyrexTypes.specialization_signature_string(fused_type, f2s) + for fused_type in fused_types + ] + + node.specialized_signature_string = '|'.join(type_strings) + + node.entry.pymethdef_cname = PyrexTypes.get_fused_cname( + cname, node.entry.pymethdef_cname) + node.entry.doc = py_entry.doc + node.entry.doc_cname = py_entry.doc_cname + + def replace_fused_typechecks(self, copied_node): + """ + Branch-prune fused type checks like + + if fused_t is int: + ... + + Returns whether an error was issued and whether we should stop in + in order to prevent a flood of errors. + """ + num_errors = Errors.get_errors_count() + transform = ParseTreeTransforms.ReplaceFusedTypeChecks( + copied_node.local_scope) + transform(copied_node) + + if Errors.get_errors_count() > num_errors: + return False + + return True + + def _fused_instance_checks(self, normal_types, pyx_code, env): + """ + Generate Cython code for instance checks, matching an object to + specialized types. + """ + for specialized_type in normal_types: + # all_numeric = all_numeric and specialized_type.is_numeric + py_type_name = specialized_type.py_type_name() + if py_type_name == 'int': + # Support Python 2 long + py_type_name = '(int, long)' + pyx_code.context.update( + py_type_name=py_type_name, + specialized_type_name=specialized_type.specialization_string, + ) + pyx_code.put_chunk( + u""" + if isinstance(arg, {{py_type_name}}): + dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break + """) + + def _dtype_name(self, dtype): + name = str(dtype).replace('_', '__').replace(' ', '_') + if dtype.is_typedef: + name = Naming.fused_dtype_prefix + name + return name + + def _dtype_type(self, dtype): + if dtype.is_typedef: + return self._dtype_name(dtype) + return str(dtype) + + def _sizeof_dtype(self, dtype): + if dtype.is_pyobject: + return 'sizeof(void *)' + else: + return "sizeof(%s)" % self._dtype_type(dtype) + + def _buffer_check_numpy_dtype_setup_cases(self, pyx_code): + "Setup some common cases to match dtypes against specializations" + with pyx_code.indenter("if kind in u'iu':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_int") + + with pyx_code.indenter("elif kind == u'f':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_float") + + with pyx_code.indenter("elif kind == u'c':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_complex") + + with pyx_code.indenter("elif kind == u'O':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_object") + + match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'" + no_match = "dest_sig[{{dest_sig_idx}}] = None" + def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types): + """ + Match a numpy dtype object to the individual specializations. 
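The generated dispatch code assembled here first classifies a NumPy dtype by `kind` and `itemsize` (the `'iu'`/`'f'`/`'c'`/`'O'` buckets set up above) before falling back to format-string parsing. The same classification in plain Python, assuming NumPy is available:

```python
import numpy as np

def classify(dtype):
    kind, itemsize = dtype.kind, dtype.itemsize
    if kind in 'iu':                       # signed/unsigned integers
        return ('int', itemsize, kind == 'i')
    elif kind == 'f':
        return ('float', itemsize, None)
    elif kind == 'c':
        return ('complex', itemsize, None)
    return ('object', -1, None)

print(classify(np.dtype(np.float64)))  # ('float', 8, None)
print(classify(np.dtype(np.uint16)))   # ('int', 2, False)
```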
+ """ + self._buffer_check_numpy_dtype_setup_cases(pyx_code) + + for specialized_type in pythran_types+specialized_buffer_types: + final_type = specialized_type + if specialized_type.is_pythran_expr: + specialized_type = specialized_type.org_buffer + dtype = specialized_type.dtype + pyx_code.context.update( + itemsize_match=self._sizeof_dtype(dtype) + " == itemsize", + signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype), + dtype=dtype, + specialized_type_name=final_type.specialization_string) + + dtypes = [ + (dtype.is_int, pyx_code.dtype_int), + (dtype.is_float, pyx_code.dtype_float), + (dtype.is_complex, pyx_code.dtype_complex) + ] + + for dtype_category, codewriter in dtypes: + if not dtype_category: + continue + cond = '{{itemsize_match}} and (arg.ndim) == %d' % ( + specialized_type.ndim,) + if dtype.is_int: + cond += ' and {{signed_match}}' + + if final_type.is_pythran_expr: + cond += ' and arg_is_pythran_compatible' + + with codewriter.indenter("if %s:" % cond): + #codewriter.putln("print 'buffer match found based on numpy dtype'") + codewriter.putln(self.match) + codewriter.putln("break") + + def _buffer_parse_format_string_check(self, pyx_code, decl_code, + specialized_type, env): + """ + For each specialized type, try to coerce the object to a memoryview + slice of that type. This means obtaining a buffer and parsing the + format string. + TODO: separate buffer acquisition from format parsing + """ + dtype = specialized_type.dtype + if specialized_type.is_buffer: + axes = [('direct', 'strided')] * specialized_type.ndim + else: + axes = specialized_type.axes + + memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes) + memslice_type.create_from_py_utility_code(env) + pyx_code.context.update( + coerce_from_py_func=memslice_type.from_py_function, + dtype=dtype) + decl_code.putln( + "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)") + + pyx_code.context.update( + specialized_type_name=specialized_type.specialization_string, + sizeof_dtype=self._sizeof_dtype(dtype), + ndim_dtype=specialized_type.ndim, + dtype_is_struct_obj=int(dtype.is_struct or dtype.is_pyobject)) + + # use the memoryview object to check itemsize and ndim. + # In principle it could check more, but these are the easiest to do quickly + pyx_code.put_chunk( + u""" + # try {{dtype}} + if (((itemsize == -1 and arg_as_memoryview.itemsize == {{sizeof_dtype}}) + or itemsize == {{sizeof_dtype}}) + and arg_as_memoryview.ndim == {{ndim_dtype}}): + {{if dtype_is_struct_obj}} + if __PYX_IS_PYPY2: + # I wasn't able to diagnose why, but PyPy2 fails to convert a + # memoryview to a Cython memoryview in this case + memslice = {{coerce_from_py_func}}(arg, 0) + else: + {{else}} + if True: + {{endif}} + memslice = {{coerce_from_py_func}}(arg_as_memoryview, 0) + if memslice.memview: + __PYX_XCLEAR_MEMVIEW(&memslice, 1) + # print 'found a match for the buffer through format parsing' + %s + break + else: + __pyx_PyErr_Clear() + """ % self.match) + + def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, accept_none, env): + """ + Generate Cython code to match objects to buffer specializations. + First try to get a numpy dtype object and match it against the individual + specializations. If that fails, try naively to coerce the object + to each specialization, which obtains the buffer each time and tries + to match the format string. 
+ """ + # The first thing to find a match in this loop breaks out of the loop + pyx_code.put_chunk( + u""" + """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u""" + if ndarray is not None: + if isinstance(arg, ndarray): + dtype = arg.dtype + """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u""" + elif __pyx_memoryview_check(arg): + arg_base = arg.base + if isinstance(arg_base, ndarray): + dtype = arg_base.dtype + else: + dtype = None + else: + dtype = None + + itemsize = -1 + if dtype is not None: + itemsize = dtype.itemsize + kind = ord(dtype.kind) + dtype_signed = kind == u'i' + """) + pyx_code.indent(2) + if pythran_types: + pyx_code.put_chunk( + u""" + # Pythran only supports the endianness of the current compiler + byteorder = dtype.byteorder + if byteorder == "<" and not __Pyx_Is_Little_Endian(): + arg_is_pythran_compatible = False + elif byteorder == ">" and __Pyx_Is_Little_Endian(): + arg_is_pythran_compatible = False + if arg_is_pythran_compatible: + cur_stride = itemsize + shape = arg.shape + strides = arg.strides + for i in range(arg.ndim-1, -1, -1): + if (strides[i]) != cur_stride: + arg_is_pythran_compatible = False + break + cur_stride *= shape[i] + else: + arg_is_pythran_compatible = not (arg.flags.f_contiguous and (arg.ndim) > 1) + """) + pyx_code.named_insertion_point("numpy_dtype_checks") + self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types) + pyx_code.dedent(2) + + if accept_none: + # If None is acceptable, then Cython <3.0 matched None with the + # first type. This behaviour isn't ideal, but keep it for backwards + # compatibility. Better behaviour would be to see if subsequent + # arguments give a stronger match. + pyx_code.context.update( + specialized_type_name=buffer_types[0].specialization_string + ) + pyx_code.put_chunk( + """ + if arg is None: + %s + break + """ % self.match) + + # creating a Cython memoryview from a Python memoryview avoids the + # need to get the buffer multiple times, and we can + # also use it to check itemsizes etc + pyx_code.put_chunk( + """ + try: + arg_as_memoryview = memoryview(arg) + except (ValueError, TypeError): + pass + """) + with pyx_code.indenter("else:"): + for specialized_type in buffer_types: + self._buffer_parse_format_string_check( + pyx_code, decl_code, specialized_type, env) + + def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types): + """ + If we have any buffer specializations, write out some variable + declarations and imports. 
+ """ + decl_code.put_chunk( + u""" + ctypedef struct {{memviewslice_cname}}: + void *memview + + void __PYX_XCLEAR_MEMVIEW({{memviewslice_cname}} *, int have_gil) + bint __pyx_memoryview_check(object) + bint __PYX_IS_PYPY2 "(CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION == 2)" + """) + + pyx_code.local_variable_declarations.put_chunk( + u""" + cdef {{memviewslice_cname}} memslice + cdef Py_ssize_t itemsize + cdef bint dtype_signed + cdef Py_UCS4 kind + + itemsize = -1 + """) + + if pythran_types: + pyx_code.local_variable_declarations.put_chunk(u""" + cdef bint arg_is_pythran_compatible + cdef Py_ssize_t cur_stride + """) + + pyx_code.imports.put_chunk( + u""" + cdef type ndarray + ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable() + """) + + pyx_code.imports.put_chunk( + u""" + cdef memoryview arg_as_memoryview + """ + ) + + seen_typedefs = set() + seen_int_dtypes = set() + for buffer_type in all_buffer_types: + dtype = buffer_type.dtype + dtype_name = self._dtype_name(dtype) + if dtype.is_typedef: + if dtype_name not in seen_typedefs: + seen_typedefs.add(dtype_name) + decl_code.putln( + 'ctypedef %s %s "%s"' % (dtype.resolve(), dtype_name, + dtype.empty_declaration_code())) + + if buffer_type.dtype.is_int: + if str(dtype) not in seen_int_dtypes: + seen_int_dtypes.add(str(dtype)) + pyx_code.context.update(dtype_name=dtype_name, + dtype_type=self._dtype_type(dtype)) + pyx_code.local_variable_declarations.put_chunk( + u""" + cdef bint {{dtype_name}}_is_signed + {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0) + """) + + def _split_fused_types(self, arg): + """ + Specialize fused types and split into normal types and buffer types. + """ + specialized_types = PyrexTypes.get_specialized_types(arg.type) + + # Prefer long over int, etc by sorting (see type classes in PyrexTypes.py) + specialized_types.sort() + + seen_py_type_names = set() + normal_types, buffer_types, pythran_types = [], [], [] + has_object_fallback = False + for specialized_type in specialized_types: + py_type_name = specialized_type.py_type_name() + if py_type_name: + if py_type_name in seen_py_type_names: + continue + seen_py_type_names.add(py_type_name) + if py_type_name == 'object': + has_object_fallback = True + else: + normal_types.append(specialized_type) + elif specialized_type.is_pythran_expr: + pythran_types.append(specialized_type) + elif specialized_type.is_buffer or specialized_type.is_memoryviewslice: + buffer_types.append(specialized_type) + + return normal_types, buffer_types, pythran_types, has_object_fallback + + def _unpack_argument(self, pyx_code): + pyx_code.put_chunk( + u""" + # PROCESSING ARGUMENT {{arg_tuple_idx}} + if {{arg_tuple_idx}} < len(args): + arg = (args)[{{arg_tuple_idx}}] + elif kwargs is not None and '{{arg.name}}' in kwargs: + arg = (kwargs)['{{arg.name}}'] + else: + {{if arg.default}} + arg = (defaults)[{{default_idx}}] + {{else}} + {{if arg_tuple_idx < min_positional_args}} + raise TypeError("Expected at least %d argument%s, got %d" % ( + {{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(args))) + {{else}} + raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.default}}") + {{endif}} + {{endif}} + """) + + def _fused_signature_index(self, pyx_code): + """ + Generate Cython code for constructing a persistent nested dictionary index of + fused type specialization signatures. 
+ """ + pyx_code.put_chunk( + u""" + if not _fused_sigindex: + for sig in signatures: + sigindex_node = _fused_sigindex + *sig_series, last_type = sig.strip('()').split('|') + for sig_type in sig_series: + if sig_type not in sigindex_node: + sigindex_node[sig_type] = sigindex_node = {} + else: + sigindex_node = sigindex_node[sig_type] + sigindex_node[last_type] = sig + """ + ) + + def make_fused_cpdef(self, orig_py_func, env, is_def): + """ + This creates the function that is indexable from Python and does + runtime dispatch based on the argument types. The function gets the + arg tuple and kwargs dict (or None) and the defaults tuple + as arguments from the Binding Fused Function's tp_call. + """ + from . import TreeFragment, Code, UtilityCode + + fused_types = self._get_fused_base_types([ + arg.type for arg in self.node.args if arg.type.is_fused]) + + context = { + 'memviewslice_cname': MemoryView.memviewslice_cname, + 'func_args': self.node.args, + 'n_fused': len(fused_types), + 'min_positional_args': + self.node.num_required_args - self.node.num_required_kw_args + if is_def else + sum(1 for arg in self.node.args if arg.default is None), + 'name': orig_py_func.entry.name, + } + + pyx_code = Code.PyxCodeWriter(context=context) + decl_code = Code.PyxCodeWriter(context=context) + decl_code.put_chunk( + u""" + cdef extern from *: + void __pyx_PyErr_Clear "PyErr_Clear" () + type __Pyx_ImportNumPyArrayTypeIfAvailable() + int __Pyx_Is_Little_Endian() + """) + decl_code.indent() + + pyx_code.put_chunk( + u""" + def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}): + # FIXME: use a typed signature - currently fails badly because + # default arguments inherit the types we specify here! + + cdef list search_list + cdef dict sigindex_node + + dest_sig = [None] * {{n_fused}} + + if kwargs is not None and not kwargs: + kwargs = None + + cdef Py_ssize_t i + + # instance check body + """) + + pyx_code.indent() # indent following code to function body + pyx_code.named_insertion_point("imports") + pyx_code.named_insertion_point("func_defs") + pyx_code.named_insertion_point("local_variable_declarations") + + fused_index = 0 + default_idx = 0 + all_buffer_types = OrderedSet() + seen_fused_types = set() + for i, arg in enumerate(self.node.args): + if arg.type.is_fused: + arg_fused_types = arg.type.get_fused_types() + if len(arg_fused_types) > 1: + raise NotImplementedError("Determination of more than one fused base " + "type per argument is not implemented.") + fused_type = arg_fused_types[0] + + if arg.type.is_fused and fused_type not in seen_fused_types: + seen_fused_types.add(fused_type) + + context.update( + arg_tuple_idx=i, + arg=arg, + dest_sig_idx=fused_index, + default_idx=default_idx, + ) + + normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg) + self._unpack_argument(pyx_code) + + # 'unrolled' loop, first match breaks out of it + with pyx_code.indenter("while 1:"): + if normal_types: + self._fused_instance_checks(normal_types, pyx_code, env) + if buffer_types or pythran_types: + env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c")) + self._buffer_checks( + buffer_types, pythran_types, pyx_code, decl_code, + arg.accept_none, env) + if has_object_fallback: + pyx_code.context.update(specialized_type_name='object') + pyx_code.putln(self.match) + else: + pyx_code.putln(self.no_match) + pyx_code.putln("break") + + fused_index += 1 + all_buffer_types.update(buffer_types) + 
all_buffer_types.update(ty.org_buffer for ty in pythran_types) + + if arg.default: + default_idx += 1 + + if all_buffer_types: + self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types) + env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c")) + env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c")) + + self._fused_signature_index(pyx_code) + + pyx_code.put_chunk( + u""" + sigindex_matches = [] + sigindex_candidates = [_fused_sigindex] + + for dst_type in dest_sig: + found_matches = [] + found_candidates = [] + # Make two separate lists: One for signature sub-trees + # with at least one definite match, and another for + # signature sub-trees with only ambiguous matches + # (where `dest_sig[i] is None`). + if dst_type is None: + for sn in sigindex_matches: + found_matches.extend(( sn).values()) + for sn in sigindex_candidates: + found_candidates.extend(( sn).values()) + else: + for search_list in (sigindex_matches, sigindex_candidates): + for sn in search_list: + type_match = ( sn).get(dst_type) + if type_match is not None: + found_matches.append(type_match) + sigindex_matches = found_matches + sigindex_candidates = found_candidates + if not (found_matches or found_candidates): + break + + candidates = sigindex_matches + + if not candidates: + raise TypeError("No matching signature found") + elif len(candidates) > 1: + raise TypeError("Function call with ambiguous argument types") + else: + return (signatures)[candidates[0]] + """) + + fragment_code = pyx_code.getvalue() + # print decl_code.getvalue() + # print fragment_code + from .Optimize import ConstantFolding + fragment = TreeFragment.TreeFragment( + fragment_code, level='module', pipeline=[ConstantFolding()]) + ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root) + UtilityCode.declare_declarations_in_scope( + decl_code.getvalue(), env.global_scope()) + ast.scope = env + # FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self' + ast.analyse_declarations(env) + py_func = ast.stats[-1] # the DefNode + self.fragment_scope = ast.scope + + if isinstance(self.node, DefNode): + py_func.specialized_cpdefs = self.nodes[:] + else: + py_func.specialized_cpdefs = [n.py_func for n in self.nodes] + + return py_func + + def update_fused_defnode_entry(self, env): + copy_attributes = ( + 'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname', + 'pymethdef_cname', 'doc', 'doc_cname', 'is_member', + 'scope' + ) + + entry = self.py_func.entry + + for attr in copy_attributes: + setattr(entry, attr, + getattr(self.orig_py_func.entry, attr)) + + self.py_func.name = self.orig_py_func.name + self.py_func.doc = self.orig_py_func.doc + + env.entries.pop('__pyx_fused_cpdef', None) + if isinstance(self.node, DefNode): + env.entries[entry.name] = entry + else: + env.entries[entry.name].as_variable = entry + + env.pyfunc_entries.append(entry) + + self.py_func.entry.fused_cfunction = self + for node in self.nodes: + if isinstance(self.node, DefNode): + node.fused_py_func = self.py_func + else: + node.py_func.fused_py_func = self.py_func + node.entry.as_variable = entry + + self.synthesize_defnodes() + self.stats.append(self.__signatures__) + + def analyse_expressions(self, env): + """ + Analyse the expressions. 
Take care to only evaluate default arguments + once and clone the result for all specializations + """ + for fused_compound_type in self.fused_compound_types: + for fused_type in fused_compound_type.get_fused_types(): + for specialization_type in fused_type.types: + if specialization_type.is_complex: + specialization_type.create_declaration_utility_code(env) + + if self.py_func: + self.__signatures__ = self.__signatures__.analyse_expressions(env) + self.py_func = self.py_func.analyse_expressions(env) + self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env) + self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env) + + self.defaults = defaults = [] + + for arg in self.node.args: + if arg.default: + arg.default = arg.default.analyse_expressions(env) + if arg.default.is_literal: + defaults.append(copy.copy(arg.default)) + else: + # coerce the argument to temp since CloneNode really requires a temp + defaults.append(ProxyNode(arg.default.coerce_to_temp(env))) + else: + defaults.append(None) + + for i, stat in enumerate(self.stats): + stat = self.stats[i] = stat.analyse_expressions(env) + if isinstance(stat, FuncDefNode) and stat is not self.py_func: + # the dispatcher specifically doesn't want its defaults overriding + for arg, default in zip(stat.args, defaults): + if default is not None: + if default.is_literal: + arg.default = default.coerce_to(arg.type, env) + else: + arg.default = CloneNode(default).analyse_expressions(env).coerce_to(arg.type, env) + + if self.py_func: + args = [CloneNode(default) for default in defaults if default] + self.defaults_tuple = TupleNode(self.pos, args=args) + self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env) + self.defaults_tuple = ProxyNode(self.defaults_tuple) + self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object) + + fused_func = self.resulting_fused_function.arg + fused_func.defaults_tuple = CloneNode(self.defaults_tuple) + fused_func.code_object = CloneNode(self.code_object) + + for i, pycfunc in enumerate(self.specialized_pycfuncs): + pycfunc.code_object = CloneNode(self.code_object) + pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env) + pycfunc.defaults_tuple = CloneNode(self.defaults_tuple) + return self + + def synthesize_defnodes(self): + """ + Create the __signatures__ dict of PyCFunctionNode specializations. + """ + if isinstance(self.nodes[0], CFuncDefNode): + nodes = [node.py_func for node in self.nodes] + else: + nodes = self.nodes + + # For the moment, fused functions do not support METH_FASTCALL + for node in nodes: + node.entry.signature.use_fastcall = False + + signatures = [StringEncoding.EncodedString(node.specialized_signature_string) + for node in nodes] + keys = [ExprNodes.StringNode(node.pos, value=sig) + for node, sig in zip(nodes, signatures)] + values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True) + for node in nodes] + + self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values)) + + self.specialized_pycfuncs = values + for pycfuncnode in values: + pycfuncnode.is_specialization = True + + def generate_function_definitions(self, env, code): + if self.py_func: + self.py_func.pymethdef_required = True + self.fused_func_assignment.generate_function_definitions(env, code) + + from . 
import Options + for stat in self.stats: + if isinstance(stat, FuncDefNode) and ( + stat.entry.used or + (Options.cimport_from_pyx and not stat.entry.visibility == 'extern')): + code.mark_pos(stat.pos) + stat.generate_function_definitions(env, code) + + def generate_execution_code(self, code): + # Note: all def function specialization are wrapped in PyCFunction + # nodes in the self.__signatures__ dictnode. + for default in self.defaults: + if default is not None: + default.generate_evaluation_code(code) + + if self.py_func: + self.defaults_tuple.generate_evaluation_code(code) + self.code_object.generate_evaluation_code(code) + + for stat in self.stats: + code.mark_pos(stat.pos) + if isinstance(stat, ExprNodes.ExprNode): + stat.generate_evaluation_code(code) + else: + stat.generate_execution_code(code) + + if self.__signatures__: + self.resulting_fused_function.generate_evaluation_code(code) + + code.putln( + "((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" % + (self.resulting_fused_function.result(), + self.__signatures__.result())) + self.__signatures__.generate_giveref(code) + self.__signatures__.generate_post_assignment_code(code) + self.__signatures__.free_temps(code) + + self.fused_func_assignment.generate_execution_code(code) + + # Dispose of results + self.resulting_fused_function.generate_disposal_code(code) + self.resulting_fused_function.free_temps(code) + self.defaults_tuple.generate_disposal_code(code) + self.defaults_tuple.free_temps(code) + self.code_object.generate_disposal_code(code) + self.code_object.free_temps(code) + + for default in self.defaults: + if default is not None: + default.generate_disposal_code(code) + default.free_temps(code) + + def annotate(self, code): + for stat in self.stats: + stat.annotate(code) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/StringEncoding.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/StringEncoding.py new file mode 100644 index 0000000000000000000000000000000000000000..192fc3de3e898bcda758f8f84bd84375d3934806 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/StringEncoding.py @@ -0,0 +1,392 @@ +# +# Cython -- encoding related tools +# + +from __future__ import absolute_import + +import re +import sys + +if sys.version_info[0] >= 3: + _unicode, _str, _bytes, _unichr = str, str, bytes, chr + IS_PYTHON3 = True +else: + _unicode, _str, _bytes, _unichr = unicode, str, str, unichr + IS_PYTHON3 = False + +empty_bytes = _bytes() +empty_unicode = _unicode() + +join_bytes = empty_bytes.join + + +class UnicodeLiteralBuilder(object): + """Assemble a unicode string. 
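On narrow (UCS-2) builds, the `append_charval()` defined just below splits non-BMP code points into a UTF-16 surrogate pair. Its arithmetic, pulled out standalone:

```python
def to_surrogate_pair(code_point):
    # only meaningful for code points outside the BMP (> 0xFFFF)
    assert code_point > 0xFFFF
    code_point -= 0x10000
    return (code_point // 1024) + 0xD800, (code_point % 1024) + 0xDC00

print([hex(u) for u in to_surrogate_pair(0x1F600)])  # ['0xd83d', '0xde00']
```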
+ """ + def __init__(self): + self.chars = [] + + def append(self, characters): + if isinstance(characters, _bytes): + # this came from a Py2 string literal in the parser code + characters = characters.decode("ASCII") + assert isinstance(characters, _unicode), str(type(characters)) + self.chars.append(characters) + + if sys.maxunicode == 65535: + def append_charval(self, char_number): + if char_number > 65535: + # wide Unicode character on narrow platform => replace + # by surrogate pair + char_number -= 0x10000 + self.chars.append( _unichr((char_number // 1024) + 0xD800) ) + self.chars.append( _unichr((char_number % 1024) + 0xDC00) ) + else: + self.chars.append( _unichr(char_number) ) + else: + def append_charval(self, char_number): + self.chars.append( _unichr(char_number) ) + + def append_uescape(self, char_number, escape_string): + self.append_charval(char_number) + + def getstring(self): + return EncodedString(u''.join(self.chars)) + + def getstrings(self): + return (None, self.getstring()) + + +class BytesLiteralBuilder(object): + """Assemble a byte string or char value. + """ + def __init__(self, target_encoding): + self.chars = [] + self.target_encoding = target_encoding + + def append(self, characters): + if isinstance(characters, _unicode): + characters = characters.encode(self.target_encoding) + assert isinstance(characters, _bytes), str(type(characters)) + self.chars.append(characters) + + def append_charval(self, char_number): + self.chars.append( _unichr(char_number).encode('ISO-8859-1') ) + + def append_uescape(self, char_number, escape_string): + self.append(escape_string) + + def getstring(self): + # this *must* return a byte string! + return bytes_literal(join_bytes(self.chars), self.target_encoding) + + def getchar(self): + # this *must* return a byte string! + return self.getstring() + + def getstrings(self): + return (self.getstring(), None) + + +class StrLiteralBuilder(object): + """Assemble both a bytes and a unicode representation of a string. + """ + def __init__(self, target_encoding): + self._bytes = BytesLiteralBuilder(target_encoding) + self._unicode = UnicodeLiteralBuilder() + + def append(self, characters): + self._bytes.append(characters) + self._unicode.append(characters) + + def append_charval(self, char_number): + self._bytes.append_charval(char_number) + self._unicode.append_charval(char_number) + + def append_uescape(self, char_number, escape_string): + self._bytes.append(escape_string) + self._unicode.append_charval(char_number) + + def getstrings(self): + return (self._bytes.getstring(), self._unicode.getstring()) + + +class EncodedString(_unicode): + # unicode string subclass to keep track of the original encoding. 
+ # 'encoding' is None for unicode strings and the source encoding + # otherwise + encoding = None + + def __deepcopy__(self, memo): + return self + + def byteencode(self): + assert self.encoding is not None + return self.encode(self.encoding) + + def utf8encode(self): + assert self.encoding is None + return self.encode("UTF-8") + + @property + def is_unicode(self): + return self.encoding is None + + def contains_surrogates(self): + return string_contains_surrogates(self) + + def as_utf8_string(self): + return bytes_literal(self.utf8encode(), 'utf8') + + def as_c_string_literal(self): + # first encodes the string then produces a c string literal + if self.encoding is None: + s = self.as_utf8_string() + else: + s = bytes_literal(self.byteencode(), self.encoding) + return s.as_c_string_literal() + + if not hasattr(_unicode, "isascii"): + def isascii(self): + # not defined for Python3.7+ since the class already has it + try: + self.encode("ascii") + except UnicodeEncodeError: + return False + else: + return True + + +def string_contains_surrogates(ustring): + """ + Check if the unicode string contains surrogate code points + on a CPython platform with wide (UCS-4) or narrow (UTF-16) + Unicode, i.e. characters that would be spelled as two + separate code units on a narrow platform. + """ + for c in map(ord, ustring): + if c > 65535: # can only happen on wide platforms + return True + if 0xD800 <= c <= 0xDFFF: + return True + return False + + +def string_contains_lone_surrogates(ustring): + """ + Check if the unicode string contains lone surrogate code points + on a CPython platform with wide (UCS-4) or narrow (UTF-16) + Unicode, i.e. characters that would be spelled as two + separate code units on a narrow platform, but that do not form a pair. + """ + last_was_start = False + unicode_uses_surrogate_encoding = sys.maxunicode == 65535 + for c in map(ord, ustring): + # surrogates tend to be rare + if c < 0xD800 or c > 0xDFFF: + if last_was_start: + return True + elif not unicode_uses_surrogate_encoding: + # on 32bit Unicode platforms, there is never a pair + return True + elif c <= 0xDBFF: + if last_was_start: + return True # lone start + last_was_start = True + else: + if not last_was_start: + return True # lone end + last_was_start = False + return last_was_start + + +class BytesLiteral(_bytes): + # bytes subclass that is compatible with EncodedString + encoding = None + + def __deepcopy__(self, memo): + return self + + def byteencode(self): + if IS_PYTHON3: + return _bytes(self) + else: + # fake-recode the string to make it a plain bytes object + return self.decode('ISO-8859-1').encode('ISO-8859-1') + + def utf8encode(self): + assert False, "this is not a unicode string: %r" % self + + def __str__(self): + """Fake-decode the byte string to unicode to support % + formatting of unicode strings. 
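The `_to_escape_sequence()` helper defined just below turns each special byte run into an escape that survives inside a C string literal; note that `'??'` is in the specials list so octal escaping defuses C trigraphs. The core rule, standalone:

```python
def to_escape_sequence(s):
    if s in '\n\r\t':
        return repr(s)[1:-1]       # \n, \r, \t
    elif s == '"':
        return r'\"'
    elif s == '\\':
        return r'\\'
    # within a character sequence, octal escapes read better than hex
    return ''.join('\\%03o' % ord(c) for c in s)

print(to_escape_sequence('\x00'))  # \000
print(to_escape_sequence('??'))    # \077\077 -- avoids C trigraphs
```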
+ """ + return self.decode('ISO-8859-1') + + is_unicode = False + + def as_c_string_literal(self): + value = split_string_literal(escape_byte_string(self)) + return '"%s"' % value + + if not hasattr(_bytes, "isascii"): + def isascii(self): + # already defined for Python3.7+ + return True + + +def bytes_literal(s, encoding): + assert isinstance(s, bytes) + s = BytesLiteral(s) + s.encoding = encoding + return s + + +def encoded_string(s, encoding): + assert isinstance(s, (_unicode, bytes)) + s = EncodedString(s) + if encoding is not None: + s.encoding = encoding + return s + +def encoded_string_or_bytes_literal(s, encoding): + if isinstance(s, bytes): + return bytes_literal(s, encoding) + else: + return encoded_string(s, encoding) + + +char_from_escape_sequence = { + r'\a' : u'\a', + r'\b' : u'\b', + r'\f' : u'\f', + r'\n' : u'\n', + r'\r' : u'\r', + r'\t' : u'\t', + r'\v' : u'\v', + }.get + +_c_special = ('\\', '??', '"') + tuple(map(chr, range(32))) + + +def _to_escape_sequence(s): + if s in '\n\r\t': + return repr(s)[1:-1] + elif s == '"': + return r'\"' + elif s == '\\': + return r'\\' + else: + # within a character sequence, oct passes much better than hex + return ''.join(['\\%03o' % ord(c) for c in s]) + + +def _build_specials_replacer(): + subexps = [] + replacements = {} + for special in _c_special: + regexp = ''.join(['[%s]' % c.replace('\\', '\\\\') for c in special]) + subexps.append(regexp) + replacements[special.encode('ASCII')] = _to_escape_sequence(special).encode('ASCII') + sub = re.compile(('(%s)' % '|'.join(subexps)).encode('ASCII')).sub + def replace_specials(m): + return replacements[m.group(1)] + def replace(s): + return sub(replace_specials, s) + return replace + +_replace_specials = _build_specials_replacer() + + +def escape_char(c): + if IS_PYTHON3: + c = c.decode('ISO-8859-1') + if c in '\n\r\t\\': + return repr(c)[1:-1] + elif c == "'": + return "\\'" + n = ord(c) + if n < 32 or n > 127: + # hex works well for characters + return "\\x%02X" % n + else: + return c + +def escape_byte_string(s): + """Escape a byte string so that it can be written into C code. + Note that this returns a Unicode string instead which, when + encoded as ISO-8859-1, will result in the correct byte sequence + being written. + """ + s = _replace_specials(s) + try: + return s.decode("ASCII") # trial decoding: plain ASCII => done + except UnicodeDecodeError: + pass + if IS_PYTHON3: + s_new = bytearray() + append, extend = s_new.append, s_new.extend + for b in s: + if b >= 128: + extend(('\\%3o' % b).encode('ASCII')) + else: + append(b) + return s_new.decode('ISO-8859-1') + else: + l = [] + append = l.append + for c in s: + o = ord(c) + if o >= 128: + append('\\%3o' % o) + else: + append(c) + return join_bytes(l).decode('ISO-8859-1') + +def split_string_literal(s, limit=2000): + # MSVC can't handle long string literals. + if len(s) < limit: + return s + else: + start = 0 + chunks = [] + while start < len(s): + end = start + limit + if len(s) > end-4 and '\\' in s[end-4:end]: + end -= 4 - s[end-4:end].find('\\') # just before the backslash + while s[end-1] == '\\': + end -= 1 + if end == start: + # must have been a long line of backslashes + end = start + limit - (limit % 2) - 4 + break + chunks.append(s[start:end]) + start = end + return '""'.join(chunks) + +def encode_pyunicode_string(s): + """Create Py_UNICODE[] representation of a given unicode string. 
+ """ + s = list(map(ord, s)) + [0] + + if sys.maxunicode >= 0x10000: # Wide build or Py3.3 + utf16, utf32 = [], s + for code_point in s: + if code_point >= 0x10000: # outside of BMP + high, low = divmod(code_point - 0x10000, 1024) + utf16.append(high + 0xD800) + utf16.append(low + 0xDC00) + else: + utf16.append(code_point) + else: + utf16, utf32 = s, [] + for code_unit in s: + if 0xDC00 <= code_unit <= 0xDFFF and utf32 and 0xD800 <= utf32[-1] <= 0xDBFF: + high, low = utf32[-1], code_unit + utf32[-1] = ((high & 0x3FF) << 10) + (low & 0x3FF) + 0x10000 + else: + utf32.append(code_unit) + + if utf16 == utf32: + utf16 = [] + return ",".join(map(_unicode, utf16)), ",".join(map(_unicode, utf32)) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/UtilityCode.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/UtilityCode.py new file mode 100644 index 0000000000000000000000000000000000000000..71dc3088b63d5779a2bbf0261461c438c9d72547 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/UtilityCode.py @@ -0,0 +1,266 @@ +from __future__ import absolute_import + +from .TreeFragment import parse_from_strings, StringParseContext +from . import Symtab +from . import Naming +from . import Code + + +class NonManglingModuleScope(Symtab.ModuleScope): + + def __init__(self, prefix, *args, **kw): + self.prefix = prefix + self.cython_scope = None + self.cpp = kw.pop('cpp', False) + Symtab.ModuleScope.__init__(self, *args, **kw) + + def add_imported_entry(self, name, entry, pos): + entry.used = True + return super(NonManglingModuleScope, self).add_imported_entry(name, entry, pos) + + def mangle(self, prefix, name=None): + if name: + if prefix in (Naming.typeobj_prefix, Naming.func_prefix, Naming.var_prefix, Naming.pyfunc_prefix): + # Functions, classes etc. gets a manually defined prefix easily + # manually callable instead (the one passed to CythonUtilityCode) + prefix = self.prefix + return "%s%s" % (prefix, name) + else: + return Symtab.ModuleScope.mangle(self, prefix) + + +class CythonUtilityCodeContext(StringParseContext): + scope = None + + def find_module(self, module_name, from_module=None, pos=None, need_pxd=True, absolute_fallback=True, relative_import=False): + if from_module: + raise AssertionError("Relative imports not supported in utility code.") + if module_name != self.module_name: + if module_name not in self.modules: + raise AssertionError("Only the cython cimport is supported.") + else: + return self.modules[module_name] + + if self.scope is None: + self.scope = NonManglingModuleScope( + self.prefix, module_name, parent_module=None, context=self, cpp=self.cpp) + + return self.scope + + +class CythonUtilityCode(Code.UtilityCodeBase): + """ + Utility code written in the Cython language itself. + + The @cname decorator can set the cname for a function, method of cdef class. + Functions decorated with @cname('c_func_name') get the given cname. + + For cdef classes the rules are as follows: + obj struct -> _obj + obj type ptr -> _type + methods -> _ + + For methods the cname decorator is optional, but without the decorator the + methods will not be prototyped. See Cython.Compiler.CythonScope and + tests/run/cythonscope.pyx for examples. 
+ """ + + is_cython_utility = True + + def __init__(self, impl, name="__pyxutil", prefix="", requires=None, + file=None, from_scope=None, context=None, compiler_directives=None, + outer_module_scope=None): + # 1) We need to delay the parsing/processing, so that all modules can be + # imported without import loops + # 2) The same utility code object can be used for multiple source files; + # while the generated node trees can be altered in the compilation of a + # single file. + # Hence, delay any processing until later. + context_types = {} + if context is not None: + from .PyrexTypes import BaseType + for key, value in context.items(): + if isinstance(value, BaseType): + context[key] = key + context_types[key] = value + impl = Code.sub_tempita(impl, context, file, name) + self.impl = impl + self.name = name + self.file = file + self.prefix = prefix + self.requires = requires or [] + self.from_scope = from_scope + self.outer_module_scope = outer_module_scope + self.compiler_directives = compiler_directives + self.context_types = context_types + + def __eq__(self, other): + if isinstance(other, CythonUtilityCode): + return self._equality_params() == other._equality_params() + else: + return False + + def _equality_params(self): + outer_scope = self.outer_module_scope + while isinstance(outer_scope, NonManglingModuleScope): + outer_scope = outer_scope.outer_scope + return self.impl, outer_scope, self.compiler_directives + + def __hash__(self): + return hash(self.impl) + + def get_tree(self, entries_only=False, cython_scope=None): + from .AnalysedTreeTransforms import AutoTestDictTransform + # The AutoTestDictTransform creates the statement "__test__ = {}", + # which when copied into the main ModuleNode overwrites + # any __test__ in user code; not desired + excludes = [AutoTestDictTransform] + + from . 
import Pipeline, ParseTreeTransforms + context = CythonUtilityCodeContext( + self.name, compiler_directives=self.compiler_directives, + cpp=cython_scope.is_cpp() if cython_scope else False) + context.prefix = self.prefix + context.cython_scope = cython_scope + #context = StringParseContext(self.name) + tree = parse_from_strings( + self.name, self.impl, context=context, allow_struct_enum_decorator=True, + in_utility_code=True) + pipeline = Pipeline.create_pipeline(context, 'pyx', exclude_classes=excludes) + + if entries_only: + p = [] + for t in pipeline: + p.append(t) + if isinstance(t, ParseTreeTransforms.AnalyseDeclarationsTransform): + break + + pipeline = p + + transform = ParseTreeTransforms.CnameDirectivesTransform(context) + # InterpretCompilerDirectives already does a cdef declarator check + #before = ParseTreeTransforms.DecoratorTransform + before = ParseTreeTransforms.InterpretCompilerDirectives + pipeline = Pipeline.insert_into_pipeline(pipeline, transform, + before=before) + + def merge_scope(scope): + def merge_scope_transform(module_node): + module_node.scope.merge_in(scope) + return module_node + return merge_scope_transform + + if self.from_scope: + pipeline = Pipeline.insert_into_pipeline( + pipeline, merge_scope(self.from_scope), + before=ParseTreeTransforms.AnalyseDeclarationsTransform) + + for dep in self.requires: + if isinstance(dep, CythonUtilityCode) and hasattr(dep, 'tree') and not cython_scope: + pipeline = Pipeline.insert_into_pipeline( + pipeline, merge_scope(dep.tree.scope), + before=ParseTreeTransforms.AnalyseDeclarationsTransform) + + if self.outer_module_scope: + # inject outer module between utility code module and builtin module + def scope_transform(module_node): + module_node.scope.outer_scope = self.outer_module_scope + return module_node + + pipeline = Pipeline.insert_into_pipeline( + pipeline, scope_transform, + before=ParseTreeTransforms.AnalyseDeclarationsTransform) + + if self.context_types: + # inject types into module scope + def scope_transform(module_node): + dummy_entry = object() + for name, type in self.context_types.items(): + # Restore the old type entry after declaring the type. + # We need to access types in the scope, but this shouldn't alter the entry + # that is visible from everywhere else + old_type_entry = getattr(type, "entry", dummy_entry) + entry = module_node.scope.declare_type(name, type, None, visibility='extern') + if old_type_entry is not dummy_entry: + type.entry = old_type_entry + entry.in_cinclude = True + return module_node + + pipeline = Pipeline.insert_into_pipeline( + pipeline, scope_transform, + before=ParseTreeTransforms.AnalyseDeclarationsTransform) + + (err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False) + assert not err, err + self.tree = tree + return tree + + def put_code(self, output): + pass + + @classmethod + def load_as_string(cls, util_code_name, from_file=None, **kwargs): + """ + Load a utility code as a string. Returns (proto, implementation) + """ + util = cls.load(util_code_name, from_file, **kwargs) + return util.proto, util.impl # keep line numbers => no lstrip() + + def declare_in_scope(self, dest_scope, used=False, cython_scope=None, + allowlist=None): + """ + Declare all entries from the utility code in dest_scope. Code will only + be included for used entries. If module_name is given, declare the + type entries with that name. 
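The get_tree() machinery earlier in this hunk repeatedly splices extra transforms into the compilation pipeline just before a given phase. A generic sketch of that insert-before pattern, under the assumption that a pipeline is a plain list of phase objects (insert_before is a hypothetical helper; Cython's real Pipeline.insert_into_pipeline has its own semantics):

    def insert_before(pipeline, new_phase, before_class):
        # Return a copy of the pipeline with new_phase placed immediately
        # before the first phase that is an instance of before_class.
        for i, phase in enumerate(pipeline):
            if isinstance(phase, before_class):
                return pipeline[:i] + [new_phase] + pipeline[i:]
        raise ValueError("no %s phase found in pipeline" % before_class.__name__)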
+ """ + tree = self.get_tree(entries_only=True, cython_scope=cython_scope) + + entries = tree.scope.entries + entries.pop('__name__') + entries.pop('__file__') + entries.pop('__builtins__') + entries.pop('__doc__') + + for entry in entries.values(): + entry.utility_code_definition = self + entry.used = used + + original_scope = tree.scope + dest_scope.merge_in(original_scope, merge_unused=True, allowlist=allowlist) + tree.scope = dest_scope + + for dep in self.requires: + if dep.is_cython_utility: + dep.declare_in_scope(dest_scope, cython_scope=cython_scope) + + return original_scope + + @staticmethod + def filter_inherited_directives(current_directives): + """ + Cython utility code should usually only pick up a few directives from the + environment (those that intentionally control its function) and ignore most + other compiler directives. This function provides a sensible default list + of directives to copy. + """ + from .Options import _directive_defaults + utility_code_directives = dict(_directive_defaults) + inherited_directive_names = ( + 'binding', 'always_allow_keywords', 'allow_none_for_extension_args', + 'auto_pickle', 'ccomplex', + 'c_string_type', 'c_string_encoding', + 'optimize.inline_defnode_calls', 'optimize.unpack_method_calls', + 'optimize.unpack_method_calls_in_pyinit', 'optimize.use_switch') + for name in inherited_directive_names: + if name in current_directives: + utility_code_directives[name] = current_directives[name] + return utility_code_directives + + +def declare_declarations_in_scope(declaration_string, env, private_type=True, + *args, **kwargs): + """ + Declare some declarations given as Cython code in declaration_string + in scope env. + """ + CythonUtilityCode(declaration_string, *args, **kwargs).declare_in_scope(env) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Version.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Version.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb561f78c0445a05d9eb49e40de8620ad2fef65 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Version.py @@ -0,0 +1,9 @@ +# for backwards compatibility + +from __future__ import absolute_import + +from .. import __version__ as version + +# For 'generated by' header line in C files. + +watermark = str(version) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/AutoDocTransforms.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/AutoDocTransforms.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6c86d73b4ab01c5d3351d872076eee1c4b99e82 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/AutoDocTransforms.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/DFA.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/DFA.py new file mode 100644 index 0000000000000000000000000000000000000000..66dc4a3798ba66e087e3a0e93de7c748efdf3fa0 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/DFA.py @@ -0,0 +1,149 @@ +# cython: auto_cpdef=True +""" +Python Lexical Analyser + +Converting NFA to DFA +""" +from __future__ import absolute_import + +from . 
import Machines +from .Machines import LOWEST_PRIORITY +from .Transitions import TransitionMap + + +def nfa_to_dfa(old_machine, debug=None): + """ + Given a nondeterministic Machine, return a new equivalent + Machine which is deterministic. + """ + # We build a new machine whose states correspond to sets of states + # in the old machine. Initially we add a new state corresponding to + # the epsilon-closure of each initial old state. Then we give transitions + # to each new state which are the union of all transitions out of any + # of the corresponding old states. The new state reached on a given + # character is the one corresponding to the set of states reachable + # on that character from any of the old states. As new combinations of + # old states are created, new states are added as needed until closure + # is reached. + new_machine = Machines.FastMachine() + state_map = StateMap(new_machine) + + # Seed the process using the initial states of the old machine. + # Make the corresponding new states into initial states of the new + # machine with the same names. + for (key, old_state) in old_machine.initial_states.items(): + new_state = state_map.old_to_new(epsilon_closure(old_state)) + new_machine.make_initial_state(key, new_state) + + # Tricky bit here: we add things to the end of this list while we're + # iterating over it. The iteration stops when closure is achieved. + for new_state in new_machine.states: + transitions = TransitionMap() + for old_state in state_map.new_to_old(new_state): + for event, old_target_states in old_state.transitions.items(): + if event and old_target_states: + transitions.add_set(event, set_epsilon_closure(old_target_states)) + for event, old_states in transitions.items(): + new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states)) + + if debug: + debug.write("\n===== State Mapping =====\n") + state_map.dump(debug) + return new_machine + + +def set_epsilon_closure(state_set): + """ + Given a set of states, return the union of the epsilon + closures of its member states. + """ + result = {} + for state1 in state_set: + for state2 in epsilon_closure(state1): + result[state2] = 1 + return result + + +def epsilon_closure(state): + """ + Return the set of states reachable from the given state + by epsilon moves. + """ + # Cache the result + result = state.epsilon_closure + if result is None: + result = {} + state.epsilon_closure = result + add_to_epsilon_closure(result, state) + return result + + +def add_to_epsilon_closure(state_set, state): + """ + Recursively add to |state_set| states reachable from the given state + by epsilon moves. + """ + if not state_set.get(state, 0): + state_set[state] = 1 + state_set_2 = state.transitions.get_epsilon() + if state_set_2: + for state2 in state_set_2: + add_to_epsilon_closure(state_set, state2) + + +class StateMap(object): + """ + Helper class used by nfa_to_dfa() to map back and forth between + sets of states from the old machine and states of the new machine. + """ + + def __init__(self, new_machine): + self.new_machine = new_machine # Machine + self.old_to_new_dict = {} # {(old_state,...) : new_state} + self.new_to_old_dict = {} # {id(new_state) : old_state_set} + + def old_to_new(self, old_state_set): + """ + Return the state of the new machine corresponding to the + set of old machine states represented by |state_set|. A new + state will be created if necessary. 
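The subset construction that nfa_to_dfa() implements above can be seen in miniature on a hand-written NFA. A self-contained sketch using plain dicts rather than Plex's Machine classes (toy_nfa_to_dfa is a hypothetical name; '' marks an epsilon transition):

    from collections import deque

    def toy_nfa_to_dfa(nfa, start, alphabet):
        # nfa: {state: {symbol: {target states}}}
        def eps_closure(states):
            stack, seen = list(states), set(states)
            while stack:
                for t in nfa.get(stack.pop(), {}).get('', ()):
                    if t not in seen:
                        seen.add(t)
                        stack.append(t)
            return frozenset(seen)

        start_set = eps_closure({start})
        dfa, todo = {}, deque([start_set])
        while todo:
            cur = todo.popleft()
            if cur in dfa:
                continue  # already expanded this set of old states
            dfa[cur] = {}
            for sym in alphabet:
                nxt = eps_closure({t for s in cur for t in nfa.get(s, {}).get(sym, ())})
                if nxt:
                    dfa[cur][sym] = nxt
                    todo.append(nxt)  # new state sets are expanded until closure
        return dfa

    # a(b|c)* as an NFA with an epsilon edge from state 1 to 2:
    nfa = {0: {'a': {1}}, 1: {'': {2}}, 2: {'b': {2}, 'c': {2}}}
    print(toy_nfa_to_dfa(nfa, 0, 'abc'))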
If any of the old states + are accepting states, the new state will be an accepting state + with the highest priority action from the old states. + """ + key = self.make_key(old_state_set) + new_state = self.old_to_new_dict.get(key, None) + if not new_state: + action = self.highest_priority_action(old_state_set) + new_state = self.new_machine.new_state(action) + self.old_to_new_dict[key] = new_state + self.new_to_old_dict[id(new_state)] = old_state_set + return new_state + + def highest_priority_action(self, state_set): + best_action = None + best_priority = LOWEST_PRIORITY + for state in state_set: + priority = state.action_priority + if priority > best_priority: + best_action = state.action + best_priority = priority + return best_action + + def new_to_old(self, new_state): + """Given a new state, return a set of corresponding old states.""" + return self.new_to_old_dict[id(new_state)] + + def make_key(self, state_set): + """ + Convert a set of states into a uniquified + sorted tuple suitable for use as a dictionary key. + """ + return tuple(sorted(state_set)) + + def dump(self, file): + from .Transitions import state_set_str + + for new_state in self.new_machine.states: + old_state_set = self.new_to_old_dict[id(new_state)] + file.write(" State %s <-- %s\n" % ( + new_state['number'], state_set_str(old_state_set))) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..83bb9239abec1bc1da2f9aa85a266d73e264f6a1 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__init__.py @@ -0,0 +1,35 @@ +""" +Python Lexical Analyser + +The Plex module provides lexical analysers with similar capabilities +to GNU Flex. The following classes and functions are exported; +see the attached docstrings for more information. + + Scanner For scanning a character stream under the + direction of a Lexicon. + + Lexicon For constructing a lexical definition + to be used by a Scanner. + + Str, Any, AnyBut, AnyChar, Seq, Alt, Opt, Rep, Rep1, + Bol, Eol, Eof, Empty + + Regular expression constructors, for building pattern + definitions for a Lexicon. + + State For defining scanner states when creating a + Lexicon. + + TEXT, IGNORE, Begin + + Actions for associating with patterns when + creating a Lexicon. 
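A minimal end-to-end use of these exports, as a sketch rather than documented API: it assumes the vendored package is importable and relies on Plex returning plain (non-Action) values as the token value.

    from io import StringIO
    from Cython.Plex import Lexicon, Scanner, Any, Rep1, IGNORE

    letters = Any("abcdefghijklmnopqrstuvwxyz")
    digits = Any("0123456789")
    lexicon = Lexicon([
        (Rep1(letters), 'name'),       # plain values are returned as the token
        (Rep1(digits), 'number'),
        (Rep1(Any(" \t\n")), IGNORE),  # skip whitespace entirely
    ])

    scanner = Scanner(lexicon, StringIO("foo 42 bar"), "<demo>")
    while True:
        value, text = scanner.read()   # (None, '') signals end of input
        if value is None:
            break
        print(value, text)             # -> name foo / number 42 / name bar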
+""" +# flake8: noqa:F401 +from __future__ import absolute_import + +from .Actions import TEXT, IGNORE, Begin, Method +from .Lexicons import Lexicon, State +from .Regexps import RE, Seq, Alt, Rep1, Empty, Str, Any, AnyBut, AnyChar, Range +from .Regexps import Opt, Rep, Bol, Eol, Eof, Case, NoCase +from .Scanners import Scanner diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Actions.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Actions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..736c73f394cdb52bbc629daabd33c59aaa188a2c Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Actions.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Lexicons.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Lexicons.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b5c0456131abd8087c7793c52708e4725b2d8d1 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Lexicons.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Regexps.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Regexps.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d973d61a0bf561343eb1a520fe2df151d9dfcfb Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Regexps.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Transitions.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Transitions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c95181bfb3f4f567a1059d6635011ac72f42dd5 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Transitions.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be2fd2498a273226ef31b073e31f5baa44c80125 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..41a0ce3d0efa247760db266bace8e34a4b5dd9fa --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__init__.py @@ -0,0 +1,4 @@ +# The original Tempita implements all of its templating code here. +# Moved it to _tempita.py to make the compilation portable. 
+ +from ._tempita import * diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3239b97cbc4bab9a3e22a6bd458b797daaeecd1 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/_looper.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/_looper.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beb9ae4f1b92f7cd38558b4fcc3429793a231427 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/_looper.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/_tempita.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/_tempita.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a00a33f241023af28ea63e7c9cf03fadf5758ea Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/_tempita.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/compat3.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/compat3.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3054dee56bc9dabf33c93c5de909cf23ce86c23f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/__pycache__/compat3.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_tempita.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_tempita.py new file mode 100644 index 0000000000000000000000000000000000000000..284bb497987f625889a8506a2a515fec95c18f76 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_tempita.py @@ -0,0 +1,1091 @@ +# cython: language_level=3str + +""" +A small templating language + +This implements a small templating language. This language implements +if/elif/else, for/continue/break, expressions, and blocks of Python +code. The syntax is:: + + {{any expression (function calls etc)}} + {{any expression | filter}} + {{for x in y}}...{{endfor}} + {{if x}}x{{elif y}}y{{else}}z{{endif}} + {{py:x=1}} + {{py: + def foo(bar): + return 'baz' + }} + {{default var = default_value}} + {{# comment}} + +You use this with the ``Template`` class or the ``sub`` shortcut. +The ``Template`` class takes the template string and the name of +the template (for errors) and a default namespace. Then (like +``string.Template``) you can call the ``tmpl.substitute(**kw)`` +method to make a substitution (or ``tmpl.substitute(a_dict)``). + +``sub(content, **kw)`` substitutes the template immediately. You +can use ``__name='tmpl.html'`` to set the name of the template. + +If there are syntax errors ``TemplateError`` will be raised. 
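The constructs listed above compose in the obvious way; a small illustration of {{default}} and {{if}}/{{else}} together (a sketch, assuming the vendored package is importable):

    from Cython.Tempita import Template

    tmpl = Template(
        "{{default greeting = 'Hi'}}"
        "{{if name}}{{greeting}}, {{name}}!{{else}}Hello, stranger.{{endif}}",
        name='demo.txt')
    print(tmpl.substitute(name='Eve'))  # -> Hi, Eve!
    print(tmpl.substitute(name=''))     # -> Hello, stranger.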
+""" + +from __future__ import absolute_import + +import re +import sys +import os +import tokenize +from io import StringIO + +from ._looper import looper +from .compat3 import bytes, unicode_, basestring_, next, is_unicode, coerce_text + +__all__ = ['TemplateError', 'Template', 'sub', 'bunch'] + +in_re = re.compile(r'\s+in\s+') +var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) + + +class TemplateError(Exception): + """Exception raised while parsing a template + """ + + def __init__(self, message, position, name=None): + Exception.__init__(self, message) + self.position = position + self.name = name + + def __str__(self): + msg = ' '.join(self.args) + if self.position: + msg = '%s at line %s column %s' % ( + msg, self.position[0], self.position[1]) + if self.name: + msg += ' in %s' % self.name + return msg + + +class _TemplateContinue(Exception): + pass + + +class _TemplateBreak(Exception): + pass + + +def get_file_template(name, from_template): + path = os.path.join(os.path.dirname(from_template.name), name) + return from_template.__class__.from_filename( + path, namespace=from_template.namespace, + get_template=from_template.get_template) + + +class Template(object): + + default_namespace = { + 'start_braces': '{{', + 'end_braces': '}}', + 'looper': looper, + } + + default_encoding = 'utf8' + default_inherit = None + + def __init__(self, content, name=None, namespace=None, stacklevel=None, + get_template=None, default_inherit=None, line_offset=0, + delimiters=None, delimeters=None): + self.content = content + + # set delimiters + if delimeters: + import warnings + warnings.warn( + "'delimeters' kwarg is being deprecated in favor of correctly" + " spelled 'delimiters'. Please adjust your code.", + DeprecationWarning + ) + if delimiters is None: + delimiters = delimeters + if delimiters is None: + delimiters = (self.default_namespace['start_braces'], + self.default_namespace['end_braces']) + else: + #assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) + # for delimiter in delimiters]) + self.default_namespace = self.__class__.default_namespace.copy() + self.default_namespace['start_braces'] = delimiters[0] + self.default_namespace['end_braces'] = delimiters[1] + self.delimiters = self.delimeters = delimiters # Keep a legacy read-only copy, but don't use it. 
+ + self._unicode = is_unicode(content) + if name is None and stacklevel is not None: + try: + caller = sys._getframe(stacklevel) + except ValueError: + pass + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__file__' in globals: + name = globals['__file__'] + if name.endswith('.pyc') or name.endswith('.pyo'): + name = name[:-1] + elif '__name__' in globals: + name = globals['__name__'] + else: + name = '' + if lineno: + name += ':%s' % lineno + self.name = name + self._parsed = parse(content, name=name, line_offset=line_offset, delimiters=self.delimiters) + if namespace is None: + namespace = {} + self.namespace = namespace + self.get_template = get_template + if default_inherit is not None: + self.default_inherit = default_inherit + + def from_filename(cls, filename, namespace=None, encoding=None, + default_inherit=None, get_template=get_file_template): + with open(filename, 'rb') as f: + c = f.read() + if encoding: + c = c.decode(encoding) + return cls(content=c, name=filename, namespace=namespace, + default_inherit=default_inherit, get_template=get_template) + + from_filename = classmethod(from_filename) + + def __repr__(self): + return '<%s %s name=%r>' % ( + self.__class__.__name__, + hex(id(self))[2:], self.name) + + def substitute(self, *args, **kw): + if args: + if kw: + raise TypeError( + "You can only give positional *or* keyword arguments") + if len(args) > 1: + raise TypeError( + "You can only give one positional argument") + if not hasattr(args[0], 'items'): + raise TypeError( + "If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r" + % (args[0],)) + kw = args[0] + ns = kw + ns['__template_name__'] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def _interpret(self, ns): + __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if '__inherit__' in defs: + inherit = defs.pop('__inherit__') + else: + inherit = None + return ''.join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + 'You cannot use inheritance without passing in get_template', + position=None, name=self.name) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in defs.items(): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns['self'] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + __traceback_hide__ = True + name, pos = code[0], code[1] + if name == 'py': + self._exec(code[2], ns, pos) + elif name == 'continue': + raise _TemplateContinue() + elif name == 'break': + raise _TemplateBreak() + elif name == 'for': + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + elif name == 'cond': + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == 'expr': + parts = code[2].split('|') + base = self._eval(parts[0], ns, pos) + for 
part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == 'default': + var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == 'inherit': + expr = code[2] + value = self._eval(expr, ns, pos) + defs['__inherit__'] = value + elif name == 'def': + name = code[2] + signature = code[3] + parts = code[4] + ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns, + pos=pos) + elif name == 'comment': + return + else: + assert 0, "Unknown code: %r" % name + + def _interpret_for(self, vars, expr, content, ns, out, defs): + __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + 'Need %i items to unpack (got %i items)' + % (len(vars), len(item))) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + __traceback_hide__ = True + # @@: if/else/else gets through + for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == 'else': + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError( + 'invalid syntax in expression: %s' % code) + return value + except Exception as e: + if getattr(e, 'args', None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) + raise + + def _exec(self, code, ns, pos): + __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except Exception as e: + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + raise + + def _repr(self, value, pos): + __traceback_hide__ = True + try: + if value is None: + return '' + if self._unicode: + try: + value = unicode_(value) + except UnicodeDecodeError: + value = bytes(value) + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if (is_unicode(value) + and self.default_encoding): + value = value.encode(self.default_encoding) + except Exception as e: + e.args = (self._add_line_info(e.args[0], pos),) + raise + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + 'Cannot decode bytes value %r into unicode ' + '(no default_encoding provided)' % value) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + ' in string %r' % value) + elif not self._unicode and is_unicode(value): + if not self.default_encoding: + raise UnicodeEncodeError( + 'Cannot encode unicode value %r into bytes ' + '(no default_encoding provided)' % value) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = "%s at line %s column %s" % ( + msg, pos[0], pos[1]) + if self.name: + msg += " in file %s" % self.name + return msg + + +def sub(content, delimiters=None, **kw): + name = kw.get('__name') + delimeters = kw.pop('delimeters') if 'delimeters' in kw else None # for legacy code + tmpl = 
Template(content, name=name, delimiters=delimiters, delimeters=delimeters)
+    return tmpl.substitute(kw)
+
+
+def paste_script_template_renderer(content, vars, filename=None):
+    tmpl = Template(content, name=filename)
+    return tmpl.substitute(vars)
+
+
+class bunch(dict):
+
+    def __init__(self, **kw):
+        for name, value in kw.items():
+            setattr(self, name, value)
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __getitem__(self, key):
+        if 'default' in self:
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return dict.__getitem__(self, 'default')
+        else:
+            return dict.__getitem__(self, key)
+
+    def __repr__(self):
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            ' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())]))
+
+
+class TemplateDef(object):
+    def __init__(self, template, func_name, func_signature,
+                 body, ns, pos, bound_self=None):
+        self._template = template
+        self._func_name = func_name
+        self._func_signature = func_signature
+        self._body = body
+        self._ns = ns
+        self._pos = pos
+        self._bound_self = bound_self
+
+    def __repr__(self):
+        return '<tempita function %s(%s) at %s:%s>' % (
+            self._func_name, self._func_signature,
+            self._template.name, self._pos)
+
+    def __str__(self):
+        return self()
+
+    def __call__(self, *args, **kw):
+        values = self._parse_signature(args, kw)
+        ns = self._ns.copy()
+        ns.update(values)
+        if self._bound_self is not None:
+            ns['self'] = self._bound_self
+        out = []
+        subdefs = {}
+        self._template._interpret_codes(self._body, ns, out, subdefs)
+        return ''.join(out)
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        return self.__class__(
+            self._template, self._func_name, self._func_signature,
+            self._body, self._ns, self._pos, bound_self=obj)
+
+    def _parse_signature(self, args, kw):
+        values = {}
+        sig_args, var_args, var_kw, defaults = self._func_signature
+        extra_kw = {}
+        for name, value in kw.items():
+            if not var_kw and name not in sig_args:
+                raise TypeError(
+                    'Unexpected argument %s' % name)
+            if name in sig_args:
+                values[name] = value
+            else:
+                extra_kw[name] = value
+        args = list(args)
+        sig_args = list(sig_args)
+        while args:
+            while sig_args and sig_args[0] in values:
+                sig_args.pop(0)
+            if sig_args:
+                name = sig_args.pop(0)
+                values[name] = args.pop(0)
+            elif var_args:
+                values[var_args] = tuple(args)
+                break
+            else:
+                raise TypeError(
+                    'Extra positional arguments: %s'
+                    % ', '.join([repr(v) for v in args]))
+        for name, value_expr in defaults.items():
+            if name not in values:
+                values[name] = self._template._eval(
+                    value_expr, self._ns, self._pos)
+        for name in sig_args:
+            if name not in values:
+                raise TypeError(
+                    'Missing argument: %s' % name)
+        if var_kw:
+            values[var_kw] = extra_kw
+        return values
+
+
+class TemplateObject(object):
+
+    def __init__(self, name):
+        self.__name = name
+        self.get = TemplateObjectGetter(self)
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, self.__name)
+
+
+class TemplateObjectGetter(object):
+
+    def __init__(self, template_obj):
+        self.__template_obj = template_obj
+
+    def __getattr__(self, attr):
+        return getattr(self.__template_obj, attr, Empty)
+
+    def __repr__(self):
+        return '<%s around %r>' % (self.__class__.__name__, self.__template_obj)
+
+
+class _Empty(object):
+    def __call__(self, *args, **kw):
+        return self
+
+    def __str__(self):
+        return ''
+
+    def __repr__(self):
+        return 'Empty'
+
+    def __unicode__(self):
+        return u''
+
+    def
__iter__(self): + return iter(()) + + def __bool__(self): + return False + + if sys.version < "3": + __nonzero__ = __bool__ + +Empty = _Empty() +del _Empty + +############################################################ +## Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): + """ + Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... + TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... + TemplateError: {{ inside expression at line 1 column 10 + + """ + if delimiters is None: + delimiters = ( Template.default_namespace['start_braces'], + Template.default_namespace['end_braces'] ) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + + token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]), + re.escape(delimiters[1]))) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimiters[0] and in_expr: + raise TemplateError('%s inside expression' % delimiters[0], + position=pos, + name=name) + elif expr == delimiters[1] and not in_expr: + raise TemplateError('%s outside expression' % delimiters[1], + position=pos, + name=name) + if expr == delimiters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last:match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError('No %s to finish last expression' % delimiters[1], + name=name, position=last_pos) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + +statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)') +single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break'] +trail_whitespace_re = re.compile(r'\n\r?[\t ]*$') +lead_whitespace_re = re.compile(r'^[\t ]*\n') + + +def trim_lex(tokens): + r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + last_trim = None + for i, current in enumerate(tokens): + if isinstance(current, basestring_): + # we don't trim this + continue + item = current[0] + if not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = '' + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = '' + else: + next_chunk = tokens[i + 1] + if (not isinstance(next_chunk, basestring_) + or not isinstance(prev, basestring_)): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = 'last' + if (prev_ok + and (not next_chunk or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()))): + if prev: + if ((i == 1 and not prev.strip()) + or prev_ok == 'last'): + tokens[i - 1] = '' + else: + 
m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[:m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = '' + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + + +def find_position(string, index, last_index, last_pos): + """Given a string and index, return (line, column)""" + lines = string.count('\n', last_index, index) + if lines > 0: + column = index - string.rfind('\n', last_index, index) + else: + column = last_pos[1] + (index - last_index) + return (last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimiters=None): + r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') + ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') + Traceback (most recent call last): + ... 
+ TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 + """ + if delimiters is None: + delimiters = ( Template.default_namespace['start_braces'], + Template.default_namespace['end_braces'] ) + tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith('py:'): + expr = expr[3:].lstrip(' \t') + if expr.startswith('\n') or expr.startswith('\r'): + expr = expr.lstrip('\r\n') + if '\r' in expr: + expr = expr.replace('\r\n', '\n') + expr = expr.replace('\r', '') + expr += '\n' + else: + if '\n' in expr: + raise TemplateError( + 'Multi-line py blocks must start with a newline', + position=pos, name=name) + return ('py', pos, expr), tokens[1:] + elif expr in ('continue', 'break'): + if 'for' not in context: + raise TemplateError( + 'continue outside of for loop', + position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith('if '): + return parse_cond(tokens, name, context) + elif (expr.startswith('elif ') + or expr == 'else'): + raise TemplateError( + '%s outside of an if block' % expr.split()[0], + position=pos, name=name) + elif expr in ('if', 'elif', 'for'): + raise TemplateError( + '%s with no expression' % expr, + position=pos, name=name) + elif expr in ('endif', 'endfor', 'enddef'): + raise TemplateError( + 'Unexpected %s' % expr, + position=pos, name=name) + elif expr.startswith('for '): + return parse_for(tokens, name, context) + elif expr.startswith('default '): + return parse_default(tokens, name, context) + elif expr.startswith('inherit '): + return parse_inherit(tokens, name, context) + elif expr.startswith('def '): + return parse_def(tokens, name, context) + elif expr.startswith('#'): + return ('comment', pos, tokens[0][0]), tokens[1:] + return ('expr', pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ('if',) + while 1: + if not tokens: + raise TemplateError( + 'Missing {{endif}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'endif'): + return ('cond', start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + if first.endswith(':'): + first = first[:-1] + if first.startswith('if '): + part = ('if', pos, first[3:].lstrip(), content) + elif first.startswith('elif '): + part = ('elif', pos, first[5:].lstrip(), content) + elif first == 'else': + part = ('else', pos, None, content) + else: + assert 0, "Unexpected token %r at %s" % (first, pos) + while 1: + if not tokens: + raise TemplateError( + 'No {{endif}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) + and (tokens[0][0] == 'endif' + or tokens[0][0].startswith('elif ') + or tokens[0][0] == 'else')): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ('for',) + context + content = [] + assert first.startswith('for '), first + if first.endswith(':'): + first = first[:-1] + first = first[3:].strip() 
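+    # At this point, for a chunk like "{{for x, y in z.items()}}",
+    # first == "x, y in z.items()"; the first standalone "in" (matched
+    # below) splits the loop targets from the iterable expression.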
+ match = in_re.search(first) + if not match: + raise TemplateError( + 'Bad for (no "in") in %r' % first, + position=pos, name=name) + vars = first[:match.start()] + if '(' in vars: + raise TemplateError( + 'You cannot have () in the variable section of a for loop (%r)' + % vars, position=pos, name=name) + vars = tuple([ + v.strip() for v in first[:match.start()].split(',') + if v.strip()]) + expr = first[match.end():] + while 1: + if not tokens: + raise TemplateError( + 'No {{endfor}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'endfor'): + return ('for', pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('default ') + first = first.split(None, 1)[1] + parts = first.split('=', 1) + if len(parts) == 1: + raise TemplateError( + "Expression must be {{default var=value}}; no = found in %r" % first, + position=pos, name=name) + var = parts[0].strip() + if ',' in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", + position=pos, name=name) + if not var_re.search(var): + raise TemplateError( + "Not a valid variable name for {{default}}: %r" + % var, position=pos, name=name) + expr = parts[1].strip() + return ('default', pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('inherit ') + expr = first.split(None, 1)[1] + return ('inherit', pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith('def ') + first = first.split(None, 1)[1] + if first.endswith(':'): + first = first[:-1] + if '(' not in first: + func_name = first + sig = ((), None, None, {}) + elif not first.endswith(')'): + raise TemplateError("Function definition doesn't end with ): %s" % first, + position=start, name=name) + else: + first = first[:-1] + func_name, sig_text = first.split('(', 1) + sig = parse_signature(sig_text, name, start) + context = context + ('def',) + content = [] + while 1: + if not tokens: + raise TemplateError( + 'Missing {{enddef}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'enddef'): + return ('def', start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) + except StopIteration: + return tokenize.ENDMARKER, '' + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'): + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','): + if var_arg_type == '*': + var_arg = var_name + elif var_arg_type == '**': + var_kw = var_name + 
else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + if tok_type == tokenize.OP and tok_string == '=': + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = end_pos = None + parts = [] + while 1: + tok_type, tok_string, s, e = get_token(True) + if start_pos is None: + start_pos = s + end_pos = e + if tok_type == tokenize.ENDMARKER and nest_count: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + if (not nest_count and + (tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))): + default_expr = isolate_expression(sig_text, start_pos, end_pos) + defaults[var_name] = default_expr + sig_args.append(var_name) + break + parts.append((tok_type, tok_string)) + if nest_count and tok_type == tokenize.OP and tok_string == nest_type: + nest_count += 1 + elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type: + nest_count -= 1 + if not nest_count: + nest_type = unnest_type = None + elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'): + nest_type = tok_string + nest_count = 1 + unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type] + return sig_args, var_arg, var_kw, defaults + + +def isolate_expression(string, start_pos, end_pos): + srow, scol = start_pos + srow -= 1 + erow, ecol = end_pos + erow -= 1 + lines = string.splitlines(True) + if srow == erow: + return lines[srow][scol:ecol] + parts = [lines[srow][scol:]] + parts.extend(lines[srow+1:erow]) + if erow < len(lines): + # It'll sometimes give (end_row_past_finish, 0) + parts.append(lines[erow][:ecol]) + return ''.join(parts) + +_fill_command_usage = """\ +%prog [OPTIONS] TEMPLATE arg=value + +Use py:arg=value to set a Python value; otherwise all values are +strings. 
+""" + + +def fill_command(args=None): + import sys + import optparse + import pkg_resources + import os + if args is None: + args = sys.argv[1:] + dist = pkg_resources.get_distribution('Paste') + parser = optparse.OptionParser( + version=coerce_text(dist), + usage=_fill_command_usage) + parser.add_option( + '-o', '--output', + dest='output', + metavar="FILENAME", + help="File to write output to (default stdout)") + parser.add_option( + '--env', + dest='use_env', + action='store_true', + help="Put the environment in as top-level variables") + options, args = parser.parse_args(args) + if len(args) < 1: + print('You must give a template filename') + sys.exit(2) + template_name = args[0] + args = args[1:] + vars = {} + if options.use_env: + vars.update(os.environ) + for value in args: + if '=' not in value: + print('Bad argument: %r' % value) + sys.exit(2) + name, value = value.split('=', 1) + if name.startswith('py:'): + name = name[:3] + value = eval(value) + vars[name] = value + if template_name == '-': + template_content = sys.stdin.read() + template_name = '' + else: + with open(template_name, 'rb') as f: + template_content = f.read() + template = Template(template_content, name=template_name) + result = template.substitute(vars) + if options.output: + with open(options.output, 'wb') as f: + f.write(result) + else: + sys.stdout.write(result) + +if __name__ == '__main__': + fill_command() diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/projection.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/projection.py new file mode 100644 index 0000000000000000000000000000000000000000..57f960e13b3befbb575b7b318883cc81aafbecd8 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/bipartite/projection.py @@ -0,0 +1,528 @@ +"""One-mode (unipartite) projections of bipartite graphs.""" +import networkx as nx +from networkx.exception import NetworkXAlgorithmError +from networkx.utils import not_implemented_for + +__all__ = [ + "projected_graph", + "weighted_projected_graph", + "collaboration_weighted_projected_graph", + "overlap_weighted_projected_graph", + "generic_weighted_projected_graph", +] + + +@nx._dispatch(graphs="B", preserve_node_attrs=True, preserve_graph_attrs=True) +def projected_graph(B, nodes, multigraph=False): + r"""Returns the projection of B onto one of its node sets. + + Returns the graph G that is the projection of the bipartite graph B + onto the specified nodes. They retain their attributes and are connected + in G if they have a common neighbor in B. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + multigraph: bool (default=False) + If True return a multigraph where the multiple edges represent multiple + shared neighbors. They edge key in the multigraph is assigned to the + label of the neighbor. + + Returns + ------- + Graph : NetworkX graph or multigraph + A graph that is the projection onto the given nodes. 
+ + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(4) + >>> G = bipartite.projected_graph(B, [1, 3]) + >>> list(G) + [1, 3] + >>> list(G.edges()) + [(1, 3)] + + If nodes `a`, and `b` are connected through both nodes 1 and 2 then + building a multigraph results in two edges in the projection onto + [`a`, `b`]: + + >>> B = nx.Graph() + >>> B.add_edges_from([("a", 1), ("b", 1), ("a", 2), ("b", 2)]) + >>> G = bipartite.projected_graph(B, ["a", "b"], multigraph=True) + >>> print([sorted((u, v)) for u, v in G.edges()]) + [['a', 'b'], ['a', 'b']] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + Returns a simple graph that is the projection of the bipartite graph B + onto the set of nodes given in list nodes. If multigraph=True then + a multigraph is returned with an edge for every shared neighbor. + + Directed graphs are allowed as input. The output will also then + be a directed graph with edges if there is a directed path between + the nodes. + + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + """ + if B.is_multigraph(): + raise nx.NetworkXError("not defined for multigraphs") + if B.is_directed(): + directed = True + if multigraph: + G = nx.MultiDiGraph() + else: + G = nx.DiGraph() + else: + directed = False + if multigraph: + G = nx.MultiGraph() + else: + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {v for nbr in B[u] for v in B[nbr] if v != u} + if multigraph: + for n in nbrs2: + if directed: + links = set(B[u]) & set(B.pred[n]) + else: + links = set(B[u]) & set(B[n]) + for l in links: + if not G.has_edge(u, n, l): + G.add_edge(u, n, key=l) + else: + G.add_edges_from((u, n) for n in nbrs2) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def weighted_projected_graph(B, nodes, ratio=False): + r"""Returns a weighted projection of B onto one of its node sets. + + The weighted projected graph is the projection of the bipartite + network B onto the specified nodes with weights representing the + number of shared neighbors or the ratio between actual shared + neighbors and possible shared neighbors if ``ratio is True`` [1]_. + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Distinct nodes to project onto (the "bottom" nodes). + + ratio: Bool (default=False) + If True, edge weight is the ratio between actual shared neighbors + and maximum possible shared neighbors (i.e., the size of the other + node set). If False, edges weight is the number of shared neighbors. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. 
+ + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(4) + >>> G = bipartite.weighted_projected_graph(B, [1, 3]) + >>> list(G) + [1, 3] + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 1})] + >>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True) + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 0.5})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite, or that + the input nodes are distinct. However, if the length of the input nodes is + greater than or equal to the nodes in the graph B, an exception is raised. + If the nodes are not distinct but don't raise this error, the output weights + will be incorrect. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + n_top = len(B) - len(nodes) + + if n_top < 1: + raise NetworkXAlgorithmError( + f"the size of the nodes to project onto ({len(nodes)}) is >= the graph size ({len(B)}).\n" + "They are either not a valid bipartite partition or contain duplicates" + ) + + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + common = unbrs & vnbrs + if not ratio: + weight = len(common) + else: + weight = len(common) / n_top + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def collaboration_weighted_projected_graph(B, nodes): + r"""Newman's weighted projection of B onto one of its node sets. + + The collaboration weighted projection is the projection of the + bipartite network B onto the specified nodes with weights assigned + using Newman's collaboration model [1]_: + + .. math:: + + w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1} + + where `u` and `v` are nodes from the bottom bipartite node set, + and `k` is a node of the top node set. + The value `d_k` is the degree of node `k` in the bipartite + network and `\delta_{u}^{k}` is 1 if node `u` is + linked to node `k` in the original bipartite graph or 0 otherwise. + + The nodes retain their attributes and are connected in the resulting + graph if have an edge to a common node in the original bipartite + graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> B.add_edge(1, 5) + >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5]) + >>> list(G) + [0, 2, 4, 5] + >>> for edge in sorted(G.edges(data=True)): + ... print(edge) + ... 
+ (0, 2, {'weight': 0.5}) + (0, 5, {'weight': 0.5}) + (2, 4, {'weight': 1.0}) + (2, 5, {'weight': 0.5}) + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Scientific collaboration networks: II. + Shortest paths, weighted networks, and centrality, + M. E. J. Newman, Phys. Rev. E 64, 016132 (2001). + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr] if n != u} + for v in nbrs2: + vnbrs = set(pred[v]) + common_degree = (len(B[n]) for n in unbrs & vnbrs) + weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1) + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def overlap_weighted_projected_graph(B, nodes, jaccard=True): + r"""Overlap weighted projection of B onto one of its node sets. + + The overlap weighted projection is the projection of the bipartite + network B onto the specified nodes with weights representing + the Jaccard index between the neighborhoods of the two nodes in the + original bipartite network [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|} + + or, if the parameter `jaccard` is False, the number of common + neighbors divided by the smaller of the two neighborhood sizes in + the original bipartite graph [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{\min(|N(u)|, |N(v)|)} + + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original bipartite graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + jaccard: Bool (default=True) + If True, use the Jaccard index as the edge weight; if False, use + the overlap coefficient (shared neighbors divided by the smaller + neighborhood size). + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> nodes = [0, 2, 4] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes) + >>> list(G) + [0, 2, 4] + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False) + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation + Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications.
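+ + As a hand-check of both formulas (an illustrative sketch added here; ``u_nbrs`` and ``v_nbrs`` are ad-hoc names, not part of the API): + + >>> B = nx.path_graph(5) + >>> u_nbrs, v_nbrs = set(B[0]), set(B[2])  # {1} and {1, 3} + >>> len(u_nbrs & v_nbrs) / len(u_nbrs | v_nbrs)  # Jaccard weight + 0.5 + >>> len(u_nbrs & v_nbrs) / min(len(u_nbrs), len(v_nbrs))  # overlap weight (jaccard=False) + 1.0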
+ + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + if jaccard: + wt = len(unbrs & vnbrs) / len(unbrs | vnbrs) + else: + wt = len(unbrs & vnbrs) / min(len(unbrs), len(vnbrs)) + G.add_edge(u, v, weight=wt) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B", preserve_all_attrs=True) +def generic_weighted_projected_graph(B, nodes, weight_function=None): + r"""Weighted projection of B with a user-specified weight function. + + The bipartite network B is projected onto the specified nodes + with weights computed by a user-specified function. This function + must accept the graph and two nodes as parameters and return an + integer or a float. + + The nodes retain their attributes and are connected in the resulting graph + if they have an edge to a common node in the original graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + weight_function : function + This function must accept as parameters the same input graph + that this function receives, and two nodes, and must return an + integer or a float. + The default function computes the number of shared neighbors. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> # Define some custom weight functions + >>> def jaccard(G, u, v): + ... unbrs = set(G[u]) + ... vnbrs = set(G[v]) + ... return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs) + ... + >>> def my_weight(G, u, v, weight="weight"): + ... w = 0 + ... for nbr in set(G[u]) & set(G[v]): + ... w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1) + ... return w + ... + >>> # A complete bipartite graph with 4 nodes and 4 edges + >>> B = nx.complete_bipartite_graph(2, 2) + >>> # Add some arbitrary weight to the edges + >>> for i, (u, v) in enumerate(B.edges()): + ... B.edges[u, v]["weight"] = i + 1 + ... + >>> for edge in B.edges(data=True): + ... print(edge) + ... + (0, 2, {'weight': 1}) + (0, 3, {'weight': 2}) + (1, 2, {'weight': 3}) + (1, 3, {'weight': 4}) + >>> # By default, the weight is the number of shared neighbors + >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 2})] + >>> # To specify a custom weight function use the weight_function parameter + >>> G = bipartite.generic_weighted_projected_graph( + ... B, [0, 1], weight_function=jaccard + ... ) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 1.0})] + >>> G = bipartite.generic_weighted_projected_graph( + ... B, [0, 1], weight_function=my_weight + ... ) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 10})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation <networkx.algorithms.bipartite>` + for further details on how bipartite graphs are handled in NetworkX.
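+ + As one more illustration (a sketch; ``max_shared_weight`` is a hypothetical helper, not part of NetworkX), any callable taking the graph and two nodes can be plugged in, e.g. the largest weight among ``u``'s edges to the shared neighbors, reusing ``B`` from the example above: + + >>> def max_shared_weight(G, u, v, weight="weight"): + ... # hypothetical: max weight on u's edges to neighbors shared with v + ... return max(G[u][n].get(weight, 1) for n in set(G[u]) & set(G[v])) + ... + >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1], weight_function=max_shared_weight) + >>> print(list(G.edges(data=True))) + [(0, 1, {'weight': 2})]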
+ + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + projected_graph + + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + if weight_function is None: + + def weight_function(G, u, v): + # Notice that we use set(pred[v]) for handling the directed case. + return len(set(G[u]) & set(pred[v])) + + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {n for nbr in set(B[u]) for n in B[nbr]} - {u} + for v in nbrs2: + weight = weight_function(B, u, v) + G.add_edge(u, v, weight=weight) + return G diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1c559198ce5c20582c169f47e731a47f44e0cc7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c26a45d54d90c9240f6fa49a68bdd001287bbb49 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ec48c0caf04570e7f6175fa4f912b83c43d5c4 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc59c43c89e988324d14ad41f2ab73c501d436a0 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9e7db09f0b17a4963b861b4180efc5f6005ac5e7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6eee9144285894f3baf95135c7c82e8ca681f75a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..858e67889a11970bda4ef59ec72a60651bc59a08 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81563d88b3af81a0971aed4af39fd67af1a1fd74 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96333dc46dbed0f27ca082b1acef3ba107beec9c Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e12768d335c032a1c0abcddeac5ee05fc2d7f19 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..93e6cdd08ca959835cc5c5f9a3e6dc353f4b217d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__init__.py @@ -0,0 +1,5 @@ +from .beamsearch import * +from .breadth_first_search import * +from .depth_first_search import * +from .edgedfs import * +from .edgebfs import * diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b1f074d34630ccb3a90addf73a07914cfbb83a2 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1356ec62fbf3029a95a6a028b3981eed1e788db Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff9e00012c6e32f5d8fb6371c561c507f1a0868d Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ae912c260b9f0cc7cbad2daabc25ff8fba84a19 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/beamsearch.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/beamsearch.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe93669a3a18f50e8b46ffa8c667db1e2d4a97a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/beamsearch.py @@ -0,0 +1,106 @@ +"""Basic algorithms for breadth-first searching the nodes of a graph.""" +import networkx as nx + +from .breadth_first_search import generic_bfs_edges + +__all__ = ["bfs_beam_edges"] + + +@nx._dispatch +def bfs_beam_edges(G, source, value, width=None): + """Iterates over edges in a beam search. 
+ + The beam search is a generalized breadth-first search in which only + the "best" *w* neighbors of the current node are enqueued, where *w* + is the beam width and "best" is an application-specific + heuristic. In general, a beam search with a small beam width might + not visit each node in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for the breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + value : function + A function that takes a node of the graph as input and returns a + real number indicating how "good" it is. A higher value means it + is more likely to be visited sooner during the search. When + visiting a new node, only the `width` neighbors with the highest + `value` are enqueued (in decreasing order of `value`). + + width : int (default = None) + The beam width for the search. This is the number of neighbors + (ordered by `value`) to enqueue when visiting each new node. + + Yields + ------ + edge + Edges in the beam search starting from `source`, given as a pair + of nodes. + + Examples + -------- + To give nodes with, for example, a higher centrality precedence + during the search, set the `value` function to return the centrality + value of the node: + + >>> G = nx.karate_club_graph() + >>> centrality = nx.eigenvector_centrality(G) + >>> source = 0 + >>> width = 5 + >>> for u, v in nx.bfs_beam_edges(G, source, centrality.get, width): + ... print((u, v)) + ... + (0, 2) + (0, 1) + (0, 8) + (0, 13) + (0, 3) + (2, 32) + (1, 30) + (8, 33) + (3, 7) + (32, 31) + (31, 28) + (31, 25) + (25, 23) + (25, 24) + (23, 29) + (23, 27) + (29, 26) + """ + + if width is None: + width = len(G) + + def successors(v): + """Returns a list of the best neighbors of a node. + + `v` is a node in the graph `G`. + + The "best" neighbors are chosen according to the `value` + function (higher is better). Only the `width` best neighbors of + `v` are returned. + + The list returned by this function is in decreasing order of + value as measured by the `value` function. + + """ + # TODO The Python documentation states that for small values, it + # is better to use `heapq.nlargest`. We should determine the + # threshold at which it is better to use `heapq.nlargest()` + # instead of `sorted()[:]` and apply that optimization here. + # + # If `width` is greater than the number of neighbors of `v`, all + # neighbors are returned by the semantics of slicing in + # Python. This occurs in the special case that the user did not + # specify a `width`: in this case all neighbors are always + # returned, so this is just a (slower) implementation of + # `bfs_edges(G, source)` but with a sorted enqueue step.
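+ # A hedged sketch of that `heapq.nlargest` alternative (an editor's + # assumption, not code used by this module): + # + #     import heapq + #     return iter(heapq.nlargest(width, G.neighbors(v), key=value)) + # + # `heapq.nlargest` returns its results already sorted in decreasing + # order of `value`, so it matches the `sorted(..., reverse=True)[:width]` + # semantics used below, up to the ordering of ties.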
+ return iter(sorted(G.neighbors(v), key=value, reverse=True)[:width]) + + yield from generic_bfs_edges(G, source, successors) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e015b0874066ef039006f8b3e6c6289e5be63e8 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ded19ea275ca6d6d5d1bd9862906adb89df74df3 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py new file mode 100644 index 0000000000000000000000000000000000000000..8945b418457b0c5c31b956f40a9c4ded2a439d9d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py @@ -0,0 +1,32 @@ +"""Unit tests for the beam search functions.""" + +import networkx as nx + + +def identity(x): + return x + + +class TestBeamSearch: + """Unit tests for the beam search function.""" + + def test_narrow(self): + """Tests that a narrow beam width may cause an incomplete search.""" + # In this search, we enqueue only the neighbor 3 at the first + # step, then only the neighbor 2 at the second step. Once at + # node 2, the search chooses node 3, since it has a higher value + # than node 1, but node 3 has already been visited, so the + # search terminates.
+ G = nx.cycle_graph(4) + edges = nx.bfs_beam_edges(G, 0, identity, width=1) + assert list(edges) == [(0, 3), (3, 2)] + + def test_wide(self): + G = nx.cycle_graph(4) + edges = nx.bfs_beam_edges(G, 0, identity, width=2) + assert list(edges) == [(0, 3), (0, 1), (3, 2)] + + def test_width_none(self): + G = nx.cycle_graph(4) + edges = nx.bfs_beam_edges(G, 0, identity, width=None) + assert list(edges) == [(0, 3), (0, 1), (3, 2)] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_bfs.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_bfs.py new file mode 100644 index 0000000000000000000000000000000000000000..f9207cdd21a649b711f912b77dbc79cdb1152b2a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_bfs.py @@ -0,0 +1,212 @@ +from functools import partial + +import pytest + +import networkx as nx + + +class TestBFS: + @classmethod + def setup_class(cls): + # simple graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)]) + cls.G = G + + def test_successor(self): + assert dict(nx.bfs_successors(self.G, source=0)) == {0: [1], 1: [2, 3], 2: [4]} + + def test_predecessor(self): + assert dict(nx.bfs_predecessors(self.G, source=0)) == {1: 0, 2: 1, 3: 1, 4: 2} + + def test_bfs_tree(self): + T = nx.bfs_tree(self.G, source=0) + assert sorted(T.nodes()) == sorted(self.G.nodes()) + assert sorted(T.edges()) == [(0, 1), (1, 2), (1, 3), (2, 4)] + + def test_bfs_edges(self): + edges = nx.bfs_edges(self.G, source=0) + assert list(edges) == [(0, 1), (1, 2), (1, 3), (2, 4)] + + def test_bfs_edges_reverse(self): + D = nx.DiGraph() + D.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)]) + edges = nx.bfs_edges(D, source=4, reverse=True) + assert list(edges) == [(4, 2), (4, 3), (2, 1), (1, 0)] + + def test_bfs_edges_sorting(self): + D = nx.DiGraph() + D.add_edges_from([(0, 1), (0, 2), (1, 4), (1, 3), (2, 5)]) + sort_desc = partial(sorted, reverse=True) + edges_asc = nx.bfs_edges(D, source=0, sort_neighbors=sorted) + edges_desc = nx.bfs_edges(D, source=0, sort_neighbors=sort_desc) + assert list(edges_asc) == [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5)] + assert list(edges_desc) == [(0, 2), (0, 1), (2, 5), (1, 4), (1, 3)] + + def test_bfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T = nx.bfs_tree(G, source=1) + assert sorted(T.nodes()) == [1] + assert sorted(T.edges()) == [] + + def test_bfs_layers(self): + expected = { + 0: [0], + 1: [1], + 2: [2, 3], + 3: [4], + } + assert dict(enumerate(nx.bfs_layers(self.G, sources=[0]))) == expected + assert dict(enumerate(nx.bfs_layers(self.G, sources=0))) == expected + + def test_bfs_layers_missing_source(self): + with pytest.raises(nx.NetworkXError): + next(nx.bfs_layers(self.G, sources="abc")) + with pytest.raises(nx.NetworkXError): + next(nx.bfs_layers(self.G, sources=["abc"])) + + def test_descendants_at_distance(self): + for distance, descendants in enumerate([{0}, {1}, {2, 3}, {4}]): + assert nx.descendants_at_distance(self.G, 0, distance) == descendants + + def test_descendants_at_distance_missing_source(self): + with pytest.raises(nx.NetworkXError): + nx.descendants_at_distance(self.G, "abc", 0) + + def test_bfs_labeled_edges_directed(self): + D = nx.cycle_graph(5, create_using=nx.DiGraph) + expected = [ + (0, 1, "tree"), + (1, 2, "tree"), + (2, 3, "tree"), + (3, 4, "tree"), + (4, 0, "reverse"), + ] + answer = 
list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + D.add_edge(4, 4) + expected.append((4, 4, "level")) + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + D.add_edge(0, 2) + D.add_edge(1, 5) + D.add_edge(2, 5) + D.remove_edge(4, 4) + expected = [ + (0, 1, "tree"), + (0, 2, "tree"), + (1, 2, "level"), + (1, 5, "tree"), + (2, 3, "tree"), + (2, 5, "forward"), + (3, 4, "tree"), + (4, 0, "reverse"), + ] + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + G = D.to_undirected() + G.add_edge(4, 4) + expected = [ + (0, 1, "tree"), + (0, 2, "tree"), + (0, 4, "tree"), + (1, 2, "level"), + (1, 5, "tree"), + (2, 3, "tree"), + (2, 5, "forward"), + (4, 3, "forward"), + (4, 4, "level"), + ] + answer = list(nx.bfs_labeled_edges(G, 0)) + assert expected == answer + + +class TestBreadthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_limited_bfs_successor(self): + assert dict(nx.bfs_successors(self.G, source=1, depth_limit=3)) == { + 1: [0, 2], + 2: [3, 7], + 3: [4], + 7: [8], + } + result = { + n: sorted(s) for n, s in nx.bfs_successors(self.D, source=7, depth_limit=2) + } + assert result == {8: [9], 2: [3], 7: [2, 8]} + + def test_limited_bfs_predecessor(self): + assert dict(nx.bfs_predecessors(self.G, source=1, depth_limit=3)) == { + 0: 1, + 2: 1, + 3: 2, + 4: 3, + 7: 2, + 8: 7, + } + assert dict(nx.bfs_predecessors(self.D, source=7, depth_limit=2)) == { + 2: 7, + 3: 2, + 8: 7, + 9: 8, + } + + def test_limited_bfs_tree(self): + T = nx.bfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_limited_bfs_edges(self): + edges = nx.bfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (9, 10), (8, 7), (7, 2), (2, 1), (2, 3)] + + def test_limited_bfs_layers(self): + assert dict(enumerate(nx.bfs_layers(self.G, sources=[0]))) == { + 0: [0], + 1: [1], + 2: [2], + 3: [3, 7], + 4: [4, 8], + 5: [5, 9], + 6: [6, 10], + } + assert dict(enumerate(nx.bfs_layers(self.D, sources=2))) == { + 0: [2], + 1: [3, 7], + 2: [8], + 3: [9], + 4: [10], + } + + def test_limited_descendants_at_distance(self): + for distance, descendants in enumerate( + [{0}, {1}, {2}, {3, 7}, {4, 8}, {5, 9}, {6, 10}] + ): + assert nx.descendants_at_distance(self.G, 0, distance) == descendants + for distance, descendants in enumerate([{2}, {3, 7}, {8}, {9}, {10}]): + assert nx.descendants_at_distance(self.D, 2, distance) == descendants + + +def test_deprecations(): + G = nx.Graph([(1, 2)]) + generic_bfs = nx.breadth_first_search.generic_bfs_edges + with pytest.deprecated_call(): + list(generic_bfs(G, source=1, sort_neighbors=sorted)) + with pytest.deprecated_call(): + list(generic_bfs(G, source=1, neighbors=G.neighbors, sort_neighbors=sorted)) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_dfs.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_dfs.py new file mode 100644 index 0000000000000000000000000000000000000000..0eb698b0f2da734c564c83a2d23ff7f191c7137a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_dfs.py @@ -0,0 +1,251 @@ +import networkx as nx + + +class TestDFS: 
+ @classmethod + def setup_class(cls): + # simple graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 0), (0, 4)]) + cls.G = G + # simple graph, disconnected + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + cls.D = D + + def test_preorder_nodes(self): + assert list(nx.dfs_preorder_nodes(self.G, source=0)) == [0, 1, 2, 4, 3] + assert list(nx.dfs_preorder_nodes(self.D)) == [0, 1, 2, 3] + assert list(nx.dfs_preorder_nodes(self.D, source=2)) == [2, 3] + + def test_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=0)) == [4, 2, 3, 1, 0] + assert list(nx.dfs_postorder_nodes(self.D)) == [1, 0, 3, 2] + assert list(nx.dfs_postorder_nodes(self.D, source=0)) == [1, 0] + + def test_successor(self): + assert nx.dfs_successors(self.G, source=0) == {0: [1], 1: [2, 3], 2: [4]} + assert nx.dfs_successors(self.G, source=1) == {0: [3, 4], 1: [0], 4: [2]} + assert nx.dfs_successors(self.D) == {0: [1], 2: [3]} + assert nx.dfs_successors(self.D, source=1) == {1: [0]} + + def test_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2} + assert nx.dfs_predecessors(self.D) == {1: 0, 3: 2} + + def test_dfs_tree(self): + exp_nodes = sorted(self.G.nodes()) + exp_edges = [(0, 1), (1, 2), (1, 3), (2, 4)] + # Search from first node + T = nx.dfs_tree(self.G, source=0) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None + T = nx.dfs_tree(self.G, source=None) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None is the default + T = nx.dfs_tree(self.G) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + + def test_dfs_edges(self): + edges = nx.dfs_edges(self.G, source=0) + assert list(edges) == [(0, 1), (1, 2), (2, 4), (1, 3)] + edges = nx.dfs_edges(self.D) + assert list(edges) == [(0, 1), (2, 3)] + + def test_dfs_labeled_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=0)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (1, 2), (2, 4), (1, 3)] + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (1, 2, "forward"), + (2, 1, "nontree"), + (2, 4, "forward"), + (4, 2, "nontree"), + (4, 0, "nontree"), + (2, 4, "reverse"), + (1, 2, "reverse"), + (1, 3, "forward"), + (3, 1, "nontree"), + (3, 0, "nontree"), + (1, 3, "reverse"), + (0, 1, "reverse"), + (0, 3, "nontree"), + (0, 4, "nontree"), + (0, 0, "reverse"), + ] + + def test_dfs_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.D)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (2, 2), (2, 3)] + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (0, 1, "reverse"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (3, 2, "nontree"), + (2, 3, "reverse"), + (2, 2, "reverse"), + ] + + def test_dfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T = nx.dfs_tree(G, source=1) + assert sorted(T.nodes()) == [1] + assert sorted(T.edges()) == [] + T = nx.dfs_tree(G, source=None) + assert sorted(T.nodes()) == [1, 2] + assert sorted(T.edges()) == [] + + +class TestDepthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + 
nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_dls_preorder_nodes(self): + assert list(nx.dfs_preorder_nodes(self.G, source=0, depth_limit=2)) == [0, 1, 2] + assert list(nx.dfs_preorder_nodes(self.D, source=1, depth_limit=2)) == ([1, 0]) + + def test_dls_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=3, depth_limit=3)) == [ + 1, + 7, + 2, + 5, + 4, + 3, + ] + assert list(nx.dfs_postorder_nodes(self.D, source=2, depth_limit=2)) == ( + [3, 7, 2] + ) + + def test_dls_successor(self): + result = nx.dfs_successors(self.G, source=4, depth_limit=3) + assert {n: set(v) for n, v in result.items()} == { + 2: {1, 7}, + 3: {2}, + 4: {3, 5}, + 5: {6}, + } + result = nx.dfs_successors(self.D, source=7, depth_limit=2) + assert {n: set(v) for n, v in result.items()} == {8: {9}, 2: {3}, 7: {8, 2}} + + def test_dls_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0, depth_limit=3) == { + 1: 0, + 2: 1, + 3: 2, + 7: 2, + } + assert nx.dfs_predecessors(self.D, source=2, depth_limit=3) == { + 8: 7, + 9: 8, + 3: 2, + 7: 2, + } + + def test_dls_tree(self): + T = nx.dfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_dls_edges(self): + edges = nx.dfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (8, 7), (7, 2), (2, 1), (2, 3), (9, 10)] + + def test_dls_labeled_edges_depth_1(self): + edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(5, 5), (5, 4), (5, 6)] + # Note: reverse-depth_limit edge types were not reported before gh-6240 + assert edges == [ + (5, 5, "forward"), + (5, 4, "forward"), + (5, 4, "reverse-depth_limit"), + (5, 6, "forward"), + (5, 6, "reverse-depth_limit"), + (5, 5, "reverse"), + ] + + def test_dls_labeled_edges_depth_2(self): + edges = list(nx.dfs_labeled_edges(self.G, source=6, depth_limit=2)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(6, 6), (6, 5), (5, 4)] + assert edges == [ + (6, 6, "forward"), + (6, 5, "forward"), + (5, 4, "forward"), + (5, 4, "reverse-depth_limit"), + (5, 6, "nontree"), + (6, 5, "reverse"), + (6, 6, "reverse"), + ] + + def test_dls_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.D, depth_limit=1)) + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (0, 1, "reverse-depth_limit"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (2, 3, "reverse-depth_limit"), + (2, 7, "forward"), + (2, 7, "reverse-depth_limit"), + (2, 2, "reverse"), + (8, 8, "forward"), + (8, 7, "nontree"), + (8, 9, "forward"), + (8, 9, "reverse-depth_limit"), + (8, 8, "reverse"), + (10, 10, "forward"), + (10, 9, "nontree"), + (10, 10, "reverse"), + ] + # large depth_limit has no impact + edges = list(nx.dfs_labeled_edges(self.D, depth_limit=19)) + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (0, 1, "reverse"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (3, 2, "nontree"), + (2, 3, "reverse"), + (2, 7, "forward"), + (7, 2, "nontree"), + (7, 8, "forward"), + (8, 7, "nontree"), + (8, 9, "forward"), + (9, 8, "nontree"), + (9, 10, "forward"), + (10, 9, "nontree"), + (9, 10, "reverse"), + (8, 9, "reverse"), + (7, 8, "reverse"), + (2, 7, "reverse"), + (2, 2, "reverse"), + ] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py new file mode 100644 index 0000000000000000000000000000000000000000..7c1967cce04b3a0c9db80f9af39d7b1dfd8ef4cb --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py @@ -0,0 +1,131 @@ +import pytest + +import networkx as nx +from networkx.algorithms import edge_dfs +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + +# These tests can fail with hash randomization. The easiest and clearest way +# to write these unit tests is for the edges to be output in an expected total +# order, but we cannot guarantee the order amongst outgoing edges from a node, +# unless each class uses an ordered data structure for neighbors. This is +# painful to do with the current API. The alternative is that the tests are +# written (IMO confusingly) so that there is not a total order over the edges, +# but only a partial order. Due to the small size of the graphs, hopefully +# failures due to hash randomization will not occur. For an example of how +# this can fail, see TestEdgeDFS.test_multigraph. + + +class TestEdgeDFS: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_empty(self): + G = nx.Graph() + edges = list(edge_dfs(G)) + assert edges == [] + + def test_graph(self): + G = nx.Graph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 2), (1, 3)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_invalid(self): + G = nx.DiGraph(self.edges) + edge_iterator = edge_dfs(G, self.nodes, orientation="hello") + pytest.raises(nx.NetworkXError, list, edge_iterator) + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, FORWARD), (3, 1, FORWARD)] + assert x == x_ + + def test_digraph2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0])) + x_ = [(0, 1), (1, 2), (2, 3)] + assert x == x_ + + def test_digraph_rev(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [(1, 0, REVERSE), (0, 1, REVERSE), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_rev2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [3], orientation="reverse")) + x_ = [(2, 3, REVERSE), (1, 2, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] + # This is an example of where hash randomization can break. + # There are 3! * 2 alternative outputs, such as: + # [(0, 1, 1), (1, 0, 0), (0, 1, 2), (1, 3, 0), (1, 2, 0)] + # But note, the edges (1,2,0) and (1,3,0) always follow the (0,1,k) + # edges. So the algorithm only guarantees a partial order. A total + # order is guaranteed only if the graph data structures are ordered. 
+ assert x == x_ + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] + assert x == x_ + + def test_multidigraph_rev(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, 0, REVERSE), + (0, 1, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_ignore2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0], orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 3, FORWARD)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, 0, FORWARD), + (1, 0, 0, FORWARD), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7120d4bc7ef25279b68eaa23690b6ff4574ed676 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/__init__.py @@ -0,0 +1,6 @@ +from .branchings import * +from .coding import * +from .mst import * +from .recognition import * +from .operations import * +from .decomposition import * diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d6d26fa0cb124c96f3d6445cbd4ed239a71df5d Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2c17f97baf86cf2104f70aa8962b7b88efb78b4 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d5849d5b19d91d0e85ca04650686c9e8ccfeb5e Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-311.pyc differ diff --git 
a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_branchings.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_branchings.py new file mode 100644 index 0000000000000000000000000000000000000000..ad1cc33b9a494577638dc34b45736f9f3d1a23fe --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_branchings.py @@ -0,0 +1,632 @@ +import math +from operator import itemgetter + +import pytest + +np = pytest.importorskip("numpy") + +import networkx as nx +from networkx.algorithms.tree import branchings, recognition + +# +# Explicitly discussed examples from Edmonds paper. +# + +# Used in Figures A-F. +# +# fmt: off +G_array = np.array([ + # 0 1 2 3 4 5 6 7 8 + [0, 0, 12, 0, 12, 0, 0, 0, 0], # 0 + [4, 0, 0, 0, 0, 13, 0, 0, 0], # 1 + [0, 17, 0, 21, 0, 12, 0, 0, 0], # 2 + [5, 0, 0, 0, 17, 0, 18, 0, 0], # 3 + [0, 0, 0, 0, 0, 0, 0, 12, 0], # 4 + [0, 0, 0, 0, 0, 0, 14, 0, 12], # 5 + [0, 0, 21, 0, 0, 0, 0, 0, 15], # 6 + [0, 0, 0, 19, 0, 0, 15, 0, 0], # 7 + [0, 0, 0, 0, 0, 0, 0, 18, 0], # 8 +], dtype=int) + +# Two copies of the graph from the original paper as disconnected components +G_big_array = np.zeros(np.array(G_array.shape) * 2, dtype=int) +G_big_array[:G_array.shape[0], :G_array.shape[1]] = G_array +G_big_array[G_array.shape[0]:, G_array.shape[1]:] = G_array + +# fmt: on + + +def G1(): + G = nx.from_numpy_array(G_array, create_using=nx.MultiDiGraph) + return G + + +def G2(): + # Now we shift all the weights by -10. + # Should not affect optimal arborescence, but does affect optimal branching. + Garr = G_array.copy() + Garr[np.nonzero(Garr)] -= 10 + G = nx.from_numpy_array(Garr, create_using=nx.MultiDiGraph) + return G + + +# An optimal branching for G1 that is also a spanning arborescence. So it is +# also an optimal spanning arborescence. +# +optimal_arborescence_1 = [ + (0, 2, 12), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 4, 17), + (3, 6, 18), + (6, 8, 15), + (8, 7, 18), +] + +# For G2, the optimal branching of G1 (with shifted weights) is no longer +# an optimal branching, but it is still an optimal spanning arborescence +# (just with shifted weights). An optimal branching for G2 is similar to what +# appears in figure G (this is greedy_subopt_branching_1a below), but with the +# edge (3, 0, 5), which is now (3, 0, -5), removed. Thus, the optimal branching +# is not a spanning arborescence. The code finds optimal_branching_2a. +# An alternative and equivalent branching is optimal_branching_2b. We would +# need to modify the code to iterate through all equivalent optimal branchings. +# +# These are maximal branchings or arborescences. +optimal_branching_2a = [ + (5, 6, 4), + (6, 2, 11), + (6, 8, 5), + (8, 7, 8), + (2, 1, 7), + (2, 3, 11), + (3, 4, 7), +] +optimal_branching_2b = [ + (8, 7, 8), + (7, 3, 9), + (3, 4, 7), + (3, 6, 8), + (6, 2, 11), + (2, 1, 7), + (1, 5, 3), +] +optimal_arborescence_2 = [ + (0, 2, 2), + (2, 1, 7), + (2, 3, 11), + (1, 5, 3), + (3, 4, 7), + (3, 6, 8), + (6, 8, 5), + (8, 7, 8), +] + +# Two suboptimal maximal branchings on G1 obtained from a greedy algorithm. +# 1a matches what is shown in Figure G in Edmonds's paper. 
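+ # Editor's aside (a sketch, not used by the tests): for branchings written + # as (u, v, w) triples like the two lists below, the branching weight is + # simply the sum of the w entries, e.g. + # + #     assert sum(w for _, _, w in greedy_subopt_branching_1a) == 128 + #     assert sum(w for _, _, w in greedy_subopt_branching_1b) == 127 + # + # which matches the values asserted by the greedy suboptimal branching + # tests further down.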
+greedy_subopt_branching_1a = [ + (5, 6, 14), + (6, 2, 21), + (6, 8, 15), + (8, 7, 18), + (2, 1, 17), + (2, 3, 21), + (3, 0, 5), + (3, 4, 17), +] +greedy_subopt_branching_1b = [ + (8, 7, 18), + (7, 6, 15), + (6, 2, 21), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 0, 5), + (3, 4, 17), +] + + +def build_branching(edges, double=False): + G = nx.DiGraph() + for u, v, weight in edges: + G.add_edge(u, v, weight=weight) + if double: + G.add_edge(u + 9, v + 9, weight=weight) + return G + + +def sorted_edges(G, attr="weight", default=1): + edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)] + edges = sorted(edges, key=lambda x: (x[2], x[1], x[0])) + return edges + + +def assert_equal_branchings(G1, G2, attr="weight", default=1): + edges1 = list(G1.edges(data=True)) + edges2 = list(G2.edges(data=True)) + assert len(edges1) == len(edges2) + + # Grab the weights only. + e1 = sorted_edges(G1, attr, default) + e2 = sorted_edges(G2, attr, default) + + for a, b in zip(e1, e2): + assert a[:2] == b[:2] + np.testing.assert_almost_equal(a[2], b[2]) + + +################ + + +def test_optimal_branching1(): + G = build_branching(optimal_arborescence_1) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 131 + + +def test_optimal_branching2a(): + G = build_branching(optimal_branching_2a) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 53 + + +def test_optimal_branching2b(): + G = build_branching(optimal_branching_2b) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 53 + + +def test_optimal_arborescence2(): + G = build_branching(optimal_arborescence_2) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 51 + + +def test_greedy_suboptimal_branching1a(): + G = build_branching(greedy_subopt_branching_1a) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 128 + + +def test_greedy_suboptimal_branching1b(): + G = build_branching(greedy_subopt_branching_1b) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 127 + + +def test_greedy_max1(): + # Standard test. + # + G = G1() + B = branchings.greedy_branching(G) + # There are only two possible greedy branchings. The sorting is such + # that it should equal the second suboptimal branching: 1b. + B_ = build_branching(greedy_subopt_branching_1b) + assert_equal_branchings(B, B_) + + +def test_greedy_branching_kwarg_kind(): + G = G1() + with pytest.raises(nx.NetworkXException, match="Unknown value for `kind`."): + B = branchings.greedy_branching(G, kind="lol") + + +def test_greedy_branching_for_unsortable_nodes(): + G = nx.DiGraph() + G.add_weighted_edges_from([((2, 3), 5, 1), (3, "a", 1), (2, 4, 5)]) + edges = [(u, v, data.get("weight", 1)) for (u, v, data) in G.edges(data=True)] + with pytest.raises(TypeError): + edges.sort(key=itemgetter(2, 0, 1), reverse=True) + B = branchings.greedy_branching(G, kind="max").edges(data=True) + assert list(B) == [ + ((2, 3), 5, {"weight": 1}), + (3, "a", {"weight": 1}), + (2, 4, {"weight": 5}), + ] + + +def test_greedy_max2(): + # Different default weight. + # + G = G1() + del G[1][0][0]["weight"] + B = branchings.greedy_branching(G, default=6) + # Chosen so that edge (3,0,5) is not selected and (1,0,6) is instead.
+ + edges = [ + (1, 0, 6), + (1, 5, 13), + (7, 6, 15), + (2, 1, 17), + (3, 4, 17), + (8, 7, 18), + (2, 3, 21), + (6, 2, 21), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_greedy_max3(): + # All equal weights. + # + G = G1() + B = branchings.greedy_branching(G, attr=None) + + # This is mostly arbitrary...the output was generated by running the algo. + edges = [ + (2, 1, 1), + (3, 0, 1), + (3, 4, 1), + (5, 8, 1), + (6, 2, 1), + (7, 3, 1), + (7, 6, 1), + (8, 7, 1), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_, default=1) + + +def test_greedy_min(): + G = G1() + B = branchings.greedy_branching(G, kind="min") + + edges = [ + (1, 0, 4), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (7, 3, 19), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_edmonds1_maxbranch(): + G = G1() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_maxarbor(): + G = G1() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_minimal_branching(): + # graph will have something like a minimum arborescence but no spanning one + G = nx.from_numpy_array(G_big_array, create_using=nx.DiGraph) + B = branchings.minimal_branching(G) + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + B_ = build_branching(edges, double=True) + assert_equal_branchings(B, B_) + + +def test_edmonds2_maxbranch(): + G = G2() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_branching_2a) + assert_equal_branchings(x, x_) + + +def test_edmonds2_maxarbor(): + G = G2() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_2) + assert_equal_branchings(x, x_) + + +def test_edmonds2_minarbor(): + G = G1() + x = branchings.minimum_spanning_arborescence(G) + # This was obtained from algorithm. Need to verify it independently. + # Branch weight is: 96 + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch1(): + G = G1() + x = branchings.minimum_branching(G) + edges = [] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch2(): + G = G1() + G.add_edge(8, 9, weight=-10) + x = branchings.minimum_branching(G) + edges = [(8, 9, -10)] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +# Need more tests + + +def test_mst(): + # Make sure we get the same results for undirected graphs. 
+ # Example from: https://en.wikipedia.org/wiki/Kruskal's_algorithm + G = nx.Graph() + edgelist = [ + (0, 3, [("weight", 5)]), + (0, 1, [("weight", 7)]), + (1, 3, [("weight", 9)]), + (1, 2, [("weight", 8)]), + (1, 4, [("weight", 7)]), + (3, 4, [("weight", 15)]), + (3, 5, [("weight", 6)]), + (2, 4, [("weight", 5)]), + (4, 5, [("weight", 8)]), + (4, 6, [("weight", 9)]), + (5, 6, [("weight", 11)]), + ] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + edges = [ + ({0, 1}, 7), + ({0, 3}, 5), + ({3, 5}, 6), + ({1, 4}, 7), + ({4, 2}, 5), + ({4, 6}, 9), + ] + + assert x.number_of_edges() == len(edges) + for u, v, d in x.edges(data=True): + assert ({u, v}, d["weight"]) in edges + + +def test_mixed_nodetypes(): + # Smoke test to make sure no TypeError is raised for mixed node types. + G = nx.Graph() + edgelist = [(0, 3, [("weight", 5)]), (0, "1", [("weight", 5)])] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + +def test_edmonds1_minbranch(): + # Using -G_array and min should give the same as optimal_arborescence_1, + # but with all edges negative. + edges = [(u, v, -w) for (u, v, w) in optimal_arborescence_1] + + G = nx.from_numpy_array(-G_array, create_using=nx.DiGraph) + + # Quickly make sure max branching is empty. + x = branchings.maximum_branching(G) + x_ = build_branching([]) + assert_equal_branchings(x, x_) + + # Now test the min branching. + x = branchings.minimum_branching(G) + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edge_attribute_preservation_normal_graph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for normal graphs. + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + B = branchings.maximum_branching(G, preserve_attrs=True) + + assert B[0][1]["otherattr"] == 1 + assert B[0][1]["otherattr2"] == 3 + + +def test_edge_attribute_preservation_multigraph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for multigraphs. 
+ G = nx.MultiGraph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist * 2) # Make sure we have duplicate edge paths + + B = branchings.maximum_branching(G, preserve_attrs=True) + + assert B[0][1][0]["otherattr"] == 1 + assert B[0][1][0]["otherattr2"] == 3 + + +# TODO remove with Edmonds +def test_Edmond_kind(): + G = nx.MultiGraph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist * 2) # Make sure we have duplicate edge paths + ed = branchings.Edmonds(G) + with pytest.raises(nx.NetworkXException, match="Unknown value for `kind`."): + ed.find_optimum(kind="lol", preserve_attrs=True) + + +# TODO remove with MultiDiGraph_EdgeKey +def test_MultiDiGraph_EdgeKey(): + # test if more than one edges has the same key + G = branchings.MultiDiGraph_EdgeKey() + G.add_edge(1, 2, "A") + with pytest.raises(Exception, match="Key 'A' is already in use."): + G.add_edge(3, 4, "A") + # test if invalid edge key was specified + with pytest.raises(KeyError, match="Invalid edge key 'B'"): + G.remove_edge_with_key("B") + # test remove_edge_with_key works + if G.remove_edge_with_key("A"): + assert list(G.edges(data=True)) == [] + # test that remove_edges_from doesn't work + G.add_edge(1, 3, "A") + with pytest.raises(NotImplementedError): + G.remove_edges_from([(1, 3)]) + + +def test_edge_attribute_discard(): + # Test that edge attributes are discarded if we do not specify to keep them + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + B = branchings.maximum_branching(G, preserve_attrs=False) + + edge_dict = B[0][1] + with pytest.raises(KeyError): + _ = edge_dict["otherattr"] + + +def test_partition_spanning_arborescence(): + """ + Test that we can generate minimum spanning arborescences which respect the + given partition. + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + G[3][0]["partition"] = nx.EdgePartition.EXCLUDED + G[2][3]["partition"] = nx.EdgePartition.INCLUDED + G[7][3]["partition"] = nx.EdgePartition.EXCLUDED + G[0][2]["partition"] = nx.EdgePartition.EXCLUDED + G[6][2]["partition"] = nx.EdgePartition.INCLUDED + + actual_edges = [ + (0, 4, 12), + (1, 0, 4), + (1, 5, 13), + (2, 3, 21), + (4, 7, 12), + (5, 6, 14), + (5, 8, 12), + (6, 2, 21), + ] + + B = branchings.minimum_spanning_arborescence(G, partition="partition") + assert_equal_branchings(build_branching(actual_edges), B) + + +def test_arborescence_iterator_min(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arborescences in this graph. 
+ This test will not verify all of them individually, but will check two
+ things:
+
+ * The iterator returns 680 arborescences
+ * The weight of the arborescences is non-decreasing
+
+ For more information, please visit
+ https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html
+ """
+ G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+
+ arborescence_count = 0
+ arborescence_weight = -math.inf
+ for B in branchings.ArborescenceIterator(G):
+ arborescence_count += 1
+ new_arborescence_weight = B.size(weight="weight")
+ assert new_arborescence_weight >= arborescence_weight
+ arborescence_weight = new_arborescence_weight
+
+ assert arborescence_count == 680
+
+
+def test_arborescence_iterator_max():
+ """
+ Tests the arborescence iterator.
+
+ A brute force method found 680 arborescences in this graph.
+ This test will not verify all of them individually, but will check two
+ things:
+
+ * The iterator returns 680 arborescences
+ * The weight of the arborescences is non-increasing
+
+ For more information, please visit
+ https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html
+ """
+ G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+
+ arborescence_count = 0
+ arborescence_weight = math.inf
+ for B in branchings.ArborescenceIterator(G, minimum=False):
+ arborescence_count += 1
+ new_arborescence_weight = B.size(weight="weight")
+ assert new_arborescence_weight <= arborescence_weight
+ arborescence_weight = new_arborescence_weight
+
+ assert arborescence_count == 680
+
+
+def test_arborescence_iterator_initial_partition():
+ """
+ Tests the arborescence iterator with three included edges and three excluded
+ edges in the initial partition.
+
+ A brute force method similar to the one used in the above tests found that
+ there are 16 arborescences which contain the included edges and not the
+ excluded edges.
+ """
+ G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+ included_edges = [(1, 0), (5, 6), (8, 7)]
+ excluded_edges = [(0, 2), (3, 6), (1, 5)]
+
+ arborescence_count = 0
+ arborescence_weight = -math.inf
+ for B in branchings.ArborescenceIterator(
+ G, init_partition=(included_edges, excluded_edges)
+ ):
+ arborescence_count += 1
+ new_arborescence_weight = B.size(weight="weight")
+ assert new_arborescence_weight >= arborescence_weight
+ arborescence_weight = new_arborescence_weight
+ for e in included_edges:
+ assert e in B.edges
+ for e in excluded_edges:
+ assert e not in B.edges
+ assert arborescence_count == 16
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_coding.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_coding.py
new file mode 100644
index 0000000000000000000000000000000000000000..c695fea5fdab4f09f79c69f8837bb07f65d16540
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_coding.py
@@ -0,0 +1,113 @@
+"""Unit tests for the :mod:`~networkx.algorithms.tree.coding` module."""
+from itertools import product
+
+import pytest
+
+import networkx as nx
+from networkx.utils import edges_equal, nodes_equal
+
+
+class TestPruferSequence:
+ """Unit tests for the Prüfer sequence encoding and decoding
+ functions.
+ + """ + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_prufer_sequence(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.null_graph()) + + def test_trivial_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.trivial_graph()) + + def test_bad_integer_labels(self): + with pytest.raises(KeyError): + T = nx.Graph(nx.utils.pairwise("abc")) + nx.to_prufer_sequence(T) + + def test_encoding(self): + """Tests for encoding a tree as a Prüfer sequence using the + iterative strategy. + + """ + # Example from Wikipedia. + tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]) + sequence = nx.to_prufer_sequence(tree) + assert sequence == [3, 3, 3, 4] + + def test_decoding(self): + """Tests for decoding a tree from a Prüfer sequence.""" + # Example from Wikipedia. + sequence = [3, 3, 3, 4] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(6))) + edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + assert edges_equal(list(tree.edges()), edges) + + def test_decoding2(self): + # Example from "An Optimal Algorithm for Prufer Codes". + sequence = [2, 4, 0, 1, 3, 3] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(8))) + edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)] + assert edges_equal(list(tree.edges()), edges) + + def test_inverse(self): + """Tests that the encoding and decoding functions are inverses.""" + for T in nx.nonisomorphic_trees(4): + T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T)) + assert nodes_equal(list(T), list(T2)) + assert edges_equal(list(T.edges()), list(T2.edges())) + + for seq in product(range(4), repeat=2): + seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq)) + assert list(seq) == seq2 + + +class TestNestedTuple: + """Unit tests for the nested tuple encoding and decoding functions.""" + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_nested_tuple(G, 0) + + def test_unknown_root(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.to_nested_tuple(G, "bogus") + + def test_encoding(self): + T = nx.full_rary_tree(2, 2**3 - 1) + expected = (((), ()), ((), ())) + actual = nx.to_nested_tuple(T, 0) + assert nodes_equal(expected, actual) + + def test_canonical_form(self): + T = nx.Graph() + T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + T.add_edges_from([(1, 4), (1, 5)]) + T.add_edges_from([(3, 6), (3, 7)]) + root = 0 + actual = nx.to_nested_tuple(T, root, canonical_form=True) + expected = ((), ((), ()), ((), ())) + assert actual == expected + + def test_decoding(self): + balanced = (((), ()), ((), ())) + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.from_nested_tuple(balanced) + assert nx.is_isomorphic(expected, actual) + + def test_sensible_relabeling(self): + balanced = (((), ()), ((), ())) + T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + assert nodes_equal(list(T), list(range(2**3 - 1))) + assert edges_equal(list(T.edges()), edges) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_decomposition.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_decomposition.py new file mode 100644 index 0000000000000000000000000000000000000000..8c376053794537611f46c038ed074eb92b1ba676 --- 
/dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_decomposition.py @@ -0,0 +1,79 @@ +import networkx as nx +from networkx.algorithms.tree.decomposition import junction_tree + + +def test_junction_tree_directed_confounders(): + B = nx.DiGraph() + B.add_edges_from([("A", "C"), ("B", "C"), ("C", "D"), ("C", "E")]) + + G = junction_tree(B) + J = nx.Graph() + J.add_edges_from( + [ + (("C", "E"), ("C",)), + (("C",), ("A", "B", "C")), + (("A", "B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_nodes(): + B = nx.DiGraph() + B.add_nodes_from([("A", "B", "C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B", "C", "D")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_cascade(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("B", "C"), ("C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "B"), ("B",)), + (("B",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_edges(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("C", "D"), ("E", "F")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B"), ("C", "D"), ("E", "F")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_undirected(): + B = nx.Graph() + B.add_edges_from([("A", "C"), ("A", "D"), ("B", "C"), ("C", "E")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "D"), ("A",)), + (("A",), ("A", "C")), + (("A", "C"), ("C",)), + (("C",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "E")), + ] + ) + + assert nx.is_isomorphic(G, J) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_mst.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_mst.py new file mode 100644 index 0000000000000000000000000000000000000000..373f16cf7a016f1ed56ba2826745783568f20b5e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_mst.py @@ -0,0 +1,708 @@ +"""Unit tests for the :mod:`networkx.algorithms.tree.mst` module.""" + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def test_unknown_algorithm(): + with pytest.raises(ValueError): + nx.minimum_spanning_tree(nx.Graph(), algorithm="random") + with pytest.raises( + ValueError, match="random is not a valid choice for an algorithm." + ): + nx.maximum_spanning_edges(nx.Graph(), algorithm="random") + + +class MinimumSpanningTreeTestBase: + """Base class for test classes for minimum spanning tree algorithms. + This class contains some common tests that will be inherited by + subclasses. Each subclass must have a class attribute + :data:`algorithm` that is a string representing the algorithm to + run, as described under the ``algorithm`` keyword argument for the + :func:`networkx.minimum_spanning_edges` function. Subclasses can + then implement any algorithm-specific tests. + """ + + def setup_method(self, method): + """Creates an example graph and stores the expected minimum and + maximum spanning tree edges. + """ + # This stores the class attribute `algorithm` in an instance attribute. 
+ self.algo = self.algorithm + # This example graph comes from Wikipedia: + # https://en.wikipedia.org/wiki/Kruskal's_algorithm + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + self.minimum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (0, 3, {"weight": 5}), + (1, 4, {"weight": 7}), + (2, 4, {"weight": 5}), + (3, 5, {"weight": 6}), + (4, 6, {"weight": 9}), + ] + self.maximum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (1, 2, {"weight": 8}), + (1, 3, {"weight": 9}), + (3, 4, {"weight": 15}), + (4, 6, {"weight": 9}), + (5, 6, {"weight": 11}), + ] + + def test_minimum_edges(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_edges(self): + edges = nx.maximum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_without_data(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo, data=False) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + + def test_nan_weights(self): + # Edge weights NaN never appear in the spanning tree. 
see #2164 + G = self.G + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + # Now test for raising exception + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test default for ignore_nan as False + edges = nx.minimum_spanning_edges(G, algorithm=self.algo, data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_MultiGraph(self): + G = nx.MultiGraph() + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm="prim", data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test default for ignore_nan as False + edges = nx.minimum_spanning_edges(G, algorithm="prim", data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_order(self): + # now try again with a nan edge at the beginning of G.nodes + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_edge(0, 7, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_isolated_node(self): + # now try again with an isolated node + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_node(0) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_minimum_tree(self): + T = nx.minimum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_tree(self): + T = nx.maximum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_disconnected(self): + G = nx.Graph([(0, 1, {"weight": 1}), (2, 3, {"weight": 2})]) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(list(T), list(range(4))) + assert edges_equal(list(T.edges()), [(0, 1), (2, 3)]) + + def test_empty_graph(self): + G = nx.empty_graph(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(sorted(T), list(range(3))) + assert T.number_of_edges() == 0 + + def test_attributes(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1, color="red", distance=7) + G.add_edge(2, 3, weight=1, color="green", distance=2) + G.add_edge(1, 3, weight=10, color="blue", distance=1) + G.graph["foo"] = "bar" + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert T.graph == G.graph + assert nodes_equal(T, G) + for u, v in T.edges(): + assert T.adj[u][v] 
== G.adj[u][v] + + def test_weight_attribute(self): + G = nx.Graph() + G.add_edge(0, 1, weight=1, distance=7) + G.add_edge(0, 2, weight=30, distance=1) + G.add_edge(1, 2, weight=1, distance=1) + G.add_node(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 2), (1, 2)]) + T = nx.maximum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 1), (0, 2)]) + + +class TestBoruvka(MinimumSpanningTreeTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Borůvka's algorithm. + """ + + algorithm = "boruvka" + + def test_unicode_name(self): + """Tests that using a Unicode string can correctly indicate + Borůvka's algorithm. + """ + edges = nx.minimum_spanning_edges(self.G, algorithm="borůvka") + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + +class MultigraphMSTTestBase(MinimumSpanningTreeTestBase): + # Abstract class + + def test_multigraph_keys_min(self): + """Tests that the minimum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + min_edges = nx.minimum_spanning_edges + mst_edges = min_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "b")], list(mst_edges)) + + def test_multigraph_keys_max(self): + """Tests that the maximum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + max_edges = nx.maximum_spanning_edges + mst_edges = max_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "a")], list(mst_edges)) + + +class TestKruskal(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Kruskal's algorithm. + """ + + algorithm = "kruskal" + + def test_key_data_bool(self): + """Tests that the keys and data values are included in + MST edges based on whether keys and data parameters are + true or false""" + G = nx.MultiGraph() + G.add_edge(1, 2, key=1, weight=2) + G.add_edge(1, 2, key=2, weight=3) + G.add_edge(3, 2, key=1, weight=2) + G.add_edge(3, 1, key=1, weight=4) + + # keys are included and data is not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=False + ) + assert edges_equal([(1, 2, 1), (2, 3, 1)], list(mst_edges)) + + # keys are not included and data is included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=True + ) + assert edges_equal( + [(1, 2, {"weight": 2}), (2, 3, {"weight": 2})], list(mst_edges) + ) + + # both keys and data are not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=False + ) + assert edges_equal([(1, 2), (2, 3)], list(mst_edges)) + + # both keys and data are included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=True + ) + assert edges_equal( + [(1, 2, 1, {"weight": 2}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + +class TestPrim(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Prim's algorithm. 
+ """ + + algorithm = "prim" + + def test_prim_mst_edges_simple_graph(self): + H = nx.Graph() + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, {"key": 2, "weight": 3}), (2, 3, {"key": 1, "weight": 2})], + list(mst_edges), + ) + + def test_ignore_nan(self): + """Tests that the edges with NaN weights are ignored or + raise an Error based on ignore_nan is true or false""" + H = nx.MultiGraph() + H.add_edge(1, 2, key=1, weight=float("nan")) + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + # NaN weight edges are ignored when ignore_nan=True + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, 2, {"weight": 3}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + # NaN weight edges raise Error when ignore_nan=False + with pytest.raises(ValueError): + list(nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=False)) + + def test_multigraph_keys_tree(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 1)], list(T.edges(data="weight"))) + + def test_multigraph_keys_tree_max(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.maximum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 2)], list(T.edges(data="weight"))) + + +class TestSpanningTreeIterator: + """ + Tests the spanning tree iterator on the example graph in the 2005 Sörensen + and Janssens paper An Algorithm to Generate all Spanning Trees of a Graph in + Order of Increasing Cost + """ + + def setup_method(self): + # Original Graph + edges = [(0, 1, 5), (1, 2, 4), (1, 4, 6), (2, 3, 5), (2, 4, 7), (3, 4, 3)] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + # List of lists of spanning trees in increasing order + self.spanning_trees = [ + # 1, MST, cost = 17 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 2, cost = 18 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (3, 4, {"weight": 3}), + ], + # 3, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 4, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 5, cost = 20 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + ], + # 6, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 7, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + # 8, cost = 23 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + ] + + def test_minimum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in increasing order + """ + tree_index = 0 + for tree in nx.SpanningTreeIterator(self.G): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index += 1 + + def test_maximum_spanning_tree_iterator(self): + """ + Tests that 
the spanning trees are correctly returned in decreasing order + """ + tree_index = 7 + for tree in nx.SpanningTreeIterator(self.G, minimum=False): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index -= 1 + + +def test_random_spanning_tree_multiplicative_small(): + """ + Using a fixed seed, sample one tree for repeatability. + """ + from math import exp + + pytest.importorskip("scipy") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + solution_edges = [(2, 3), (3, 4), (0, 5), (5, 4), (4, 1)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=42) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_multiplicative_large(): + """ + Sample many trees from the distribution created in the last test + """ + from math import exp + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + # Find the multiplicative weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 1 + for u, v, d in t.edges(data="lambda_key"): + weight *= d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.15. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # python 3.7 dict order is guaranteed so the expected and actual data will + # have the same order. + sample_size = 1200 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to sample_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass those into sample_spanning_tree. 
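+ # A single Random(37) instance is shared across all draws below; its
+ # internal state advances between calls, so successive samples can differ
+ # while the run as a whole remains reproducible.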
+ rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=rng) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized product + # of the edge weights in the tree. + # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. + _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 + + +def test_random_spanning_tree_additive_small(): + """ + Sample a single spanning tree from the additive method. + """ + pytest.importorskip("scipy") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree( + G, weight="weight", multiplicative=False, seed=37 + ) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_additive_large(): + """ + Sample many spanning trees from the additive method. + """ + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + edges = { + (0, 1): 1, + (0, 2): 1, + (0, 5): 3, + (1, 2): 2, + (1, 4): 3, + (2, 3): 3, + (5, 3): 4, + (5, 4): 5, + (4, 3): 4, + } + + # Build the graph + G = nx.Graph() + for u, v in edges: + G.add_edge(u, v, weight=edges[(u, v)]) + + # Find the additive weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 0 + for u, v, d in t.edges(data="weight"): + weight += d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.07. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # python 3.7 dict order is guaranteed so the expected and actual data will + # have the same order. + sample_size = 500 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to sample_spanning_tree we will get the same tree each time. 
+ # Instead, we can create our own random number generator with a fixed seed + # and pass those into sample_spanning_tree. + rng = Random(37) + for _ in range(sample_size): + sampled_tree = nx.random_spanning_tree( + G, "weight", multiplicative=False, seed=rng + ) + assert nx.is_tree(sampled_tree) + + for t in tree_expected: + if nx.utils.edges_equal(t.edges, sampled_tree.edges): + tree_actual[t] += 1 + break + + # Conduct a Chi squared test to see if the actual distribution matches the + # expected one at an alpha = 0.05 significance level. + # + # H_0: The distribution of trees in tree_actual matches the normalized product + # of the edge weights in the tree. + # + # H_a: The distribution of trees in tree_actual follows some other + # distribution of spanning trees. + _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values())) + + # Assert that p is greater than the significance level so that we do not + # reject the null hypothesis + assert not p < 0.05 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_operations.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_operations.py new file mode 100644 index 0000000000000000000000000000000000000000..284d94e2e5059de267b5ea47f6012a42c6ac4639 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_operations.py @@ -0,0 +1,53 @@ +from itertools import chain + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def _check_custom_label_attribute(input_trees, res_tree, label_attribute): + res_attr_dict = nx.get_node_attributes(res_tree, label_attribute) + res_attr_set = set(res_attr_dict.values()) + input_label = (tree for tree, root in input_trees) + input_label_set = set(chain.from_iterable(input_label)) + return res_attr_set == input_label_set + + +def test_empty_sequence(): + """Joining the empty sequence results in the tree with one node.""" + T = nx.join_trees([]) + assert len(T) == 1 + assert T.number_of_edges() == 0 + + +def test_single(): + """Joining just one tree yields a tree with one more node.""" + T = nx.empty_graph(1) + trees = [(T, 0)] + actual_with_label = nx.join_trees(trees, label_attribute="custom_label") + expected = nx.path_graph(2) + assert nodes_equal(list(expected), list(actual_with_label)) + assert edges_equal(list(expected.edges()), list(actual_with_label.edges())) + + +def test_basic(): + """Joining multiple subtrees at a root node.""" + trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.join_trees(trees, label_attribute="old_labels") + assert nx.is_isomorphic(actual, expected) + assert _check_custom_label_attribute(trees, actual, "old_labels") + + actual_without_label = nx.join_trees(trees) + assert nx.is_isomorphic(actual_without_label, expected) + # check that no labels were stored + assert all(not data for _, data in actual_without_label.nodes(data=True)) + + +def test_first_label(): + """Test the functionality of the first_label argument.""" + T1 = nx.path_graph(3) + T2 = nx.path_graph(2) + actual = nx.join_trees([(T1, 0), (T2, 0)], first_label=10) + expected_nodes = set(range(10, 16)) + assert set(actual.nodes()) == expected_nodes + assert set(actual.neighbors(10)) == {11, 14} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_recognition.py 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_recognition.py new file mode 100644 index 0000000000000000000000000000000000000000..a9c6c5aade9f1b3a317541c68affd4b5c2f21752 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tree/tests/test_recognition.py @@ -0,0 +1,162 @@ +import pytest + +import networkx as nx + + +class TestTreeRecognition: + graph = nx.Graph + multigraph = nx.MultiGraph + + @classmethod + def setup_class(cls): + cls.T1 = cls.graph() + + cls.T2 = cls.graph() + cls.T2.add_node(1) + + cls.T3 = cls.graph() + cls.T3.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T3.add_edges_from(edges) + + cls.T5 = cls.multigraph() + cls.T5.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T5.add_edges_from(edges) + + cls.T6 = cls.graph() + cls.T6.add_nodes_from([6, 7]) + cls.T6.add_edge(6, 7) + + cls.F1 = nx.compose(cls.T6, cls.T3) + + cls.N4 = cls.graph() + cls.N4.add_node(1) + cls.N4.add_edge(1, 1) + + cls.N5 = cls.graph() + cls.N5.add_nodes_from(range(5)) + + cls.N6 = cls.graph() + cls.N6.add_nodes_from(range(3)) + cls.N6.add_edges_from([(0, 1), (1, 2), (2, 0)]) + + cls.NF1 = nx.compose(cls.T6, cls.N6) + + def test_null_tree(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.graph()) + + def test_null_tree2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.multigraph()) + + def test_null_forest(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.graph()) + + def test_null_forest2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.multigraph()) + + def test_is_tree(self): + assert nx.is_tree(self.T2) + assert nx.is_tree(self.T3) + assert nx.is_tree(self.T5) + + def test_is_not_tree(self): + assert not nx.is_tree(self.N4) + assert not nx.is_tree(self.N5) + assert not nx.is_tree(self.N6) + + def test_is_forest(self): + assert nx.is_forest(self.T2) + assert nx.is_forest(self.T3) + assert nx.is_forest(self.T5) + assert nx.is_forest(self.F1) + assert nx.is_forest(self.N5) + + def test_is_not_forest(self): + assert not nx.is_forest(self.N4) + assert not nx.is_forest(self.N6) + assert not nx.is_forest(self.NF1) + + +class TestDirectedTreeRecognition(TestTreeRecognition): + graph = nx.DiGraph + multigraph = nx.MultiDiGraph + + +def test_disconnected_graph(): + # https://github.com/networkx/networkx/issues/1144 + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + +def test_dag_nontree(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (0, 2), (1, 2)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_multicycle(): + G = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (0, 1)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_emptybranch(): + G = nx.DiGraph() + G.add_nodes_from(range(10)) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_path(): + G = nx.DiGraph() + nx.add_path(G, range(5)) + assert nx.is_branching(G) + assert nx.is_arborescence(G) + + +def test_notbranching1(): + # Acyclic violation. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (1, 0)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notbranching2(): + # In-degree violation. 
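+ # Node 2 receives edges from both 0 and 3, giving it in-degree 2.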
+ G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (3, 2)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence1(): + # Not an arborescence due to not spanning. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (5, 6)]) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence2(): + # Not an arborescence due to in-degree violation. + G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + G.add_edge(6, 4) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G)
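As a closing aid for the recognition tests above, here is a minimal standalone sketch (separate from the vendored diff itself) of the distinction they exercise: is_branching only requires a forest whose nodes have in-degree at most one, while is_arborescence additionally requires a single out-tree spanning every node.

import networkx as nx

# Two disjoint out-trees: a valid branching (forest, in-degree <= 1) ...
G = nx.DiGraph([(0, 1), (0, 2), (3, 4)])
assert nx.is_branching(G)
# ... but not an arborescence, since no single root reaches every node.
assert not nx.is_arborescence(G)

# One rooted out-tree spanning all nodes: both a branching and an arborescence.
H = nx.DiGraph([(0, 1), (0, 2), (1, 3)])
assert nx.is_branching(H) and nx.is_arborescence(H)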