diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Cygdb.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Cygdb.py new file mode 100644 index 0000000000000000000000000000000000000000..596e5e11b010163ffca658a1c3f7f740304fe380 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Cygdb.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python + +""" +The Cython debugger + +The current directory should contain a directory named 'cython_debug', or a +path to the cython project directory should be given (the parent directory of +cython_debug). + +Additional gdb args can be provided only if a path to the project directory is +given. +""" + +import os +import sys +import glob +import tempfile +import textwrap +import subprocess +import optparse +import logging + +logger = logging.getLogger(__name__) + + +def make_command_file(path_to_debug_info, prefix_code='', + no_import=False, skip_interpreter=False): + if not no_import: + pattern = os.path.join(path_to_debug_info, + 'cython_debug', + 'cython_debug_info_*') + debug_files = glob.glob(pattern) + + if not debug_files: + sys.exit('%s.\nNo debug files were found in %s. Aborting.' 
% ( + usage, os.path.abspath(path_to_debug_info))) + + fd, tempfilename = tempfile.mkstemp() + f = os.fdopen(fd, 'w') + try: + f.write(prefix_code) + f.write(textwrap.dedent('''\ + # This is a gdb command file + # See https://sourceware.org/gdb/onlinedocs/gdb/Command-Files.html + + set breakpoint pending on + set print pretty on + + python + try: + # Activate virtualenv, if we were launched from one + import os + virtualenv = os.getenv('VIRTUAL_ENV') + if virtualenv: + path_to_activate_this_py = os.path.join(virtualenv, 'bin', 'activate_this.py') + print("gdb command file: Activating virtualenv: %s; path_to_activate_this_py: %s" % ( + virtualenv, path_to_activate_this_py)) + with open(path_to_activate_this_py) as f: + exec(f.read(), dict(__file__=path_to_activate_this_py)) + from Cython.Debugger import libcython, libpython + except Exception as ex: + from traceback import print_exc + print("There was an error in Python code originating from the file ''' + str(__file__) + '''") + print("It used the Python interpreter " + str(sys.executable)) + print_exc() + exit(1) + end + ''')) + + if no_import: + # don't do this, this overrides file command in .gdbinit + # f.write("file %s\n" % sys.executable) + pass + else: + if not skip_interpreter: + # Point Cygdb to the interpreter that was used to generate + # the debugging information. + path = os.path.join(path_to_debug_info, "cython_debug", "interpreter") + interpreter_file = open(path) + try: + interpreter = interpreter_file.read() + finally: + interpreter_file.close() + f.write("file %s\n" % interpreter) + + f.write('\n'.join('cy import %s\n' % fn for fn in debug_files)) + + if not skip_interpreter: + f.write(textwrap.dedent('''\ + python + import sys + try: + gdb.lookup_type('PyModuleObject') + except RuntimeError: + sys.stderr.write( + "''' + interpreter + ''' was not compiled with debug symbols (or it was " + "stripped). 
Some functionality may not work (properly).\\n") + end + ''')) + + f.write("source .cygdbinit") + finally: + f.close() + + return tempfilename + +usage = "Usage: cygdb [options] [PATH [-- GDB_ARGUMENTS]]" + +def main(path_to_debug_info=None, gdb_argv=None, no_import=False): + """ + Start the Cython debugger. This tells gdb to import the Cython and Python + extensions (libcython.py and libpython.py) and it enables gdb's pending + breakpoints. + + path_to_debug_info is the path to the Cython build directory + gdb_argv is the list of options to gdb + no_import tells cygdb whether it should import debug information + """ + parser = optparse.OptionParser(usage=usage) + parser.add_option("--gdb-executable", + dest="gdb", default='gdb', + help="gdb executable to use [default: gdb]") + parser.add_option("--verbose", "-v", + dest="verbosity", action="count", default=0, + help="Verbose mode. Multiple -v options increase the verbosity") + parser.add_option("--skip-interpreter", + dest="skip_interpreter", default=False, action="store_true", + help="Do not automatically point GDB to the same interpreter " + "used to generate debugging information") + + (options, args) = parser.parse_args() + if path_to_debug_info is None: + if len(args) > 1: + path_to_debug_info = args[0] + else: + path_to_debug_info = os.curdir + + if gdb_argv is None: + gdb_argv = args[1:] + + if path_to_debug_info == '--': + no_import = True + + logging_level = logging.WARN + if options.verbosity == 1: + logging_level = logging.INFO + if options.verbosity >= 2: + logging_level = logging.DEBUG + logging.basicConfig(level=logging_level) + + skip_interpreter = options.skip_interpreter + + logger.info("verbosity = %r", options.verbosity) + logger.debug("options = %r; args = %r", options, args) + logger.debug("Done parsing command-line options. 
path_to_debug_info = %r, gdb_argv = %r", + path_to_debug_info, gdb_argv) + + tempfilename = make_command_file(path_to_debug_info, + no_import=no_import, + skip_interpreter=skip_interpreter) + logger.info("Launching %s with command file: %s and gdb_argv: %s", + options.gdb, tempfilename, gdb_argv) + with open(tempfilename) as tempfile: + logger.debug('Command file (%s) contains: """\n%s"""', tempfilename, tempfile.read()) + logger.info("Spawning %s...", options.gdb) + p = subprocess.Popen([options.gdb, '-command', tempfilename] + gdb_argv) + logger.info("Spawned %s (pid %d)", options.gdb, p.pid) + while True: + try: + logger.debug("Waiting for gdb (pid %d) to exit...", p.pid) + ret = p.wait() + logger.debug("Wait for gdb (pid %d) to exit is done. Returned: %r", p.pid, ret) + except KeyboardInterrupt: + pass + else: + break + logger.debug("Closing temp command file with fd: %s", tempfile.fileno()) + logger.debug("Removing temp command file: %s", tempfilename) + os.remove(tempfilename) + logger.debug("Removed temp command file: %s", tempfilename) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/DebugWriter.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/DebugWriter.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3c310fc64555d3028fa6370ae4dc87ff836108 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/DebugWriter.py @@ -0,0 +1,90 @@ +from __future__ import absolute_import + +import os +import sys +import errno + +try: + from lxml import etree + have_lxml = True +except ImportError: + have_lxml = False + try: + from xml.etree import cElementTree as etree + except ImportError: + try: + from xml.etree import ElementTree as etree + except ImportError: + etree = None + +from ..Compiler import Errors +from ..Compiler.StringEncoding import EncodedString + + +def is_valid_tag(name): + """ + Names like '.0' are used internally 
for arguments + to functions creating generator expressions, + however they are not identifiers. + + See https://github.com/cython/cython/issues/5552 + """ + if isinstance(name, EncodedString): + if name.startswith(".") and name[1:].isdecimal(): + return False + return True + + +class CythonDebugWriter(object): + """ + Class to output debugging information for cygdb + + It writes debug information to cython_debug/cython_debug_info_ + in the build directory. + """ + + def __init__(self, output_dir): + if etree is None: + raise Errors.NoElementTreeInstalledException() + + self.output_dir = os.path.join(output_dir or os.curdir, 'cython_debug') + self.tb = etree.TreeBuilder() + # set by Cython.Compiler.ParseTreeTransforms.DebugTransform + self.module_name = None + self.start('cython_debug', attrs=dict(version='1.0')) + + def start(self, name, attrs=None): + if is_valid_tag(name): + self.tb.start(name, attrs or {}) + + def end(self, name): + if is_valid_tag(name): + self.tb.end(name) + + def add_entry(self, name, **attrs): + if is_valid_tag(name): + self.tb.start(name, attrs) + self.tb.end(name) + + def serialize(self): + self.tb.end('Module') + self.tb.end('cython_debug') + xml_root_element = self.tb.close() + + try: + os.makedirs(self.output_dir) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + et = etree.ElementTree(xml_root_element) + kw = {} + if have_lxml: + kw['pretty_print'] = True + + fn = "cython_debug_info_" + self.module_name + et.write(os.path.join(self.output_dir, fn), encoding="UTF-8", **kw) + + interpreter_path = os.path.join(self.output_dir, 'interpreter') + with open(interpreter_path, 'w') as f: + f.write(sys.executable) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6a9dd10e5684428476a84504f9b2065141a84fec Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/test_libpython_in_gdb.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/test_libpython_in_gdb.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd4cfb7012347407edb4491c7835c03755cd80c7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/test_libpython_in_gdb.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/cfuncs.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/cfuncs.c new file mode 100644 index 0000000000000000000000000000000000000000..ccb42050bf7e5245a7f634814bf41c343557a749 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/cfuncs.c @@ -0,0 +1,8 @@ +void +some_c_function(void) +{ + int a, b, c; + + a = 1; + b = 2; +} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__init__.py @@ -0,0 +1 @@ +# empty file diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/DebugWriter.cpython-311.pyc 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/DebugWriter.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94daed61afe6cc71d84791c50af0f67781dca50f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/DebugWriter.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/libcython.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/libcython.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c778c284e942567a7ef10f4686652dc02fa853af Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/libcython.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/libcython.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/libcython.py new file mode 100644 index 0000000000000000000000000000000000000000..0f79dba2a5b6f247d7be926e0dd187f21885ca8e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/libcython.py @@ -0,0 +1,1461 @@ +""" +GDB extension that adds Cython support. 
+""" + +from __future__ import print_function + +try: + input = raw_input +except NameError: + pass + +import sys +import textwrap +import functools +import itertools +import collections + +import gdb + +try: # python 2 + UNICODE = unicode + BYTES = str +except NameError: # python 3 + UNICODE = str + BYTES = bytes + +try: + from lxml import etree + have_lxml = True +except ImportError: + have_lxml = False + try: + # Python 2.5 + from xml.etree import cElementTree as etree + except ImportError: + try: + # Python 2.5 + from xml.etree import ElementTree as etree + except ImportError: + try: + # normal cElementTree install + import cElementTree as etree + except ImportError: + # normal ElementTree install + import elementtree.ElementTree as etree + +try: + import pygments.lexers + import pygments.formatters +except ImportError: + pygments = None + sys.stderr.write("Install pygments for colorized source code.\n") + +if hasattr(gdb, 'string_to_argv'): + from gdb import string_to_argv +else: + from shlex import split as string_to_argv + +from Cython.Debugger import libpython + +# C or Python type +CObject = 'CObject' +PythonObject = 'PythonObject' + +_data_types = dict(CObject=CObject, PythonObject=PythonObject) +_filesystemencoding = sys.getfilesystemencoding() or 'UTF-8' + + +# decorators + +def default_selected_gdb_frame(err=True): + def decorator(function): + @functools.wraps(function) + def wrapper(self, frame=None, *args, **kwargs): + try: + frame = frame or gdb.selected_frame() + except RuntimeError: + raise gdb.GdbError("No frame is currently selected.") + + if err and frame.name() is None: + raise NoFunctionNameInFrameError() + + return function(self, frame, *args, **kwargs) + return wrapper + return decorator + + +def require_cython_frame(function): + @functools.wraps(function) + @require_running_program + def wrapper(self, *args, **kwargs): + frame = kwargs.get('frame') or gdb.selected_frame() + if not self.is_cython_function(frame): + raise 
gdb.GdbError('Selected frame does not correspond with a ' + 'Cython function we know about.') + return function(self, *args, **kwargs) + return wrapper + + +def dispatch_on_frame(c_command, python_command=None): + def decorator(function): + @functools.wraps(function) + def wrapper(self, *args, **kwargs): + is_cy = self.is_cython_function() + is_py = self.is_python_function() + + if is_cy or (is_py and not python_command): + function(self, *args, **kwargs) + elif is_py: + gdb.execute(python_command) + elif self.is_relevant_function(): + gdb.execute(c_command) + else: + raise gdb.GdbError("Not a function cygdb knows about. " + "Use the normal GDB commands instead.") + + return wrapper + return decorator + + +def require_running_program(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + try: + gdb.selected_frame() + except RuntimeError: + raise gdb.GdbError("No frame is currently selected.") + + return function(*args, **kwargs) + return wrapper + + +def gdb_function_value_to_unicode(function): + @functools.wraps(function) + def wrapper(self, string, *args, **kwargs): + if isinstance(string, gdb.Value): + string = string.string() + + return function(self, string, *args, **kwargs) + return wrapper + + +# Classes that represent the debug information +# Don't rename the parameters of these classes, they come directly from the XML + +class CythonModule(object): + def __init__(self, module_name, filename, c_filename): + self.name = module_name + self.filename = filename + self.c_filename = c_filename + self.globals = {} + # {cython_lineno: min(c_linenos)} + self.lineno_cy2c = {} + # {c_lineno: cython_lineno} + self.lineno_c2cy = {} + self.functions = {} + + +class CythonVariable(object): + + def __init__(self, name, cname, qualified_name, type, lineno): + self.name = name + self.cname = cname + self.qualified_name = qualified_name + self.type = type + self.lineno = int(lineno) + + +class CythonFunction(CythonVariable): + def __init__(self, + module, + 
name, + cname, + pf_cname, + qualified_name, + lineno, + type=CObject, + is_initmodule_function="False"): + super(CythonFunction, self).__init__(name, + cname, + qualified_name, + type, + lineno) + self.module = module + self.pf_cname = pf_cname + self.is_initmodule_function = is_initmodule_function == "True" + self.locals = {} + self.arguments = [] + self.step_into_functions = set() + + +# General purpose classes + +class CythonBase(object): + + @default_selected_gdb_frame(err=False) + def is_cython_function(self, frame): + return frame.name() in self.cy.functions_by_cname + + @default_selected_gdb_frame(err=False) + def is_python_function(self, frame): + """ + Tells if a frame is associated with a Python function. + If we can't read the Python frame information, don't regard it as such. + """ + if frame.name() == 'PyEval_EvalFrameEx': + pyframe = libpython.Frame(frame).get_pyop() + return pyframe and not pyframe.is_optimized_out() + return False + + @default_selected_gdb_frame() + def get_c_function_name(self, frame): + return frame.name() + + @default_selected_gdb_frame() + def get_c_lineno(self, frame): + return frame.find_sal().line + + @default_selected_gdb_frame() + def get_cython_function(self, frame): + result = self.cy.functions_by_cname.get(frame.name()) + if result is None: + raise NoCythonFunctionInFrameError() + + return result + + @default_selected_gdb_frame() + def get_cython_lineno(self, frame): + """ + Get the current Cython line number. Returns 0 if there is no + correspondence between the C and Cython code. 
+ """ + cyfunc = self.get_cython_function(frame) + return cyfunc.module.lineno_c2cy.get(self.get_c_lineno(frame), 0) + + @default_selected_gdb_frame() + def get_source_desc(self, frame): + filename = lineno = lexer = None + if self.is_cython_function(frame): + filename = self.get_cython_function(frame).module.filename + filename_and_lineno = self.get_cython_lineno(frame) + assert filename == filename_and_lineno[0] + lineno = filename_and_lineno[1] + if pygments: + lexer = pygments.lexers.CythonLexer(stripall=False) + elif self.is_python_function(frame): + pyframeobject = libpython.Frame(frame).get_pyop() + + if not pyframeobject: + raise gdb.GdbError( + 'Unable to read information on python frame') + + filename = pyframeobject.filename() + lineno = pyframeobject.current_line_num() + + if pygments: + lexer = pygments.lexers.PythonLexer(stripall=False) + else: + symbol_and_line_obj = frame.find_sal() + if not symbol_and_line_obj or not symbol_and_line_obj.symtab: + filename = None + lineno = 0 + else: + filename = symbol_and_line_obj.symtab.fullname() + lineno = symbol_and_line_obj.line + if pygments: + lexer = pygments.lexers.CLexer(stripall=False) + + return SourceFileDescriptor(filename, lexer), lineno + + @default_selected_gdb_frame() + def get_source_line(self, frame): + source_desc, lineno = self.get_source_desc() + return source_desc.get_source(lineno) + + @default_selected_gdb_frame() + def is_relevant_function(self, frame): + """ + returns whether we care about a frame on the user-level when debugging + Cython code + """ + name = frame.name() + older_frame = frame.older() + if self.is_cython_function(frame) or self.is_python_function(frame): + return True + elif older_frame and self.is_cython_function(older_frame): + # check for direct C function call from a Cython function + cython_func = self.get_cython_function(older_frame) + return name in cython_func.step_into_functions + + return False + + @default_selected_gdb_frame(err=False) + def 
print_stackframe(self, frame, index, is_c=False): + """ + Print a C, Cython or Python stack frame and the line of source code + if available. + """ + # do this to prevent the require_cython_frame decorator from + # raising GdbError when calling self.cy.cy_cvalue.invoke() + selected_frame = gdb.selected_frame() + frame.select() + + try: + source_desc, lineno = self.get_source_desc(frame) + except NoFunctionNameInFrameError: + print('#%-2d Unknown Frame (compile with -g)' % index) + return + + if not is_c and self.is_python_function(frame): + pyframe = libpython.Frame(frame).get_pyop() + if pyframe is None or pyframe.is_optimized_out(): + # print this python function as a C function + return self.print_stackframe(frame, index, is_c=True) + + func_name = pyframe.co_name + func_cname = 'PyEval_EvalFrameEx' + func_args = [] + elif self.is_cython_function(frame): + cyfunc = self.get_cython_function(frame) + f = lambda arg: self.cy.cy_cvalue.invoke(arg, frame=frame) + + func_name = cyfunc.name + func_cname = cyfunc.cname + func_args = [] # [(arg, f(arg)) for arg in cyfunc.arguments] + else: + source_desc, lineno = self.get_source_desc(frame) + func_name = frame.name() + func_cname = func_name + func_args = [] + + try: + gdb_value = gdb.parse_and_eval(func_cname) + except RuntimeError: + func_address = 0 + else: + func_address = gdb_value.address + if not isinstance(func_address, int): + # Seriously? Why is the address not an int? 
+ if not isinstance(func_address, (str, bytes)): + func_address = str(func_address) + func_address = int(func_address.split()[0], 0) + + a = ', '.join('%s=%s' % (name, val) for name, val in func_args) + sys.stdout.write('#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a)) + + if source_desc.filename is not None: + sys.stdout.write(' at %s:%s' % (source_desc.filename, lineno)) + + sys.stdout.write('\n') + + try: + sys.stdout.write(' ' + source_desc.get_source(lineno)) + except gdb.GdbError: + pass + + selected_frame.select() + + def get_remote_cython_globals_dict(self): + m = gdb.parse_and_eval('__pyx_m') + + try: + PyModuleObject = gdb.lookup_type('PyModuleObject') + except RuntimeError: + raise gdb.GdbError(textwrap.dedent("""\ + Unable to lookup type PyModuleObject, did you compile python + with debugging support (-g)?""")) + + m = m.cast(PyModuleObject.pointer()) + return m['md_dict'] + + + def get_cython_globals_dict(self): + """ + Get the Cython globals dict where the remote names are turned into + local strings. 
+ """ + remote_dict = self.get_remote_cython_globals_dict() + pyobject_dict = libpython.PyObjectPtr.from_pyobject_ptr(remote_dict) + + result = {} + seen = set() + for k, v in pyobject_dict.iteritems(): + result[k.proxyval(seen)] = v + + return result + + def print_gdb_value(self, name, value, max_name_length=None, prefix=''): + if libpython.pretty_printer_lookup(value): + typename = '' + else: + typename = '(%s) ' % (value.type,) + + if max_name_length is None: + print('%s%s = %s%s' % (prefix, name, typename, value)) + else: + print('%s%-*s = %s%s' % (prefix, max_name_length, name, typename, value)) + + def is_initialized(self, cython_func, local_name): + cyvar = cython_func.locals[local_name] + cur_lineno = self.get_cython_lineno()[1] + + if '->' in cyvar.cname: + # Closed over free variable + if cur_lineno > cython_func.lineno: + if cyvar.type == PythonObject: + return int(gdb.parse_and_eval(cyvar.cname)) + return True + return False + + return cur_lineno > cyvar.lineno + + +class SourceFileDescriptor(object): + def __init__(self, filename, lexer, formatter=None): + self.filename = filename + self.lexer = lexer + self.formatter = formatter + + def valid(self): + return self.filename is not None + + def lex(self, code): + if pygments and self.lexer and parameters.colorize_code: + bg = parameters.terminal_background.value + if self.formatter is None: + formatter = pygments.formatters.TerminalFormatter(bg=bg) + else: + formatter = self.formatter + + return pygments.highlight(code, self.lexer, formatter) + + return code + + def _get_source(self, start, stop, lex_source, mark_line, lex_entire): + with open(self.filename) as f: + # to provide "correct" colouring, the entire code needs to be + # lexed. However, this makes a lot of things terribly slow, so + # we decide not to. Besides, it's unlikely to matter. 
+ + if lex_source and lex_entire: + f = self.lex(f.read()).splitlines() + + slice = itertools.islice(f, start - 1, stop - 1) + + for idx, line in enumerate(slice): + if start + idx == mark_line: + prefix = '>' + else: + prefix = ' ' + + if lex_source and not lex_entire: + line = self.lex(line) + + yield '%s %4d %s' % (prefix, start + idx, line.rstrip()) + + def get_source(self, start, stop=None, lex_source=True, mark_line=0, + lex_entire=False): + exc = gdb.GdbError('Unable to retrieve source code') + + if not self.filename: + raise exc + + start = max(start, 1) + if stop is None: + stop = start + 1 + + try: + return '\n'.join( + self._get_source(start, stop, lex_source, mark_line, lex_entire)) + except IOError: + raise exc + + +# Errors + +class CyGDBError(gdb.GdbError): + """ + Base class for Cython-command related errors + """ + + def __init__(self, *args): + args = args or (self.msg,) + super(CyGDBError, self).__init__(*args) + + +class NoCythonFunctionInFrameError(CyGDBError): + """ + raised when the user requests the current cython function, which is + unavailable + """ + msg = "Current function is a function cygdb doesn't know about" + + +class NoFunctionNameInFrameError(NoCythonFunctionInFrameError): + """ + raised when the name of the C function could not be determined + in the current C stack frame + """ + msg = ('C function name could not be determined in the current C stack ' + 'frame') + + +# Parameters + +class CythonParameter(gdb.Parameter): + """ + Base class for cython parameters + """ + + def __init__(self, name, command_class, parameter_class, default=None): + self.show_doc = self.set_doc = self.__class__.__doc__ + super(CythonParameter, self).__init__(name, command_class, + parameter_class) + if default is not None: + self.value = default + + def __bool__(self): + return bool(self.value) + + __nonzero__ = __bool__ # Python 2 + + + +class CompleteUnqualifiedFunctionNames(CythonParameter): + """ + Have 'cy break' complete unqualified function or 
method names. + """ + + +class ColorizeSourceCode(CythonParameter): + """ + Tell cygdb whether to colorize source code. + """ + + +class TerminalBackground(CythonParameter): + """ + Tell cygdb about the user's terminal background (light or dark). + """ + + +class CythonParameters(object): + """ + Simple container class that might get more functionality in the distant + future (mostly to remind us that we're dealing with parameters). + """ + + def __init__(self): + self.complete_unqualified = CompleteUnqualifiedFunctionNames( + 'cy_complete_unqualified', + gdb.COMMAND_BREAKPOINTS, + gdb.PARAM_BOOLEAN, + True) + self.colorize_code = ColorizeSourceCode( + 'cy_colorize_code', + gdb.COMMAND_FILES, + gdb.PARAM_BOOLEAN, + True) + self.terminal_background = TerminalBackground( + 'cy_terminal_background_color', + gdb.COMMAND_FILES, + gdb.PARAM_STRING, + "dark") + +parameters = CythonParameters() + + +# Commands + +class CythonCommand(gdb.Command, CythonBase): + """ + Base class for Cython commands + """ + + command_class = gdb.COMMAND_NONE + + @classmethod + def _register(cls, clsname, args, kwargs): + if not hasattr(cls, 'completer_class'): + return cls(clsname, cls.command_class, *args, **kwargs) + else: + return cls(clsname, cls.command_class, cls.completer_class, + *args, **kwargs) + + @classmethod + def register(cls, *args, **kwargs): + alias = getattr(cls, 'alias', None) + if alias: + cls._register(cls.alias, args, kwargs) + + return cls._register(cls.name, args, kwargs) + + +class CyCy(CythonCommand): + """ + Invoke a Cython command. Available commands are: + + cy import + cy break + cy step + cy next + cy run + cy cont + cy finish + cy up + cy down + cy select + cy bt / cy backtrace + cy list + cy print + cy set + cy locals + cy globals + cy exec + """ + + name = 'cy' + command_class = gdb.COMMAND_NONE + completer_class = gdb.COMPLETE_COMMAND + + def __init__(self, name, command_class, completer_class): + # keep the signature 2.5 compatible (i.e. 
do not use f(*a, k=v) + super(CythonCommand, self).__init__(name, command_class, + completer_class, prefix=True) + + commands = dict( + # GDB commands + import_ = CyImport.register(), + break_ = CyBreak.register(), + step = CyStep.register(), + next = CyNext.register(), + run = CyRun.register(), + cont = CyCont.register(), + finish = CyFinish.register(), + up = CyUp.register(), + down = CyDown.register(), + select = CySelect.register(), + bt = CyBacktrace.register(), + list = CyList.register(), + print_ = CyPrint.register(), + locals = CyLocals.register(), + globals = CyGlobals.register(), + exec_ = libpython.FixGdbCommand('cy exec', '-cy-exec'), + _exec = CyExec.register(), + set = CySet.register(), + + # GDB functions + cy_cname = CyCName('cy_cname'), + cy_cvalue = CyCValue('cy_cvalue'), + cy_lineno = CyLine('cy_lineno'), + cy_eval = CyEval('cy_eval'), + ) + + for command_name, command in commands.items(): + command.cy = self + setattr(self, command_name, command) + + self.cy = self + + # Cython module namespace + self.cython_namespace = {} + + # maps (unique) qualified function names (e.g. + # cythonmodule.ClassName.method_name) to the CythonFunction object + self.functions_by_qualified_name = {} + + # unique cnames of Cython functions + self.functions_by_cname = {} + + # map function names like method_name to a list of all such + # CythonFunction objects + self.functions_by_name = collections.defaultdict(list) + + +class CyImport(CythonCommand): + """ + Import debug information outputted by the Cython compiler + Example: cy import FILE... 
+ """ + + name = 'cy import' + command_class = gdb.COMMAND_STATUS + completer_class = gdb.COMPLETE_FILENAME + + @libpython.dont_suppress_errors + def invoke(self, args, from_tty): + if isinstance(args, BYTES): + args = args.decode(_filesystemencoding) + for arg in string_to_argv(args): + try: + f = open(arg) + except OSError as e: + raise gdb.GdbError('Unable to open file %r: %s' % (args, e.args[1])) + + t = etree.parse(f) + + for module in t.getroot(): + cython_module = CythonModule(**module.attrib) + self.cy.cython_namespace[cython_module.name] = cython_module + + for variable in module.find('Globals'): + d = variable.attrib + cython_module.globals[d['name']] = CythonVariable(**d) + + for function in module.find('Functions'): + cython_function = CythonFunction(module=cython_module, + **function.attrib) + + # update the global function mappings + name = cython_function.name + qname = cython_function.qualified_name + + self.cy.functions_by_name[name].append(cython_function) + self.cy.functions_by_qualified_name[ + cython_function.qualified_name] = cython_function + self.cy.functions_by_cname[ + cython_function.cname] = cython_function + + d = cython_module.functions[qname] = cython_function + + for local in function.find('Locals'): + d = local.attrib + cython_function.locals[d['name']] = CythonVariable(**d) + + for step_into_func in function.find('StepIntoFunctions'): + d = step_into_func.attrib + cython_function.step_into_functions.add(d['name']) + + cython_function.arguments.extend( + funcarg.tag for funcarg in function.find('Arguments')) + + for marker in module.find('LineNumberMapping'): + src_lineno = int(marker.attrib['src_lineno']) + src_path = marker.attrib['src_path'] + c_linenos = list(map(int, marker.attrib['c_linenos'].split())) + cython_module.lineno_cy2c[src_path, src_lineno] = min(c_linenos) + for c_lineno in c_linenos: + cython_module.lineno_c2cy[c_lineno] = (src_path, src_lineno) + + +class CyBreak(CythonCommand): + """ + Set a breakpoint for 
Cython code using Cython qualified name notation, e.g.: + + cy break cython_modulename.ClassName.method_name... + + or normal notation: + + cy break function_or_method_name... + + or for a line number: + + cy break cython_module:lineno... + + Set a Python breakpoint: + Break on any function or method named 'func' in module 'modname' + + cy break -p modname.func... + + Break on any function or method named 'func' + + cy break -p func... + """ + + name = 'cy break' + command_class = gdb.COMMAND_BREAKPOINTS + + def _break_pyx(self, name): + modulename, _, lineno = name.partition(':') + lineno = int(lineno) + if modulename: + cython_module = self.cy.cython_namespace[modulename] + else: + cython_module = self.get_cython_function().module + + if (cython_module.filename, lineno) in cython_module.lineno_cy2c: + c_lineno = cython_module.lineno_cy2c[cython_module.filename, lineno] + breakpoint = '%s:%s' % (cython_module.c_filename, c_lineno) + gdb.execute('break ' + breakpoint) + else: + raise gdb.GdbError("Not a valid line number. 
" + "Does it contain actual code?") + + def _break_funcname(self, funcname): + func = self.cy.functions_by_qualified_name.get(funcname) + + if func and func.is_initmodule_function: + func = None + + break_funcs = [func] + + if not func: + funcs = self.cy.functions_by_name.get(funcname) or [] + funcs = [f for f in funcs if not f.is_initmodule_function] + + if not funcs: + gdb.execute('break ' + funcname) + return + + if len(funcs) > 1: + # multiple functions, let the user pick one + print('There are multiple such functions:') + for idx, func in enumerate(funcs): + print('%3d) %s' % (idx, func.qualified_name)) + + while True: + try: + result = input( + "Select a function, press 'a' for all " + "functions or press 'q' or '^D' to quit: ") + except EOFError: + return + else: + if result.lower() == 'q': + return + elif result.lower() == 'a': + break_funcs = funcs + break + elif (result.isdigit() and + 0 <= int(result) < len(funcs)): + break_funcs = [funcs[int(result)]] + break + else: + print('Not understood...') + else: + break_funcs = [funcs[0]] + + for func in break_funcs: + gdb.execute('break %s' % func.cname) + if func.pf_cname: + gdb.execute('break %s' % func.pf_cname) + + @libpython.dont_suppress_errors + def invoke(self, function_names, from_tty): + if isinstance(function_names, BYTES): + function_names = function_names.decode(_filesystemencoding) + argv = string_to_argv(function_names) + if function_names.startswith('-p'): + argv = argv[1:] + python_breakpoints = True + else: + python_breakpoints = False + + for funcname in argv: + if python_breakpoints: + gdb.execute('py-break %s' % funcname) + elif ':' in funcname: + self._break_pyx(funcname) + else: + self._break_funcname(funcname) + + @libpython.dont_suppress_errors + def complete(self, text, word): + # Filter init-module functions (breakpoints can be set using + # modulename:linenumber). 
+ names = [n for n, L in self.cy.functions_by_name.items() + if any(not f.is_initmodule_function for f in L)] + qnames = [n for n, f in self.cy.functions_by_qualified_name.items() + if not f.is_initmodule_function] + + if parameters.complete_unqualified: + all_names = itertools.chain(qnames, names) + else: + all_names = qnames + + words = text.strip().split() + if not words or '.' not in words[-1]: + # complete unqualified + seen = set(text[:-len(word)].split()) + return [n for n in all_names + if n.startswith(word) and n not in seen] + + # complete qualified name + lastword = words[-1] + compl = [n for n in qnames if n.startswith(lastword)] + + if len(lastword) > len(word): + # readline sees something (e.g. a '.') as a word boundary, so don't + # "recomplete" this prefix + strip_prefix_length = len(lastword) - len(word) + compl = [n[strip_prefix_length:] for n in compl] + + return compl + + +class CythonInfo(CythonBase, libpython.PythonInfo): + """ + Implementation of the interface dictated by libpython.LanguageInfo. + """ + + def lineno(self, frame): + # Take care of the Python and Cython levels. We need to care for both + # as we can't simply dispatch to 'py-step', since that would work for + # stepping through Python code, but it would not step back into Cython- + # related code. The C level should be dispatched to the 'step' command. 
+ if self.is_cython_function(frame): + return self.get_cython_lineno(frame)[1] + return super(CythonInfo, self).lineno(frame) + + def get_source_line(self, frame): + try: + line = super(CythonInfo, self).get_source_line(frame) + except gdb.GdbError: + return None + else: + return line.strip() or None + + def exc_info(self, frame): + if self.is_python_function: + return super(CythonInfo, self).exc_info(frame) + + def runtime_break_functions(self): + if self.is_cython_function(): + return self.get_cython_function().step_into_functions + return () + + def static_break_functions(self): + result = ['PyEval_EvalFrameEx'] + result.extend(self.cy.functions_by_cname) + return result + + +class CythonExecutionControlCommand(CythonCommand, + libpython.ExecutionControlCommandBase): + + @classmethod + def register(cls): + return cls(cls.name, cython_info) + + +class CyStep(CythonExecutionControlCommand, libpython.PythonStepperMixin): + "Step through Cython, Python or C code." + + name = 'cy -step' + stepinto = True + + @libpython.dont_suppress_errors + def invoke(self, args, from_tty): + if self.is_python_function(): + self.python_step(self.stepinto) + elif not self.is_cython_function(): + if self.stepinto: + command = 'step' + else: + command = 'next' + + self.finish_executing(gdb.execute(command, to_string=True)) + else: + self.step(stepinto=self.stepinto) + + +class CyNext(CyStep): + "Step-over Cython, Python or C code." + + name = 'cy -next' + stepinto = False + + +class CyRun(CythonExecutionControlCommand): + """ + Run a Cython program. This is like the 'run' command, except that it + displays Cython or Python source lines as well + """ + + name = 'cy run' + + invoke = libpython.dont_suppress_errors(CythonExecutionControlCommand.run) + + +class CyCont(CythonExecutionControlCommand): + """ + Continue a Cython program. This is like the 'run' command, except that it + displays Cython or Python source lines as well. 
+ """ + + name = 'cy cont' + invoke = libpython.dont_suppress_errors(CythonExecutionControlCommand.cont) + + +class CyFinish(CythonExecutionControlCommand): + """ + Execute until the function returns. + """ + name = 'cy finish' + + invoke = libpython.dont_suppress_errors(CythonExecutionControlCommand.finish) + + +class CyUp(CythonCommand): + """ + Go up a Cython, Python or relevant C frame. + """ + name = 'cy up' + _command = 'up' + + @libpython.dont_suppress_errors + def invoke(self, *args): + try: + gdb.execute(self._command, to_string=True) + while not self.is_relevant_function(gdb.selected_frame()): + gdb.execute(self._command, to_string=True) + except RuntimeError as e: + raise gdb.GdbError(*e.args) + + frame = gdb.selected_frame() + index = 0 + while frame: + frame = frame.older() + index += 1 + + self.print_stackframe(index=index - 1) + + +class CyDown(CyUp): + """ + Go down a Cython, Python or relevant C frame. + """ + + name = 'cy down' + _command = 'down' + + +class CySelect(CythonCommand): + """ + Select a frame. Use frame numbers as listed in `cy backtrace`. + This command is useful because `cy backtrace` prints a reversed backtrace. 
+ """ + + name = 'cy select' + + @libpython.dont_suppress_errors + def invoke(self, stackno, from_tty): + try: + stackno = int(stackno) + except ValueError: + raise gdb.GdbError("Not a valid number: %r" % (stackno,)) + + frame = gdb.selected_frame() + while frame.newer(): + frame = frame.newer() + + stackdepth = libpython.stackdepth(frame) + + try: + gdb.execute('select %d' % (stackdepth - stackno - 1,)) + except RuntimeError as e: + raise gdb.GdbError(*e.args) + + +class CyBacktrace(CythonCommand): + 'Print the Cython stack' + + name = 'cy bt' + alias = 'cy backtrace' + command_class = gdb.COMMAND_STACK + completer_class = gdb.COMPLETE_NONE + + @libpython.dont_suppress_errors + @require_running_program + def invoke(self, args, from_tty): + # get the first frame + frame = gdb.selected_frame() + while frame.older(): + frame = frame.older() + + print_all = args == '-a' + + index = 0 + while frame: + try: + is_relevant = self.is_relevant_function(frame) + except CyGDBError: + is_relevant = False + + if print_all or is_relevant: + self.print_stackframe(frame, index) + + index += 1 + frame = frame.newer() + + +class CyList(CythonCommand): + """ + List Cython source code. To disable to customize colouring see the cy_* + parameters. 
+ """ + + name = 'cy list' + command_class = gdb.COMMAND_FILES + completer_class = gdb.COMPLETE_NONE + + @libpython.dont_suppress_errors + # @dispatch_on_frame(c_command='list') + def invoke(self, _, from_tty): + sd, lineno = self.get_source_desc() + source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno, + lex_entire=True) + print(source) + + +class CyPrint(CythonCommand): + """ + Print a Cython variable using 'cy-print x' or 'cy-print module.function.x' + """ + + name = 'cy print' + command_class = gdb.COMMAND_DATA + + @libpython.dont_suppress_errors + def invoke(self, name, from_tty): + global_python_dict = self.get_cython_globals_dict() + module_globals = self.get_cython_function().module.globals + + if name in global_python_dict: + value = global_python_dict[name].get_truncated_repr(libpython.MAX_OUTPUT_LEN) + print('%s = %s' % (name, value)) + #This also would work, but because the output of cy exec is not captured in gdb.execute, TestPrint would fail + #self.cy.exec_.invoke("print('"+name+"','=', type(" + name + "), "+name+", flush=True )", from_tty) + elif name in module_globals: + cname = module_globals[name].cname + try: + value = gdb.parse_and_eval(cname) + except RuntimeError: + print("unable to get value of %s" % name) + else: + if not value.is_optimized_out: + self.print_gdb_value(name, value) + else: + print("%s is optimized out" % name) + elif self.is_python_function(): + return gdb.execute('py-print ' + name) + elif self.is_cython_function(): + value = self.cy.cy_cvalue.invoke(name.lstrip('*')) + for c in name: + if c == '*': + value = value.dereference() + else: + break + + self.print_gdb_value(name, value) + else: + gdb.execute('print ' + name) + + def complete(self): + if self.is_cython_function(): + f = self.get_cython_function() + return list(itertools.chain(f.locals, f.globals)) + else: + return [] + + +sortkey = lambda item: item[0].lower() + + +class CyLocals(CythonCommand): + """ + List the locals from the current Cython frame. 
+ """ + + name = 'cy locals' + command_class = gdb.COMMAND_STACK + completer_class = gdb.COMPLETE_NONE + + @libpython.dont_suppress_errors + @dispatch_on_frame(c_command='info locals', python_command='py-locals') + def invoke(self, args, from_tty): + cython_function = self.get_cython_function() + + if cython_function.is_initmodule_function: + self.cy.globals.invoke(args, from_tty) + return + + local_cython_vars = cython_function.locals + max_name_length = len(max(local_cython_vars, key=len)) + for name, cyvar in sorted(local_cython_vars.items(), key=sortkey): + if self.is_initialized(self.get_cython_function(), cyvar.name): + value = gdb.parse_and_eval(cyvar.cname) + if not value.is_optimized_out: + self.print_gdb_value(cyvar.name, value, + max_name_length, '') + + +class CyGlobals(CyLocals): + """ + List the globals from the current Cython module. + """ + + name = 'cy globals' + command_class = gdb.COMMAND_STACK + completer_class = gdb.COMPLETE_NONE + + @libpython.dont_suppress_errors + @dispatch_on_frame(c_command='info variables', python_command='py-globals') + def invoke(self, args, from_tty): + global_python_dict = self.get_cython_globals_dict() + module_globals = self.get_cython_function().module.globals + + max_globals_len = 0 + max_globals_dict_len = 0 + if module_globals: + max_globals_len = len(max(module_globals, key=len)) + if global_python_dict: + max_globals_dict_len = len(max(global_python_dict)) + + max_name_length = max(max_globals_len, max_globals_dict_len) + + seen = set() + print('Python globals:') + + for k, v in sorted(global_python_dict.items(), key=sortkey): + v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN) + seen.add(k) + print(' %-*s = %s' % (max_name_length, k, v)) + + print('C globals:') + for name, cyvar in sorted(module_globals.items(), key=sortkey): + if name not in seen: + try: + value = gdb.parse_and_eval(cyvar.cname) + except RuntimeError: + pass + else: + if not value.is_optimized_out: + self.print_gdb_value(cyvar.name, value, 
+ max_name_length, ' ') + + +class EvaluateOrExecuteCodeMixin(object): + """ + Evaluate or execute Python code in a Cython or Python frame. The 'evalcode' + method evaluations Python code, prints a traceback if an exception went + uncaught, and returns any return value as a gdb.Value (NULL on exception). + """ + + def _fill_locals_dict(self, executor, local_dict_pointer): + "Fill a remotely allocated dict with values from the Cython C stack" + cython_func = self.get_cython_function() + + for name, cyvar in cython_func.locals.items(): + if (cyvar.type == PythonObject + and self.is_initialized(cython_func, name)): + + try: + val = gdb.parse_and_eval(cyvar.cname) + except RuntimeError: + continue + else: + if val.is_optimized_out: + continue + + pystringp = executor.alloc_pystring(name) + code = ''' + (PyObject *) PyDict_SetItem( + (PyObject *) %d, + (PyObject *) %d, + (PyObject *) %s) + ''' % (local_dict_pointer, pystringp, cyvar.cname) + + try: + if gdb.parse_and_eval(code) < 0: + gdb.parse_and_eval('PyErr_Print()') + raise gdb.GdbError("Unable to execute Python code.") + finally: + # PyDict_SetItem doesn't steal our reference + executor.xdecref(pystringp) + + def _find_first_cython_or_python_frame(self): + frame = gdb.selected_frame() + while frame: + if (self.is_cython_function(frame) + or self.is_python_function(frame)): + frame.select() + return frame + + frame = frame.older() + + raise gdb.GdbError("There is no Cython or Python frame on the stack.") + + def _evalcode_cython(self, executor, code, input_type): + with libpython.FetchAndRestoreError(): + # get the dict of Cython globals and construct a dict in the + # inferior with Cython locals + global_dict = gdb.parse_and_eval( + '(PyObject *) PyModule_GetDict(__pyx_m)') + local_dict = gdb.parse_and_eval('(PyObject *) PyDict_New()') + + try: + self._fill_locals_dict(executor, + libpython.pointervalue(local_dict)) + result = executor.evalcode(code, input_type, global_dict, + local_dict) + finally: + 
executor.xdecref(libpython.pointervalue(local_dict)) + + return result + + def evalcode(self, code, input_type): + """ + Evaluate `code` in a Python or Cython stack frame using the given + `input_type`. + """ + frame = self._find_first_cython_or_python_frame() + executor = libpython.PythonCodeExecutor() + if self.is_python_function(frame): + return libpython._evalcode_python(executor, code, input_type) + return self._evalcode_cython(executor, code, input_type) + + +class CyExec(CythonCommand, libpython.PyExec, EvaluateOrExecuteCodeMixin): + """ + Execute Python code in the nearest Python or Cython frame. + """ + + name = '-cy-exec' + command_class = gdb.COMMAND_STACK + completer_class = gdb.COMPLETE_NONE + + @libpython.dont_suppress_errors + def invoke(self, expr, from_tty): + expr, input_type = self.readcode(expr) + executor = libpython.PythonCodeExecutor() + executor.xdecref(self.evalcode(expr, executor.Py_file_input)) + + +class CySet(CythonCommand): + """ + Set a Cython variable to a certain value + + cy set my_cython_c_variable = 10 + cy set my_cython_py_variable = $cy_eval("{'doner': 'kebab'}") + + This is equivalent to + + set $cy_value("my_cython_variable") = 10 + """ + + name = 'cy set' + command_class = gdb.COMMAND_DATA + completer_class = gdb.COMPLETE_NONE + + @libpython.dont_suppress_errors + @require_cython_frame + def invoke(self, expr, from_tty): + name_and_expr = expr.split('=', 1) + if len(name_and_expr) != 2: + raise gdb.GdbError("Invalid expression. Use 'cy set var = expr'.") + + varname, expr = name_and_expr + cname = self.cy.cy_cname.invoke(varname.strip()) + gdb.execute("set %s = %s" % (cname, expr)) + + +# Functions + +class CyCName(gdb.Function, CythonBase): + """ + Get the C name of a Cython variable in the current context. 
+ Examples: + + print $cy_cname("function") + print $cy_cname("Class.method") + print $cy_cname("module.function") + """ + + @libpython.dont_suppress_errors + @require_cython_frame + @gdb_function_value_to_unicode + def invoke(self, cyname, frame=None): + frame = frame or gdb.selected_frame() + cname = None + + if self.is_cython_function(frame): + cython_function = self.get_cython_function(frame) + if cyname in cython_function.locals: + cname = cython_function.locals[cyname].cname + elif cyname in cython_function.module.globals: + cname = cython_function.module.globals[cyname].cname + else: + qname = '%s.%s' % (cython_function.module.name, cyname) + if qname in cython_function.module.functions: + cname = cython_function.module.functions[qname].cname + + if not cname: + cname = self.cy.functions_by_qualified_name.get(cyname) + + if not cname: + raise gdb.GdbError('No such Cython variable: %s' % cyname) + + return cname + + +class CyCValue(CyCName): + """ + Get the value of a Cython variable. + """ + + @libpython.dont_suppress_errors + @require_cython_frame + @gdb_function_value_to_unicode + def invoke(self, cyname, frame=None): + globals_dict = self.get_cython_globals_dict() + cython_function = self.get_cython_function(frame) + + if self.is_initialized(cython_function, cyname): + cname = super(CyCValue, self).invoke(cyname, frame=frame) + return gdb.parse_and_eval(cname) + elif cyname in globals_dict: + return globals_dict[cyname]._gdbval + else: + raise gdb.GdbError("Variable %s is not initialized." % cyname) + + +class CyLine(gdb.Function, CythonBase): + """ + Get the current Cython line. 
+ """ + + @libpython.dont_suppress_errors + @require_cython_frame + def invoke(self): + return self.get_cython_lineno()[1] + + +class CyEval(gdb.Function, CythonBase, EvaluateOrExecuteCodeMixin): + """ + Evaluate Python code in the nearest Python or Cython frame and return + """ + + @libpython.dont_suppress_errors + @gdb_function_value_to_unicode + def invoke(self, python_expression): + input_type = libpython.PythonCodeExecutor.Py_eval_input + return self.evalcode(python_expression, input_type) + + +cython_info = CythonInfo() +cy = CyCy.register() +cython_info.cy = cy + + +def register_defines(): + libpython.source_gdb_script(textwrap.dedent("""\ + define cy step + cy -step + end + + define cy next + cy -next + end + + document cy step + %s + end + + document cy next + %s + end + """) % (CyStep.__doc__, CyNext.__doc__)) + +register_defines() diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/libpython.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/libpython.py new file mode 100644 index 0000000000000000000000000000000000000000..0a680f9cf8eb567c4617675d581e2494ac440536 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/libpython.py @@ -0,0 +1,2851 @@ +#!/usr/bin/python + +# NOTE: Most of this file is taken from the Python source distribution +# It can be found under Tools/gdb/libpython.py. It is shipped with Cython +# because it's not installed as a python module, and because changes are only +# merged into new python versions (v3.2+). +# We added some of our code below the "## added, not in CPython" comment. + +''' +From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb +to be extended with Python code e.g. for library-specific data visualizations, +such as for the C++ STL types. 
Documentation on this API can be seen at: +http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html + + +This python module deals with the case when the process being debugged (the +"inferior process" in gdb parlance) is itself python, or more specifically, +linked against libpython. In this situation, almost every item of data is a +(PyObject*), and having the debugger merely print their addresses is not very +enlightening. + +This module embeds knowledge about the implementation details of libpython so +that we can emit useful visualizations e.g. a string, a list, a dict, a frame +giving file/line information and the state of local variables + +In particular, given a gdb.Value corresponding to a PyObject* in the inferior +process, we can generate a "proxy value" within the gdb process. For example, +given a PyObject* in the inferior process that is in fact a PyListObject* +holding three PyObject* that turn out to be PyBytesObject* instances, we can +generate a proxy value within the gdb process that is a list of bytes +instances: + [b"foo", b"bar", b"baz"] + +Doing so can be expensive for complicated graphs of objects, and could take +some time, so we also have a "write_repr" method that writes a representation +of the data to a file-like object. This allows us to stop the traversal by +having the file-like object raise an exception if it gets too much data. + +With both "proxyval" and "write_repr" we keep track of the set of all addresses +visited so far in the traversal, to avoid infinite recursion due to cycles in +the graph of object references. + +We try to defer gdb.lookup_type() invocations for python types until as late as +possible: for a dynamically linked python binary, when the process starts in +the debugger, the libpython.so hasn't been dynamically loaded yet, so none of +the type names are known to the debugger + +The module also extends gdb with some python-specific commands. 
+''' + +# NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax +# compatible (2.6+ and 3.0+). See #19308. + +from __future__ import print_function +import gdb +import os +import locale +import sys + +if sys.version_info[0] >= 3: + unichr = chr + xrange = range + long = int + +# Look up the gdb.Type for some standard types: +# Those need to be refreshed as types (pointer sizes) may change when +# gdb loads different executables + +def _type_char_ptr(): + return gdb.lookup_type('char').pointer() # char* + + +def _type_unsigned_char_ptr(): + return gdb.lookup_type('unsigned char').pointer() # unsigned char* + + +def _type_unsigned_short_ptr(): + return gdb.lookup_type('unsigned short').pointer() + + +def _type_unsigned_int_ptr(): + return gdb.lookup_type('unsigned int').pointer() + + +def _sizeof_void_p(): + return gdb.lookup_type('void').pointer().sizeof + + +# value computed later, see PyUnicodeObjectPtr.proxy() +_is_pep393 = None + +Py_TPFLAGS_HEAPTYPE = (1 << 9) +Py_TPFLAGS_LONG_SUBCLASS = (1 << 24) +Py_TPFLAGS_LIST_SUBCLASS = (1 << 25) +Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26) +Py_TPFLAGS_BYTES_SUBCLASS = (1 << 27) +Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28) +Py_TPFLAGS_DICT_SUBCLASS = (1 << 29) +Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30) +Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31) + + +MAX_OUTPUT_LEN=1024 + +hexdigits = "0123456789abcdef" + +ENCODING = locale.getpreferredencoding() + +FRAME_INFO_OPTIMIZED_OUT = '(frame information optimized out)' +UNABLE_READ_INFO_PYTHON_FRAME = 'Unable to read information on python frame' +EVALFRAME = '_PyEval_EvalFrameDefault' + +class NullPyObjectPtr(RuntimeError): + pass + + +def safety_limit(val): + # Given an integer value from the process being debugged, limit it to some + # safety threshold so that arbitrary breakage within said process doesn't + # break the gdb process too much (e.g. 
sizes of iterations, sizes of lists) + return min(val, 1000) + + +def safe_range(val): + # As per range, but don't trust the value too much: cap it to a safety + # threshold in case the data was corrupted + return xrange(safety_limit(int(val))) + +if sys.version_info[0] >= 3: + def write_unicode(file, text): + file.write(text) +else: + def write_unicode(file, text): + # Write a byte or unicode string to file. Unicode strings are encoded to + # ENCODING encoding with 'backslashreplace' error handler to avoid + # UnicodeEncodeError. + if isinstance(text, unicode): + text = text.encode(ENCODING, 'backslashreplace') + file.write(text) + +try: + os_fsencode = os.fsencode +except AttributeError: + def os_fsencode(filename): + if not isinstance(filename, unicode): + return filename + encoding = sys.getfilesystemencoding() + if encoding == 'mbcs': + # mbcs doesn't support surrogateescape + return filename.encode(encoding) + encoded = [] + for char in filename: + # surrogateescape error handler + if 0xDC80 <= ord(char) <= 0xDCFF: + byte = chr(ord(char) - 0xDC00) + else: + byte = char.encode(encoding) + encoded.append(byte) + return ''.join(encoded) + +class StringTruncated(RuntimeError): + pass + +class TruncatedStringIO(object): + '''Similar to io.StringIO, but can truncate the output by raising a + StringTruncated exception''' + def __init__(self, maxlen=None): + self._val = '' + self.maxlen = maxlen + + def write(self, data): + if self.maxlen: + if len(data) + len(self._val) > self.maxlen: + # Truncation: + self._val += data[0:self.maxlen - len(self._val)] + raise StringTruncated() + + self._val += data + + def getvalue(self): + return self._val + +class PyObjectPtr(object): + """ + Class wrapping a gdb.Value that's either a (PyObject*) within the + inferior process, or some subclass pointer e.g. (PyBytesObject*) + + There will be a subclass for every refined PyObject type that we care + about. 
+ + Note that at every stage the underlying pointer could be NULL, point + to corrupt data, etc; this is the debugger, after all. + """ + _typename = 'PyObject' + + def __init__(self, gdbval, cast_to=None): + if cast_to: + self._gdbval = gdbval.cast(cast_to) + else: + self._gdbval = gdbval + + def field(self, name): + ''' + Get the gdb.Value for the given field within the PyObject, coping with + some python 2 versus python 3 differences. + + Various libpython types are defined using the "PyObject_HEAD" and + "PyObject_VAR_HEAD" macros. + + In Python 2, this these are defined so that "ob_type" and (for a var + object) "ob_size" are fields of the type in question. + + In Python 3, this is defined as an embedded PyVarObject type thus: + PyVarObject ob_base; + so that the "ob_size" field is located insize the "ob_base" field, and + the "ob_type" is most easily accessed by casting back to a (PyObject*). + ''' + if self.is_null(): + raise NullPyObjectPtr(self) + + if name == 'ob_type': + pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type()) + return pyo_ptr.dereference()[name] + + if name == 'ob_size': + pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type()) + return pyo_ptr.dereference()[name] + + # General case: look it up inside the object: + return self._gdbval.dereference()[name] + + def pyop_field(self, name): + ''' + Get a PyObjectPtr for the given PyObject* field within this PyObject, + coping with some python 2 versus python 3 differences. 
+ ''' + return PyObjectPtr.from_pyobject_ptr(self.field(name)) + + def write_field_repr(self, name, out, visited): + ''' + Extract the PyObject* field named "name", and write its representation + to file-like object "out" + ''' + field_obj = self.pyop_field(name) + field_obj.write_repr(out, visited) + + def get_truncated_repr(self, maxlen): + ''' + Get a repr-like string for the data, but truncate it at "maxlen" bytes + (ending the object graph traversal as soon as you do) + ''' + out = TruncatedStringIO(maxlen) + try: + self.write_repr(out, set()) + except StringTruncated: + # Truncation occurred: + return out.getvalue() + '...(truncated)' + + # No truncation occurred: + return out.getvalue() + + def type(self): + return PyTypeObjectPtr(self.field('ob_type')) + + def is_null(self): + return 0 == long(self._gdbval) + + def is_optimized_out(self): + ''' + Is the value of the underlying PyObject* visible to the debugger? + + This can vary with the precise version of the compiler used to build + Python, and the precise version of gdb. + + See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with + PyEval_EvalFrameEx's "f" + ''' + return self._gdbval.is_optimized_out + + def safe_tp_name(self): + try: + ob_type = self.type() + tp_name = ob_type.field('tp_name') + return tp_name.string() + # NullPyObjectPtr: NULL tp_name? + # RuntimeError: Can't even read the object at all? + # UnicodeDecodeError: Failed to decode tp_name bytestring + except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError): + return 'unknown' + + def proxyval(self, visited): + ''' + Scrape a value from the inferior process, and try to represent it + within the gdb process, whilst (hopefully) avoiding crashes when + the remote data is corrupt. + + Derived classes will override this. + + For example, a PyIntObject* with ob_ival 42 in the inferior process + should result in an int(42) in this process. 
+ + visited: a set of all gdb.Value pyobject pointers already visited + whilst generating this value (to guard against infinite recursion when + visiting object graphs with loops). Analogous to Py_ReprEnter and + Py_ReprLeave + ''' + + class FakeRepr(object): + """ + Class representing a non-descript PyObject* value in the inferior + process for when we don't have a custom scraper, intended to have + a sane repr(). + """ + + def __init__(self, tp_name, address): + self.tp_name = tp_name + self.address = address + + def __repr__(self): + # For the NULL pointer, we have no way of knowing a type, so + # special-case it as per + # http://bugs.python.org/issue8032#msg100882 + if self.address == 0: + return '0x0' + return '<%s at remote 0x%x>' % (self.tp_name, self.address) + + return FakeRepr(self.safe_tp_name(), + long(self._gdbval)) + + def write_repr(self, out, visited): + ''' + Write a string representation of the value scraped from the inferior + process to "out", a file-like object. + ''' + # Default implementation: generate a proxy value and write its repr + # However, this could involve a lot of work for complicated objects, + # so for derived classes we specialize this + return out.write(repr(self.proxyval(visited))) + + @classmethod + def subclass_from_type(cls, t): + ''' + Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a + (PyTypeObject*), determine the corresponding subclass of PyObjectPtr + to use + + Ideally, we would look up the symbols for the global types, but that + isn't working yet: + (gdb) python print gdb.lookup_symbol('PyList_Type')[0].value + Traceback (most recent call last): + File "", line 1, in + NotImplementedError: Symbol type not yet supported in Python scripts. + Error while executing Python code. 
+ + For now, we use tp_flags, after doing some string comparisons on the + tp_name for some special-cases that don't seem to be visible through + flags + ''' + try: + tp_name = t.field('tp_name').string() + tp_flags = int(t.field('tp_flags')) + # RuntimeError: NULL pointers + # UnicodeDecodeError: string() fails to decode the bytestring + except (RuntimeError, UnicodeDecodeError): + # Handle any kind of error e.g. NULL ptrs by simply using the base + # class + return cls + + #print('tp_flags = 0x%08x' % tp_flags) + #print('tp_name = %r' % tp_name) + + name_map = {'bool': PyBoolObjectPtr, + 'classobj': PyClassObjectPtr, + 'NoneType': PyNoneStructPtr, + 'frame': PyFrameObjectPtr, + 'set' : PySetObjectPtr, + 'frozenset' : PySetObjectPtr, + 'builtin_function_or_method' : PyCFunctionObjectPtr, + 'method-wrapper': wrapperobject, + } + if tp_name in name_map: + return name_map[tp_name] + + if tp_flags & Py_TPFLAGS_HEAPTYPE: + return HeapTypeObjectPtr + + if tp_flags & Py_TPFLAGS_LONG_SUBCLASS: + return PyLongObjectPtr + if tp_flags & Py_TPFLAGS_LIST_SUBCLASS: + return PyListObjectPtr + if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS: + return PyTupleObjectPtr + if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS: + return PyBytesObjectPtr + if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS: + return PyUnicodeObjectPtr + if tp_flags & Py_TPFLAGS_DICT_SUBCLASS: + return PyDictObjectPtr + if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS: + return PyBaseExceptionObjectPtr + #if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS: + # return PyTypeObjectPtr + + # Use the base class: + return cls + + @classmethod + def from_pyobject_ptr(cls, gdbval): + ''' + Try to locate the appropriate derived class dynamically, and cast + the pointer accordingly. + ''' + try: + p = PyObjectPtr(gdbval) + cls = cls.subclass_from_type(p.type()) + return cls(gdbval, cast_to=cls.get_gdb_type()) + except RuntimeError: + # Handle any kind of error e.g. 
NULL ptrs by simply using the base + # class + pass + return cls(gdbval) + + @classmethod + def get_gdb_type(cls): + return gdb.lookup_type(cls._typename).pointer() + + def as_address(self): + return long(self._gdbval) + +class PyVarObjectPtr(PyObjectPtr): + _typename = 'PyVarObject' + +class ProxyAlreadyVisited(object): + ''' + Placeholder proxy to use when protecting against infinite recursion due to + loops in the object graph. + + Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave + ''' + def __init__(self, rep): + self._rep = rep + + def __repr__(self): + return self._rep + + +def _write_instance_repr(out, visited, name, pyop_attrdict, address): + '''Shared code for use by all classes: + write a representation to file-like object "out"''' + out.write('<') + out.write(name) + + # Write dictionary of instance attributes: + if isinstance(pyop_attrdict, PyDictObjectPtr): + out.write('(') + first = True + for pyop_arg, pyop_val in pyop_attrdict.iteritems(): + if not first: + out.write(', ') + first = False + out.write(pyop_arg.proxyval(visited)) + out.write('=') + pyop_val.write_repr(out, visited) + out.write(')') + out.write(' at remote 0x%x>' % address) + + +class InstanceProxy(object): + + def __init__(self, cl_name, attrdict, address): + self.cl_name = cl_name + self.attrdict = attrdict + self.address = address + + def __repr__(self): + if isinstance(self.attrdict, dict): + kwargs = ', '.join(["%s=%r" % (arg, val) + for arg, val in self.attrdict.iteritems()]) + return '<%s(%s) at remote 0x%x>' % (self.cl_name, + kwargs, self.address) + else: + return '<%s at remote 0x%x>' % (self.cl_name, + self.address) + +def _PyObject_VAR_SIZE(typeobj, nitems): + if _PyObject_VAR_SIZE._type_size_t is None: + _PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t') + + return ( ( typeobj.field('tp_basicsize') + + nitems * typeobj.field('tp_itemsize') + + (_sizeof_void_p() - 1) + ) & ~(_sizeof_void_p() - 1) + 
    def get_attr_dict(self):
        '''
        Get the PyDictObject ptr representing the attribute dictionary
        (or None if there's a problem)

        Transliteration of CPython's _PyObject_GetDictPtr: the instance
        __dict__ slot is found at the type's tp_dictoffset from the start
        of the object.
        '''
        try:
            typeobj = self.type()
            dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
            if dictoffset != 0:
                if dictoffset < 0:
                    # Negative tp_dictoffset: the dict pointer lives after
                    # the variable-sized part of the object, so compute the
                    # real positive offset from ob_size and the item sizes.
                    type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
                    tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
                    if tsize < 0:
                        # ob_size may be negative (e.g. for int objects,
                        # where the sign encodes the number's sign).
                        tsize = -tsize
                    size = _PyObject_VAR_SIZE(typeobj, tsize)
                    dictoffset += size
                    # The computed offset must be positive and pointer-aligned.
                    assert dictoffset > 0
                    assert dictoffset % _sizeof_void_p() == 0

                # Byte-level pointer arithmetic: object base + offset,
                # reinterpreted as (PyObject **), then dereferenced to get
                # the dict itself.
                dictptr = self._gdbval.cast(_type_char_ptr()) + dictoffset
                PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
                dictptr = dictptr.cast(PyObjectPtrPtr)
                return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
        except RuntimeError:
            # Corrupt data somewhere; fail safe
            pass

        # Not found, or some kind of error:
        return None
class ProxyException(Exception):
    """Local stand-in for an exception object in the inferior process.

    repr()s like the original exception, e.g. "ValueError('bad',)".
    """

    def __init__(self, tp_name, args):
        # tp_name: the exception type's name; args: proxied args tuple.
        self.tp_name = tp_name
        self.args = args

    def __repr__(self):
        return '%s%r' % (self.tp_name, self.args)
+ """ + _typename = 'PyClassObject' + + +class BuiltInFunctionProxy(object): + def __init__(self, ml_name): + self.ml_name = ml_name + + def __repr__(self): + return "" % self.ml_name + +class BuiltInMethodProxy(object): + def __init__(self, ml_name, pyop_m_self): + self.ml_name = ml_name + self.pyop_m_self = pyop_m_self + + def __repr__(self): + return ('' + % (self.ml_name, + self.pyop_m_self.safe_tp_name(), + self.pyop_m_self.as_address()) + ) + +class PyCFunctionObjectPtr(PyObjectPtr): + """ + Class wrapping a gdb.Value that's a PyCFunctionObject* + (see Include/methodobject.h and Objects/methodobject.c) + """ + _typename = 'PyCFunctionObject' + + def proxyval(self, visited): + m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*) + try: + ml_name = m_ml['ml_name'].string() + except UnicodeDecodeError: + ml_name = '' + + pyop_m_self = self.pyop_field('m_self') + if pyop_m_self.is_null(): + return BuiltInFunctionProxy(ml_name) + else: + return BuiltInMethodProxy(ml_name, pyop_m_self) + + +class PyCodeObjectPtr(PyObjectPtr): + """ + Class wrapping a gdb.Value that's a PyCodeObject* i.e. a instance + within the process being debugged. + """ + _typename = 'PyCodeObject' + + def addr2line(self, addrq): + ''' + Get the line number for a given bytecode offset + + Analogous to PyCode_Addr2Line; translated from pseudocode in + Objects/lnotab_notes.txt + ''' + co_lnotab = self.pyop_field('co_lnotab').proxyval(set()) + + # Initialize lineno to co_firstlineno as per PyCode_Addr2Line + # not 0, as lnotab_notes.txt has it: + lineno = int_from_int(self.field('co_firstlineno')) + + addr = 0 + for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]): + addr += ord(addr_incr) + if addr > addrq: + return lineno + lineno += ord(line_incr) + return lineno + + +class PyDictObjectPtr(PyObjectPtr): + """ + Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance + within the process being debugged. 
+ """ + _typename = 'PyDictObject' + + def iteritems(self): + ''' + Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs, + analogous to dict.iteritems() + ''' + keys = self.field('ma_keys') + values = self.field('ma_values') + entries, nentries = self._get_entries(keys) + for i in safe_range(nentries): + ep = entries[i] + if long(values): + pyop_value = PyObjectPtr.from_pyobject_ptr(values[i]) + else: + pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value']) + if not pyop_value.is_null(): + pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key']) + yield (pyop_key, pyop_value) + + def proxyval(self, visited): + # Guard against infinite loops: + if self.as_address() in visited: + return ProxyAlreadyVisited('{...}') + visited.add(self.as_address()) + + result = {} + for pyop_key, pyop_value in self.iteritems(): + proxy_key = pyop_key.proxyval(visited) + proxy_value = pyop_value.proxyval(visited) + result[proxy_key] = proxy_value + return result + + def write_repr(self, out, visited): + # Guard against infinite loops: + if self.as_address() in visited: + out.write('{...}') + return + visited.add(self.as_address()) + + out.write('{') + first = True + for pyop_key, pyop_value in self.iteritems(): + if not first: + out.write(', ') + first = False + pyop_key.write_repr(out, visited) + out.write(': ') + pyop_value.write_repr(out, visited) + out.write('}') + + def _get_entries(self, keys): + dk_nentries = int(keys['dk_nentries']) + dk_size = int(keys['dk_size']) + try: + # <= Python 3.5 + return keys['dk_entries'], dk_size + except RuntimeError: + # >= Python 3.6 + pass + + if dk_size <= 0xFF: + offset = dk_size + elif dk_size <= 0xFFFF: + offset = 2 * dk_size + elif dk_size <= 0xFFFFFFFF: + offset = 4 * dk_size + else: + offset = 8 * dk_size + + ent_addr = keys['dk_indices'].address + ent_addr = ent_addr.cast(_type_unsigned_char_ptr()) + offset + ent_ptr_t = gdb.lookup_type('PyDictKeyEntry').pointer() + ent_addr = ent_addr.cast(ent_ptr_t) + + return ent_addr, 
class PyListObjectPtr(PyObjectPtr):
    """Class wrapping a gdb.Value that's a PyListObject* i.e. a list
    instance within the process being debugged."""
    _typename = 'PyListObject'

    def __getitem__(self, i):
        # ob_item is the backing (PyObject **) array; index into it to get
        # the gdb.Value for the i-th element.
        return self.field('ob_item')[i]

    def proxyval(self, visited):
        """Return a local list mirroring the inferior's list contents."""
        addr = self.as_address()
        if addr in visited:
            # Cycle in the object graph; emit the "[...]" placeholder.
            return ProxyAlreadyVisited('[...]')
        visited.add(addr)

        length = int_from_int(self.field('ob_size'))
        elements = []
        for idx in safe_range(length):
            elements.append(
                PyObjectPtr.from_pyobject_ptr(self[idx]).proxyval(visited))
        return elements

    def write_repr(self, out, visited):
        """Write a list repr ("[a, b, ...]") to the file-like `out`."""
        addr = self.as_address()
        if addr in visited:
            out.write('[...]')
            return
        visited.add(addr)

        out.write('[')
        for idx in safe_range(int_from_int(self.field('ob_size'))):
            if idx:
                out.write(', ')
            PyObjectPtr.from_pyobject_ptr(self[idx]).write_repr(out, visited)
        out.write(']')
without the "L" suffix + proxy = self.proxyval(visited) + out.write("%s" % proxy) + + +class PyBoolObjectPtr(PyLongObjectPtr): + """ + Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two + instances (Py_True/Py_False) within the process being debugged. + """ + def proxyval(self, visited): + if PyLongObjectPtr.proxyval(self, visited): + return True + else: + return False + +class PyNoneStructPtr(PyObjectPtr): + """ + Class wrapping a gdb.Value that's a PyObject* pointing to the + singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type + """ + _typename = 'PyObject' + + def proxyval(self, visited): + return None + + +class PyFrameObjectPtr(PyObjectPtr): + _typename = 'PyFrameObject' + + def __init__(self, gdbval, cast_to=None): + PyObjectPtr.__init__(self, gdbval, cast_to) + + if not self.is_optimized_out(): + self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code')) + self.co_name = self.co.pyop_field('co_name') + self.co_filename = self.co.pyop_field('co_filename') + + self.f_lineno = int_from_int(self.field('f_lineno')) + self.f_lasti = int_from_int(self.field('f_lasti')) + self.co_nlocals = int_from_int(self.co.field('co_nlocals')) + self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames')) + + def iter_locals(self): + ''' + Yield a sequence of (name,value) pairs of PyObjectPtr instances, for + the local variables of this frame + ''' + if self.is_optimized_out(): + return + + f_localsplus = self.field('f_localsplus') + for i in safe_range(self.co_nlocals): + pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i]) + if not pyop_value.is_null(): + pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i]) + yield (pyop_name, pyop_value) + + def iter_globals(self): + ''' + Yield a sequence of (name,value) pairs of PyObjectPtr instances, for + the global variables of this frame + ''' + if self.is_optimized_out(): + return () + + pyop_globals = self.pyop_field('f_globals') + return 
pyop_globals.iteritems() + + def iter_builtins(self): + ''' + Yield a sequence of (name,value) pairs of PyObjectPtr instances, for + the builtin variables + ''' + if self.is_optimized_out(): + return () + + pyop_builtins = self.pyop_field('f_builtins') + return pyop_builtins.iteritems() + + def get_var_by_name(self, name): + ''' + Look for the named local variable, returning a (PyObjectPtr, scope) pair + where scope is a string 'local', 'global', 'builtin' + + If not found, return (None, None) + ''' + for pyop_name, pyop_value in self.iter_locals(): + if name == pyop_name.proxyval(set()): + return pyop_value, 'local' + for pyop_name, pyop_value in self.iter_globals(): + if name == pyop_name.proxyval(set()): + return pyop_value, 'global' + for pyop_name, pyop_value in self.iter_builtins(): + if name == pyop_name.proxyval(set()): + return pyop_value, 'builtin' + return None, None + + def filename(self): + '''Get the path of the current Python source file, as a string''' + if self.is_optimized_out(): + return FRAME_INFO_OPTIMIZED_OUT + return self.co_filename.proxyval(set()) + + def current_line_num(self): + '''Get current line number as an integer (1-based) + + Translated from PyFrame_GetLineNumber and PyCode_Addr2Line + + See Objects/lnotab_notes.txt + ''' + if self.is_optimized_out(): + return None + f_trace = self.field('f_trace') + if long(f_trace) != 0: + # we have a non-NULL f_trace: + return self.f_lineno + + try: + return self.co.addr2line(self.f_lasti) + except Exception: + # bpo-34989: addr2line() is a complex function, it can fail in many + # ways. For example, it fails with a TypeError on "FakeRepr" if + # gdb fails to load debug symbols. Use a catch-all "except + # Exception" to make the whole function safe. The caller has to + # handle None anyway for optimized Python. 
+ return None + + def current_line(self): + '''Get the text of the current source line as a string, with a trailing + newline character''' + if self.is_optimized_out(): + return FRAME_INFO_OPTIMIZED_OUT + + lineno = self.current_line_num() + if lineno is None: + return '(failed to get frame line number)' + + filename = self.filename() + try: + with open(os_fsencode(filename), 'r') as fp: + lines = fp.readlines() + except IOError: + return None + + try: + # Convert from 1-based current_line_num to 0-based list offset + return lines[lineno - 1] + except IndexError: + return None + + def write_repr(self, out, visited): + if self.is_optimized_out(): + out.write(FRAME_INFO_OPTIMIZED_OUT) + return + lineno = self.current_line_num() + lineno = str(lineno) if lineno is not None else "?" + out.write('Frame 0x%x, for file %s, line %s, in %s (' + % (self.as_address(), + self.co_filename.proxyval(visited), + lineno, + self.co_name.proxyval(visited))) + first = True + for pyop_name, pyop_value in self.iter_locals(): + if not first: + out.write(', ') + first = False + + out.write(pyop_name.proxyval(visited)) + out.write('=') + pyop_value.write_repr(out, visited) + + out.write(')') + + def print_traceback(self): + if self.is_optimized_out(): + sys.stdout.write(' %s\n' % FRAME_INFO_OPTIMIZED_OUT) + return + visited = set() + lineno = self.current_line_num() + lineno = str(lineno) if lineno is not None else "?" 
class PySetObjectPtr(PyObjectPtr):
    """Class wrapping a gdb.Value that's a PySetObject* i.e. a set or
    frozenset instance within the process being debugged."""
    _typename = 'PySetObject'

    @classmethod
    def _dummy_key(self):
        # The sentinel CPython stores in slots whose key was deleted.
        return gdb.lookup_global_symbol('_PySet_Dummy').value()

    def __iter__(self):
        # Walk the open-addressed hash table, yielding a PyObjectPtr for
        # every live key (skipping NULL and dummy/deleted slots).
        dummy_ptr = self._dummy_key()
        table = self.field('table')
        for i in safe_range(self.field('mask') + 1):
            setentry = table[i]
            key = setentry['key']
            if key != 0 and key != dummy_ptr:
                yield PyObjectPtr.from_pyobject_ptr(key)

    def proxyval(self, visited):
        """Return a local set/frozenset mirroring the inferior's contents."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
        visited.add(self.as_address())

        members = (key.proxyval(visited) for key in self)
        if self.safe_tp_name() == 'frozenset':
            return frozenset(members)
        else:
            return set(members)

    def write_repr(self, out, visited):
        """Write a Python-3-style set/frozenset repr to `out`."""
        # Emulate Python 3's set_repr
        tp_name = self.safe_tp_name()

        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())

        # Python 3's set_repr special-cases the empty set:
        if not self.field('used'):
            out.write(tp_name)
            out.write('()')
            return

        # Python 3 uses {} for set literals:
        if tp_name != 'set':
            # Subclasses and frozensets render as TypeName({...}).
            out.write(tp_name)
            out.write('(')

        out.write('{')
        first = True
        for key in self:
            if not first:
                out.write(', ')
            first = False
            key.write_repr(out, visited)
        out.write('}')

        if tp_name != 'set':
            out.write(')')
as a Python 3 bytes literal, i.e. with a "b" prefix + + # Get a PyStringObject* within the Python 2 gdb process: + proxy = self.proxyval(visited) + + # Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr + # to Python 2 code: + quote = "'" + if "'" in proxy and not '"' in proxy: + quote = '"' + out.write('b') + out.write(quote) + for byte in proxy: + if byte == quote or byte == '\\': + out.write('\\') + out.write(byte) + elif byte == '\t': + out.write('\\t') + elif byte == '\n': + out.write('\\n') + elif byte == '\r': + out.write('\\r') + elif byte < ' ' or ord(byte) >= 0x7f: + out.write('\\x') + out.write(hexdigits[(ord(byte) & 0xf0) >> 4]) + out.write(hexdigits[ord(byte) & 0xf]) + else: + out.write(byte) + out.write(quote) + +class PyTupleObjectPtr(PyObjectPtr): + _typename = 'PyTupleObject' + + def __getitem__(self, i): + # Get the gdb.Value for the (PyObject*) with the given index: + field_ob_item = self.field('ob_item') + return field_ob_item[i] + + def proxyval(self, visited): + # Guard against infinite loops: + if self.as_address() in visited: + return ProxyAlreadyVisited('(...)') + visited.add(self.as_address()) + + result = tuple(PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited) + for i in safe_range(int_from_int(self.field('ob_size')))) + return result + + def write_repr(self, out, visited): + # Guard against infinite loops: + if self.as_address() in visited: + out.write('(...)') + return + visited.add(self.as_address()) + + out.write('(') + for i in safe_range(int_from_int(self.field('ob_size'))): + if i > 0: + out.write(', ') + element = PyObjectPtr.from_pyobject_ptr(self[i]) + element.write_repr(out, visited) + if self.field('ob_size') == 1: + out.write(',)') + else: + out.write(')') + +class PyTypeObjectPtr(PyObjectPtr): + _typename = 'PyTypeObject' + + +def _unichr_is_printable(char): + # Logic adapted from Python 3's Tools/unicode/makeunicodedata.py + if char == u" ": + return True + import unicodedata + return 
unicodedata.category(char) not in ("C", "Z") + +if sys.maxunicode >= 0x10000: + _unichr = unichr +else: + # Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb + def _unichr(x): + if x < 0x10000: + return unichr(x) + x -= 0x10000 + ch1 = 0xD800 | (x >> 10) + ch2 = 0xDC00 | (x & 0x3FF) + return unichr(ch1) + unichr(ch2) + + +class PyUnicodeObjectPtr(PyObjectPtr): + _typename = 'PyUnicodeObject' + + def char_width(self): + _type_Py_UNICODE = gdb.lookup_type('Py_UNICODE') + return _type_Py_UNICODE.sizeof + + def proxyval(self, visited): + global _is_pep393 + if _is_pep393 is None: + fields = gdb.lookup_type('PyUnicodeObject').fields() + _is_pep393 = 'data' in [f.name for f in fields] + if _is_pep393: + # Python 3.3 and newer + may_have_surrogates = False + compact = self.field('_base') + ascii = compact['_base'] + state = ascii['state'] + is_compact_ascii = (int(state['ascii']) and int(state['compact'])) + if not int(state['ready']): + # string is not ready + field_length = long(compact['wstr_length']) + may_have_surrogates = True + field_str = ascii['wstr'] + else: + field_length = long(ascii['length']) + if is_compact_ascii: + field_str = ascii.address + 1 + elif int(state['compact']): + field_str = compact.address + 1 + else: + field_str = self.field('data')['any'] + repr_kind = int(state['kind']) + if repr_kind == 1: + field_str = field_str.cast(_type_unsigned_char_ptr()) + elif repr_kind == 2: + field_str = field_str.cast(_type_unsigned_short_ptr()) + elif repr_kind == 4: + field_str = field_str.cast(_type_unsigned_int_ptr()) + else: + # Python 3.2 and earlier + field_length = long(self.field('length')) + field_str = self.field('str') + may_have_surrogates = self.char_width() == 2 + + # Gather a list of ints from the Py_UNICODE array; these are either + # UCS-1, UCS-2 or UCS-4 code points: + if not may_have_surrogates: + Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)] + else: + # A more elaborate routine if sizeof(Py_UNICODE) 
is 2 in the + # inferior process: we must join surrogate pairs. + Py_UNICODEs = [] + i = 0 + limit = safety_limit(field_length) + while i < limit: + ucs = int(field_str[i]) + i += 1 + if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length: + Py_UNICODEs.append(ucs) + continue + # This could be a surrogate pair. + ucs2 = int(field_str[i]) + if ucs2 < 0xDC00 or ucs2 > 0xDFFF: + continue + code = (ucs & 0x03FF) << 10 + code |= ucs2 & 0x03FF + code += 0x00010000 + Py_UNICODEs.append(code) + i += 1 + + # Convert the int code points to unicode characters, and generate a + # local unicode instance. + # This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb). + result = u''.join([ + (_unichr(ucs) if ucs <= 0x10ffff else '\ufffd') + for ucs in Py_UNICODEs]) + return result + + def write_repr(self, out, visited): + # Write this out as a Python 3 str literal, i.e. without a "u" prefix + + # Get a PyUnicodeObject* within the Python 2 gdb process: + proxy = self.proxyval(visited) + + # Transliteration of Python 3's Object/unicodeobject.c:unicode_repr + # to Python 2: + if "'" in proxy and '"' not in proxy: + quote = '"' + else: + quote = "'" + out.write(quote) + + i = 0 + while i < len(proxy): + ch = proxy[i] + i += 1 + + # Escape quotes and backslashes + if ch == quote or ch == '\\': + out.write('\\') + out.write(ch) + + # Map special whitespace to '\t', \n', '\r' + elif ch == '\t': + out.write('\\t') + elif ch == '\n': + out.write('\\n') + elif ch == '\r': + out.write('\\r') + + # Map non-printable US ASCII to '\xhh' */ + elif ch < ' ' or ch == 0x7F: + out.write('\\x') + out.write(hexdigits[(ord(ch) >> 4) & 0x000F]) + out.write(hexdigits[ord(ch) & 0x000F]) + + # Copy ASCII characters as-is + elif ord(ch) < 0x7F: + out.write(ch) + + # Non-ASCII characters + else: + ucs = ch + ch2 = None + if sys.maxunicode < 0x10000: + # If sizeof(Py_UNICODE) is 2 here (in gdb), join + # surrogate pairs before calling _unichr_is_printable. 
class wrapperobject(PyObjectPtr):
    """Class wrapping a gdb.Value that's a wrapperobject* i.e. a bound
    slot wrapper ("method-wrapper") within the process being debugged."""
    _typename = 'wrapperobject'

    def safe_name(self):
        # repr() of the wrapped slot's name, or a placeholder on error.
        try:
            name = self.field('descr')['d_base']['name'].string()
            return repr(name)
        except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
            # BUGFIX: the placeholder literal had collapsed to ''; restore
            # the upstream libpython text.
            return '<unknown name>'

    def safe_tp_name(self):
        # tp_name of the object the wrapper is bound to, or a placeholder.
        try:
            return self.field('self')['ob_type']['tp_name'].string()
        except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
            # BUGFIX: restore the collapsed placeholder literal.
            return '<unknown tp_name>'

    def safe_self_addresss(self):
        # Hex address of the bound object, or a placeholder on error.
        try:
            address = long(self.field('self'))
            return '%#x' % address
        except (NullPyObjectPtr, RuntimeError):
            # BUGFIX: restore the collapsed placeholder literal.
            return '<failed to get self address>'

    def proxyval(self, visited):
        name = self.safe_name()
        tp_name = self.safe_tp_name()
        self_address = self.safe_self_addresss()
        # BUGFIX: the format string had collapsed to ""; restore the
        # upstream "<method-wrapper ...>" repr.
        return ("<method-wrapper %s of %s object at %s>"
                % (name, tp_name, self_address))

    def write_repr(self, out, visited):
        proxy = self.proxyval(visited)
        out.write(proxy)
def pretty_printer_lookup(gdbval):
    """gdb pretty-printer hook: return a PyObjectPtrPrinter when *gdbval*
    is a pointer to one of the recognised Python object structs, else None.
    """
    val_type = gdbval.type.unqualified()
    if val_type.code != gdb.TYPE_CODE_PTR:
        # Only pointer types get a printer.
        return None

    pointee_name = str(val_type.target().unqualified())
    recognised = ("PyObject", "PyFrameObject", "PyUnicodeObject",
                  "wrapperobject")
    if pointee_name in recognised:
        return PyObjectPtrPrinter(gdbval)
select frame: ' + 'this build of gdb does not expose a gdb.Frame.select method') + return False + self._gdbframe.select() + return True + + def get_index(self): + '''Calculate index of frame, starting at 0 for the newest frame within + this thread''' + index = 0 + # Go down until you reach the newest frame: + iter_frame = self + while iter_frame.newer(): + index += 1 + iter_frame = iter_frame.newer() + return index + + # We divide frames into: + # - "python frames": + # - "bytecode frames" i.e. PyEval_EvalFrameEx + # - "other python frames": things that are of interest from a python + # POV, but aren't bytecode (e.g. GC, GIL) + # - everything else + + def is_python_frame(self): + '''Is this a _PyEval_EvalFrameDefault frame, or some other important + frame? (see is_other_python_frame for what "important" means in this + context)''' + if self.is_evalframe(): + return True + if self.is_other_python_frame(): + return True + return False + + def is_evalframe(self): + '''Is this a _PyEval_EvalFrameDefault frame?''' + if self._gdbframe.name() == EVALFRAME: + ''' + I believe we also need to filter on the inline + struct frame_id.inline_depth, only regarding frames with + an inline depth of 0 as actually being this function + + So we reject those with type gdb.INLINE_FRAME + ''' + if self._gdbframe.type() == gdb.NORMAL_FRAME: + # We have a _PyEval_EvalFrameDefault frame: + return True + + return False + + def is_other_python_frame(self): + '''Is this frame worth displaying in python backtraces? 
+ Examples: + - waiting on the GIL + - garbage-collecting + - within a CFunction + If it is, return a descriptive string + For other frames, return False + ''' + if self.is_waiting_for_gil(): + return 'Waiting for the GIL' + + if self.is_gc_collect(): + return 'Garbage-collecting' + + # Detect invocations of PyCFunction instances: + frame = self._gdbframe + caller = frame.name() + if not caller: + return False + + if (caller.startswith('cfunction_vectorcall_') or + caller == 'cfunction_call'): + arg_name = 'func' + # Within that frame: + # "func" is the local containing the PyObject* of the + # PyCFunctionObject instance + # "f" is the same value, but cast to (PyCFunctionObject*) + # "self" is the (PyObject*) of the 'self' + try: + # Use the prettyprinter for the func: + func = frame.read_var(arg_name) + return str(func) + except ValueError: + return ('PyCFunction invocation (unable to read %s: ' + 'missing debuginfos?)' % arg_name) + except RuntimeError: + return 'PyCFunction invocation (unable to read %s)' % arg_name + + if caller == 'wrapper_call': + arg_name = 'wp' + try: + func = frame.read_var(arg_name) + return str(func) + except ValueError: + return ('' % arg_name) + except RuntimeError: + return '' % arg_name + + # This frame isn't worth reporting: + return False + + def is_waiting_for_gil(self): + '''Is this frame waiting on the GIL?''' + # This assumes the _POSIX_THREADS version of Python/ceval_gil.h: + name = self._gdbframe.name() + if name: + return (name == 'take_gil') + + def is_gc_collect(self): + '''Is this frame "collect" within the garbage-collector?''' + return self._gdbframe.name() == 'collect' + + def get_pyop(self): + try: + f = self._gdbframe.read_var('f') + frame = PyFrameObjectPtr.from_pyobject_ptr(f) + if not frame.is_optimized_out(): + return frame + # gdb is unable to get the "f" argument of PyEval_EvalFrameEx() + # because it was "optimized out". Try to get "f" from the frame + # of the caller, PyEval_EvalCodeEx(). 
    def print_summary(self):
        # Write a "#<index> <description>" summary for this frame to
        # stdout, followed by the current source line when available.
        if self.is_evalframe():
            # A bytecode-interpreter frame: describe its PyFrameObject.
            pyop = self.get_pyop()
            if pyop:
                line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
                write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
                if not pyop.is_optimized_out():
                    line = pyop.current_line()
                    if line is not None:
                        sys.stdout.write(' %s\n' % line.strip())
            else:
                sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
        else:
            # Not a bytecode frame: maybe a GIL-wait, GC or C-function
            # frame (see is_other_python_frame for the descriptive text).
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write('#%i %s\n' % (self.get_index(), info))
            else:
                sys.stdout.write('#%i\n' % self.get_index())
information)\n') + else: + info = self.is_other_python_frame() + if info: + sys.stdout.write(' %s\n' % info) + else: + sys.stdout.write(' (not a python frame)\n') + +class PyList(gdb.Command): + '''List the current Python source code, if any + + Use + py-list START + to list at a different line number within the python source. + + Use + py-list START, END + to list a specific range of lines within the python source. + ''' + + def __init__(self): + gdb.Command.__init__ (self, + "py-list", + gdb.COMMAND_FILES, + gdb.COMPLETE_NONE) + + + def invoke(self, args, from_tty): + import re + + start = None + end = None + + m = re.match(r'\s*(\d+)\s*', args) + if m: + start = int(m.group(0)) + end = start + 10 + + m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args) + if m: + start, end = map(int, m.groups()) + + # py-list requires an actual PyEval_EvalFrameEx frame: + frame = Frame.get_selected_bytecode_frame() + if not frame: + print('Unable to locate gdb frame for python bytecode interpreter') + return + + pyop = frame.get_pyop() + if not pyop or pyop.is_optimized_out(): + print(UNABLE_READ_INFO_PYTHON_FRAME) + return + + filename = pyop.filename() + lineno = pyop.current_line_num() + if lineno is None: + print('Unable to read python frame line number') + return + + if start is None: + start = lineno - 5 + end = lineno + 5 + + if start<1: + start = 1 + + try: + f = open(os_fsencode(filename), 'r') + except IOError as err: + sys.stdout.write('Unable to open %s: %s\n' + % (filename, err)) + return + with f: + all_lines = f.readlines() + # start and end are 1-based, all_lines is 0-based; + # so [start-1:end] as a python slice gives us [start, end] as a + # closed interval + for i, line in enumerate(all_lines[start-1:end]): + linestr = str(i+start) + # Highlight current line: + if i + start == lineno: + linestr = '>' + linestr + sys.stdout.write('%4s %s' % (linestr, line)) + + +# ...and register the command: +PyList() + +def move_in_stack(move_up): + '''Move up or down the stack 
(for the py-up/py-down command)''' + frame = Frame.get_selected_python_frame() + if not frame: + print('Unable to locate python frame') + return + + while frame: + if move_up: + iter_frame = frame.older() + else: + iter_frame = frame.newer() + + if not iter_frame: + break + + if iter_frame.is_python_frame(): + # Result: + if iter_frame.select(): + iter_frame.print_summary() + return + + frame = iter_frame + + if move_up: + print('Unable to find an older python frame') + else: + print('Unable to find a newer python frame') + +class PyUp(gdb.Command): + 'Select and print the python stack frame that called this one (if any)' + def __init__(self): + gdb.Command.__init__ (self, + "py-up", + gdb.COMMAND_STACK, + gdb.COMPLETE_NONE) + + + def invoke(self, args, from_tty): + move_in_stack(move_up=True) + +class PyDown(gdb.Command): + 'Select and print the python stack frame called by this one (if any)' + def __init__(self): + gdb.Command.__init__ (self, + "py-down", + gdb.COMMAND_STACK, + gdb.COMPLETE_NONE) + + + def invoke(self, args, from_tty): + move_in_stack(move_up=False) + +# Not all builds of gdb have gdb.Frame.select +if hasattr(gdb.Frame, 'select'): + PyUp() + PyDown() + +class PyBacktraceFull(gdb.Command): + 'Display the current python frame and all the frames within its call stack (if any)' + def __init__(self): + gdb.Command.__init__ (self, + "py-bt-full", + gdb.COMMAND_STACK, + gdb.COMPLETE_NONE) + + + def invoke(self, args, from_tty): + frame = Frame.get_selected_python_frame() + if not frame: + print('Unable to locate python frame') + return + + while frame: + if frame.is_python_frame(): + frame.print_summary() + frame = frame.older() + +PyBacktraceFull() + +class PyBacktrace(gdb.Command): + 'Display the current python frame and all the frames within its call stack (if any)' + def __init__(self): + gdb.Command.__init__ (self, + "py-bt", + gdb.COMMAND_STACK, + gdb.COMPLETE_NONE) + + + def invoke(self, args, from_tty): + frame = 
Frame.get_selected_python_frame() + if not frame: + print('Unable to locate python frame') + return + + sys.stdout.write('Traceback (most recent call first):\n') + while frame: + if frame.is_python_frame(): + frame.print_traceback() + frame = frame.older() + +PyBacktrace() + +class PyPrint(gdb.Command): + 'Look up the given python variable name, and print it' + def __init__(self): + gdb.Command.__init__ (self, + "py-print", + gdb.COMMAND_DATA, + gdb.COMPLETE_NONE) + + + def invoke(self, args, from_tty): + name = str(args) + + frame = Frame.get_selected_python_frame() + if not frame: + print('Unable to locate python frame') + return + + pyop_frame = frame.get_pyop() + if not pyop_frame: + print(UNABLE_READ_INFO_PYTHON_FRAME) + return + + pyop_var, scope = pyop_frame.get_var_by_name(name) + + if pyop_var: + print('%s %r = %s' + % (scope, + name, + pyop_var.get_truncated_repr(MAX_OUTPUT_LEN))) + else: + print('%r not found' % name) + +PyPrint() + +class PyLocals(gdb.Command): + 'Look up the given python variable name, and print it' + def __init__(self): + gdb.Command.__init__ (self, + "py-locals", + gdb.COMMAND_DATA, + gdb.COMPLETE_NONE) + + + def invoke(self, args, from_tty): + name = str(args) + + frame = Frame.get_selected_python_frame() + if not frame: + print('Unable to locate python frame') + return + + pyop_frame = frame.get_pyop() + if not pyop_frame: + print(UNABLE_READ_INFO_PYTHON_FRAME) + return + + for pyop_name, pyop_value in pyop_frame.iter_locals(): + print('%s = %s' % ( + pyop_name.proxyval(set()), + pyop_value.get_truncated_repr(MAX_OUTPUT_LEN), + )) + +PyLocals() + + +################################################################## +## added, not in CPython +################################################################## + +import re +import warnings +import tempfile +import functools +import textwrap +import itertools +import traceback + + +def dont_suppress_errors(function): + "*sigh*, readline" + @functools.wraps(function) + def 
wrapper(*args, **kwargs): + try: + return function(*args, **kwargs) + except Exception: + traceback.print_exc() + raise + + return wrapper + +class PyGlobals(gdb.Command): + 'List all the globals in the currently select Python frame' + def __init__(self): + gdb.Command.__init__ (self, + "py-globals", + gdb.COMMAND_DATA, + gdb.COMPLETE_NONE) + + @dont_suppress_errors + def invoke(self, args, from_tty): + name = str(args) + + frame = Frame.get_selected_python_frame() + if not frame: + print('Unable to locate python frame') + return + + pyop_frame = frame.get_pyop() + if not pyop_frame: + print(UNABLE_READ_INFO_PYTHON_FRAME) + return + + for pyop_name, pyop_value in pyop_frame.iter_locals(): + print('%s = %s' + % (pyop_name.proxyval(set()), + pyop_value.get_truncated_repr(MAX_OUTPUT_LEN))) + + def get_namespace(self, pyop_frame): + return pyop_frame.iter_globals() + + +PyGlobals() + +# This function used to be a part of CPython's libpython.py (as a member function of frame). +# It isn't anymore, so I copied it. 
+def is_evalframeex(frame): + '''Is this a PyEval_EvalFrameEx frame?''' + if frame._gdbframe.name() == 'PyEval_EvalFrameEx': + ''' + I believe we also need to filter on the inline + struct frame_id.inline_depth, only regarding frames with + an inline depth of 0 as actually being this function + + So we reject those with type gdb.INLINE_FRAME + ''' + if frame._gdbframe.type() == gdb.NORMAL_FRAME: + # We have a PyEval_EvalFrameEx frame: + return True + + return False + +class PyNameEquals(gdb.Function): + + def _get_pycurframe_attr(self, attr): + frame = Frame(gdb.selected_frame()) + if is_evalframeex(frame): + pyframe = frame.get_pyop() + if pyframe is None: + warnings.warn("Use a Python debug build, Python breakpoints " + "won't work otherwise.") + return None + + return getattr(pyframe, attr).proxyval(set()) + + return None + + @dont_suppress_errors + def invoke(self, funcname): + attr = self._get_pycurframe_attr('co_name') + return attr is not None and attr == funcname.string() + +PyNameEquals("pyname_equals") + + +class PyModEquals(PyNameEquals): + + @dont_suppress_errors + def invoke(self, modname): + attr = self._get_pycurframe_attr('co_filename') + if attr is not None: + filename, ext = os.path.splitext(os.path.basename(attr)) + return filename == modname.string() + return False + +PyModEquals("pymod_equals") + + +class PyBreak(gdb.Command): + """ + Set a Python breakpoint. Examples: + + Break on any function or method named 'func' in module 'modname' + + py-break modname.func + + Break on any function or method named 'func' + + py-break func + """ + + @dont_suppress_errors + def invoke(self, funcname, from_tty): + if '.' 
in funcname: + modname, dot, funcname = funcname.rpartition('.') + cond = '$pyname_equals("%s") && $pymod_equals("%s")' % (funcname, + modname) + else: + cond = '$pyname_equals("%s")' % funcname + + gdb.execute('break PyEval_EvalFrameEx if ' + cond) + +PyBreak("py-break", gdb.COMMAND_RUNNING, gdb.COMPLETE_NONE) + + +class _LoggingState(object): + """ + State that helps to provide a reentrant gdb.execute() function. + """ + + def __init__(self): + f = tempfile.NamedTemporaryFile('r+') + self.file = f + self.filename = f.name + self.fd = f.fileno() + _execute("set logging file %s" % self.filename) + self.file_position_stack = [] + + def __enter__(self): + if not self.file_position_stack: + _execute("set logging redirect on") + _execute("set logging on") + _execute("set pagination off") + + self.file_position_stack.append(os.fstat(self.fd).st_size) + return self + + def getoutput(self): + gdb.flush() + self.file.seek(self.file_position_stack[-1]) + result = self.file.read() + return result + + def __exit__(self, exc_type, exc_val, tb): + startpos = self.file_position_stack.pop() + self.file.seek(startpos) + self.file.truncate() + if not self.file_position_stack: + _execute("set logging off") + _execute("set logging redirect off") + _execute("set pagination on") + + +def execute(command, from_tty=False, to_string=False): + """ + Replace gdb.execute() with this function and have it accept a 'to_string' + argument (new in 7.2). Have it properly capture stderr also. Ensure + reentrancy. + """ + if to_string: + with _logging_state as state: + _execute(command, from_tty) + return state.getoutput() + else: + _execute(command, from_tty) + + +_execute = gdb.execute +gdb.execute = execute +_logging_state = _LoggingState() + + +def get_selected_inferior(): + """ + Return the selected inferior in gdb. + """ + # Woooh, another bug in gdb! Is there an end in sight? 
+ # http://sourceware.org/bugzilla/show_bug.cgi?id=12212 + return gdb.inferiors()[0] + + selected_thread = gdb.selected_thread() + + for inferior in gdb.inferiors(): + for thread in inferior.threads(): + if thread == selected_thread: + return inferior + + +def source_gdb_script(script_contents, to_string=False): + """ + Source a gdb script with script_contents passed as a string. This is useful + to provide defines for py-step and py-next to make them repeatable (this is + not possible with gdb.execute()). See + http://sourceware.org/bugzilla/show_bug.cgi?id=12216 + """ + fd, filename = tempfile.mkstemp() + f = os.fdopen(fd, 'w') + f.write(script_contents) + f.close() + gdb.execute("source %s" % filename, to_string=to_string) + os.remove(filename) + + +def register_defines(): + source_gdb_script(textwrap.dedent("""\ + define py-step + -py-step + end + + define py-next + -py-next + end + + document py-step + %s + end + + document py-next + %s + end + """) % (PyStep.__doc__, PyNext.__doc__)) + + +def stackdepth(frame): + "Tells the stackdepth of a gdb frame." + depth = 0 + while frame: + frame = frame.older() + depth += 1 + + return depth + + +class ExecutionControlCommandBase(gdb.Command): + """ + Superclass for language specific execution control. Language specific + features should be implemented by lang_info using the LanguageInfo + interface. 'name' is the name of the command. 
+ """ + + def __init__(self, name, lang_info): + super(ExecutionControlCommandBase, self).__init__( + name, gdb.COMMAND_RUNNING, gdb.COMPLETE_NONE) + self.lang_info = lang_info + + def install_breakpoints(self): + all_locations = itertools.chain( + self.lang_info.static_break_functions(), + self.lang_info.runtime_break_functions()) + + for location in all_locations: + result = gdb.execute('break %s' % location, to_string=True) + yield re.search(r'Breakpoint (\d+)', result).group(1) + + def delete_breakpoints(self, breakpoint_list): + for bp in breakpoint_list: + gdb.execute("delete %s" % bp) + + def filter_output(self, result): + reflags = re.MULTILINE + + output_on_halt = [ + (r'^Program received signal .*', reflags|re.DOTALL), + (r'.*[Ww]arning.*', 0), + (r'^Program exited .*', reflags), + ] + + output_always = [ + # output when halting on a watchpoint + (r'^(Old|New) value = .*', reflags), + # output from the 'display' command + (r'^\d+: \w+ = .*', reflags), + ] + + def filter_output(regexes): + output = [] + for regex, flags in regexes: + for match in re.finditer(regex, result, flags): + output.append(match.group(0)) + + return '\n'.join(output) + + # Filter the return value output of the 'finish' command + match_finish = re.search(r'^Value returned is \$\d+ = (.*)', result, + re.MULTILINE) + if match_finish: + finish_output = 'Value returned: %s\n' % match_finish.group(1) + else: + finish_output = '' + + return (filter_output(output_on_halt), + finish_output + filter_output(output_always)) + + def stopped(self): + return get_selected_inferior().pid == 0 + + def finish_executing(self, result): + """ + After doing some kind of code running in the inferior, print the line + of source code or the result of the last executed gdb command (passed + in as the `result` argument). 
+ """ + output_on_halt, output_always = self.filter_output(result) + + if self.stopped(): + print(output_always) + print(output_on_halt) + else: + frame = gdb.selected_frame() + source_line = self.lang_info.get_source_line(frame) + if self.lang_info.is_relevant_function(frame): + raised_exception = self.lang_info.exc_info(frame) + if raised_exception: + print(raised_exception) + + if source_line: + if output_always.rstrip(): + print(output_always.rstrip()) + print(source_line) + else: + print(result) + + def _finish(self): + """ + Execute until the function returns (or until something else makes it + stop) + """ + if gdb.selected_frame().older() is not None: + return gdb.execute('finish', to_string=True) + else: + # outermost frame, continue + return gdb.execute('cont', to_string=True) + + def _finish_frame(self): + """ + Execute until the function returns to a relevant caller. + """ + while True: + result = self._finish() + + try: + frame = gdb.selected_frame() + except RuntimeError: + break + + hitbp = re.search(r'Breakpoint (\d+)', result) + is_relevant = self.lang_info.is_relevant_function(frame) + if hitbp or is_relevant or self.stopped(): + break + + return result + + def finish(self, *args): + "Implements the finish command." + result = self._finish_frame() + self.finish_executing(result) + + def step(self, stepinto, stepover_command='next'): + """ + Do a single step or step-over. Returns the result of the last gdb + command that made execution stop. + + This implementation, for stepping, sets (conditional) breakpoints for + all functions that are deemed relevant. It then does a step over until + either something halts execution, or until the next line is reached. + + If, however, stepover_command is given, it should be a string gdb + command that continues execution in some way. The idea is that the + caller has set a (conditional) breakpoint or watchpoint that can work + more efficiently than the step-over loop. 
For Python this means setting + a watchpoint for f->f_lasti, which means we can then subsequently + "finish" frames. + We want f->f_lasti instead of f->f_lineno, because the latter only + works properly with local trace functions, see + PyFrameObjectPtr.current_line_num and PyFrameObjectPtr.addr2line. + """ + if stepinto: + breakpoint_list = list(self.install_breakpoints()) + + beginframe = gdb.selected_frame() + + if self.lang_info.is_relevant_function(beginframe): + # If we start in a relevant frame, initialize stuff properly. If + # we don't start in a relevant frame, the loop will halt + # immediately. So don't call self.lang_info.lineno() as it may + # raise for irrelevant frames. + beginline = self.lang_info.lineno(beginframe) + + if not stepinto: + depth = stackdepth(beginframe) + + newframe = beginframe + + while True: + if self.lang_info.is_relevant_function(newframe): + result = gdb.execute(stepover_command, to_string=True) + else: + result = self._finish_frame() + + if self.stopped(): + break + + newframe = gdb.selected_frame() + is_relevant_function = self.lang_info.is_relevant_function(newframe) + try: + framename = newframe.name() + except RuntimeError: + framename = None + + m = re.search(r'Breakpoint (\d+)', result) + if m: + if is_relevant_function and m.group(1) in breakpoint_list: + # although we hit a breakpoint, we still need to check + # that the function, in case hit by a runtime breakpoint, + # is in the right context + break + + if newframe != beginframe: + # new function + + if not stepinto: + # see if we returned to the caller + newdepth = stackdepth(newframe) + is_relevant_function = (newdepth < depth and + is_relevant_function) + + if is_relevant_function: + break + else: + # newframe equals beginframe, check for a difference in the + # line number + lineno = self.lang_info.lineno(newframe) + if lineno and lineno != beginline: + break + + if stepinto: + self.delete_breakpoints(breakpoint_list) + + self.finish_executing(result) + + def 
run(self, args, from_tty): + self.finish_executing(gdb.execute('run ' + args, to_string=True)) + + def cont(self, *args): + self.finish_executing(gdb.execute('cont', to_string=True)) + + +class LanguageInfo(object): + """ + This class defines the interface that ExecutionControlCommandBase needs to + provide language-specific execution control. + + Classes that implement this interface should implement: + + lineno(frame) + Tells the current line number (only called for a relevant frame). + If lineno is a false value it is not checked for a difference. + + is_relevant_function(frame) + tells whether we care about frame 'frame' + + get_source_line(frame) + get the line of source code for the current line (only called for a + relevant frame). If the source code cannot be retrieved this + function should return None + + exc_info(frame) -- optional + tells whether an exception was raised, if so, it should return a + string representation of the exception value, None otherwise. + + static_break_functions() + returns an iterable of function names that are considered relevant + and should halt step-into execution. This is needed to provide a + performing step-into + + runtime_break_functions() -- optional + list of functions that we should break into depending on the + context + """ + + def exc_info(self, frame): + "See this class' docstring." + + def runtime_break_functions(self): + """ + Implement this if the list of step-into functions depends on the + context. 
+ """ + return () + + +class PythonInfo(LanguageInfo): + + def pyframe(self, frame): + pyframe = Frame(frame).get_pyop() + if pyframe: + return pyframe + else: + raise gdb.RuntimeError( + "Unable to find the Python frame, run your code with a debug " + "build (configure with --with-pydebug or compile with -g).") + + def lineno(self, frame): + return self.pyframe(frame).current_line_num() + + def is_relevant_function(self, frame): + return Frame(frame).is_evalframeex() + + def get_source_line(self, frame): + try: + pyframe = self.pyframe(frame) + return '%4d %s' % (pyframe.current_line_num(), + pyframe.current_line().rstrip()) + except IOError: + return None + + def exc_info(self, frame): + try: + tstate = frame.read_var('tstate').dereference() + if gdb.parse_and_eval('tstate->frame == f'): + # tstate local variable initialized, check for an exception + if sys.version_info >= (3, 12, 0, 'alpha', 6): + inf_type = inf_value = tstate['current_exception'] + else: + inf_type = tstate['curexc_type'] + inf_value = tstate['curexc_value'] + + if inf_type: + return 'An exception was raised: %s' % (inf_value,) + except (ValueError, RuntimeError): + # Could not read the variable tstate or it's memory, it's ok + pass + + def static_break_functions(self): + yield 'PyEval_EvalFrameEx' + + +class PythonStepperMixin(object): + """ + Make this a mixin so CyStep can also inherit from this and use a + CythonCodeStepper at the same time. + """ + + def python_step(self, stepinto): + """ + Set a watchpoint on the Python bytecode instruction pointer and try + to finish the frame + """ + output = gdb.execute('watch f->f_lasti', to_string=True) + watchpoint = int(re.search(r'[Ww]atchpoint (\d+):', output).group(1)) + self.step(stepinto=stepinto, stepover_command='finish') + gdb.execute('delete %s' % watchpoint) + + +class PyStep(ExecutionControlCommandBase, PythonStepperMixin): + "Step through Python code." 
+ + stepinto = True + + @dont_suppress_errors + def invoke(self, args, from_tty): + self.python_step(stepinto=self.stepinto) + + +class PyNext(PyStep): + "Step-over Python code." + + stepinto = False + + +class PyFinish(ExecutionControlCommandBase): + "Execute until function returns to a caller." + + invoke = dont_suppress_errors(ExecutionControlCommandBase.finish) + + +class PyRun(ExecutionControlCommandBase): + "Run the program." + + invoke = dont_suppress_errors(ExecutionControlCommandBase.run) + + +class PyCont(ExecutionControlCommandBase): + + invoke = dont_suppress_errors(ExecutionControlCommandBase.cont) + + +def _pointervalue(gdbval): + """ + Return the value of the pointer as a Python int. + + gdbval.type must be a pointer type + """ + # don't convert with int() as it will raise a RuntimeError + if gdbval.address is not None: + return int(gdbval.address) + else: + # the address attribute is None sometimes, in which case we can + # still convert the pointer to an int + return int(gdbval) + + +def pointervalue(gdbval): + pointer = _pointervalue(gdbval) + try: + if pointer < 0: + raise gdb.GdbError("Negative pointer value, presumably a bug " + "in gdb, aborting.") + except RuntimeError: + # work around yet another bug in gdb where you get random behaviour + # and tracebacks + pass + + return pointer + + +def get_inferior_unicode_postfix(): + try: + gdb.parse_and_eval('PyUnicode_FromEncodedObject') + except RuntimeError: + try: + gdb.parse_and_eval('PyUnicodeUCS2_FromEncodedObject') + except RuntimeError: + return 'UCS4' + else: + return 'UCS2' + else: + return '' + + +class PythonCodeExecutor(object): + + Py_single_input = 256 + Py_file_input = 257 + Py_eval_input = 258 + + def malloc(self, size): + chunk = (gdb.parse_and_eval("(void *) malloc((size_t) %d)" % size)) + + pointer = pointervalue(chunk) + if pointer == 0: + raise gdb.GdbError("No memory could be allocated in the inferior.") + + return pointer + + def alloc_string(self, string): + pointer = 
self.malloc(len(string)) + get_selected_inferior().write_memory(pointer, string) + + return pointer + + def alloc_pystring(self, string): + stringp = self.alloc_string(string) + PyString_FromStringAndSize = 'PyString_FromStringAndSize' + + try: + gdb.parse_and_eval(PyString_FromStringAndSize) + except RuntimeError: + # Python 3 + PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' % + (get_inferior_unicode_postfix(),)) + + try: + result = gdb.parse_and_eval( + '(PyObject *) %s((char *) %d, (size_t) %d)' % ( + PyString_FromStringAndSize, stringp, len(string))) + finally: + self.free(stringp) + + pointer = pointervalue(result) + if pointer == 0: + raise gdb.GdbError("Unable to allocate Python string in " + "the inferior.") + + return pointer + + def free(self, pointer): + gdb.parse_and_eval("(void) free((void *) %d)" % pointer) + + def incref(self, pointer): + "Increment the reference count of a Python object in the inferior." + gdb.parse_and_eval('Py_IncRef((PyObject *) %d)' % pointer) + + def xdecref(self, pointer): + "Decrement the reference count of a Python object in the inferior." + # Py_DecRef is like Py_XDECREF, but a function. So we don't have + # to check for NULL. This should also decref all our allocated + # Python strings. + gdb.parse_and_eval('Py_DecRef((PyObject *) %d)' % pointer) + + def evalcode(self, code, input_type, global_dict=None, local_dict=None): + """ + Evaluate python code `code` given as a string in the inferior and + return the result as a gdb.Value. Returns a new reference in the + inferior. + + Of course, executing any code in the inferior may be dangerous and may + leave the debuggee in an unsafe state or terminate it altogether. 
+ """ + if '\0' in code: + raise gdb.GdbError("String contains NUL byte.") + + code += '\0' + + pointer = self.alloc_string(code) + + globalsp = pointervalue(global_dict) + localsp = pointervalue(local_dict) + + if globalsp == 0 or localsp == 0: + raise gdb.GdbError("Unable to obtain or create locals or globals.") + + code = """ + PyRun_String( + (char *) %(code)d, + (int) %(start)d, + (PyObject *) %(globals)s, + (PyObject *) %(locals)d) + """ % dict(code=pointer, start=input_type, + globals=globalsp, locals=localsp) + + with FetchAndRestoreError(): + try: + pyobject_return_value = gdb.parse_and_eval(code) + finally: + self.free(pointer) + + return pyobject_return_value + + +class FetchAndRestoreError(PythonCodeExecutor): + """ + Context manager that fetches the error indicator in the inferior and + restores it on exit. + """ + + def __init__(self): + self.sizeof_PyObjectPtr = gdb.lookup_type('PyObject').pointer().sizeof + self.pointer = self.malloc(self.sizeof_PyObjectPtr * 3) + + type = self.pointer + value = self.pointer + self.sizeof_PyObjectPtr + traceback = self.pointer + self.sizeof_PyObjectPtr * 2 + + self.errstate = type, value, traceback + + def __enter__(self): + gdb.parse_and_eval("PyErr_Fetch(%d, %d, %d)" % self.errstate) + + def __exit__(self, *args): + if gdb.parse_and_eval("(int) PyErr_Occurred()"): + gdb.parse_and_eval("PyErr_Print()") + + pyerr_restore = ("PyErr_Restore(" + "(PyObject *) *%d," + "(PyObject *) *%d," + "(PyObject *) *%d)") + + try: + gdb.parse_and_eval(pyerr_restore % self.errstate) + finally: + self.free(self.pointer) + + +class FixGdbCommand(gdb.Command): + + def __init__(self, command, actual_command): + super(FixGdbCommand, self).__init__(command, gdb.COMMAND_DATA, + gdb.COMPLETE_NONE) + self.actual_command = actual_command + + def fix_gdb(self): + """ + It seems that invoking either 'cy exec' and 'py-exec' work perfectly + fine, but after this gdb's python API is entirely broken. 
+ Maybe some uncleared exception value is still set? + sys.exc_clear() didn't help. A demonstration: + + (gdb) cy exec 'hello' + 'hello' + (gdb) python gdb.execute('cont') + RuntimeError: Cannot convert value to int. + Error while executing Python code. + (gdb) python gdb.execute('cont') + [15148 refs] + + Program exited normally. + """ + warnings.filterwarnings('ignore', r'.*', RuntimeWarning, + re.escape(__name__)) + try: + int(gdb.parse_and_eval("(void *) 0")) == 0 + except RuntimeError: + pass + # warnings.resetwarnings() + + @dont_suppress_errors + def invoke(self, args, from_tty): + self.fix_gdb() + try: + gdb.execute('%s %s' % (self.actual_command, args)) + except RuntimeError as e: + raise gdb.GdbError(str(e)) + self.fix_gdb() + + +def _evalcode_python(executor, code, input_type): + """ + Execute Python code in the most recent stack frame. + """ + global_dict = gdb.parse_and_eval('PyEval_GetGlobals()') + local_dict = gdb.parse_and_eval('PyEval_GetLocals()') + + if (pointervalue(global_dict) == 0 or pointervalue(local_dict) == 0): + raise gdb.GdbError("Unable to find the locals or globals of the " + "most recent Python function (relative to the " + "selected frame).") + + return executor.evalcode(code, input_type, global_dict, local_dict) + + +class PyExec(gdb.Command): + + def readcode(self, expr): + if expr: + return expr, PythonCodeExecutor.Py_single_input + else: + lines = [] + while True: + try: + if sys.version_info[0] == 2: + line = raw_input() + else: + line = input('>') + except EOFError: + break + else: + if line.rstrip() == 'end': + break + + lines.append(line) + + return '\n'.join(lines), PythonCodeExecutor.Py_file_input + + @dont_suppress_errors + def invoke(self, expr, from_tty): + expr, input_type = self.readcode(expr) + executor = PythonCodeExecutor() + executor.xdecref(_evalcode_python(executor, input_type, global_dict, local_dict)) + + +gdb.execute('set breakpoint pending on') + +if hasattr(gdb, 'GdbError'): + # Wrap py-step and py-next in 
gdb defines to make them repeatable. + py_step = PyStep('-py-step', PythonInfo()) + py_next = PyNext('-py-next', PythonInfo()) + register_defines() + py_finish = PyFinish('py-finish', PythonInfo()) + py_run = PyRun('py-run', PythonInfo()) + py_cont = PyCont('py-cont', PythonInfo()) + + py_exec = FixGdbCommand('py-exec', '-py-exec') + _py_exec = PyExec("-py-exec", gdb.COMMAND_DATA, gdb.COMPLETE_NONE) +else: + warnings.warn("Use gdb 7.2 or higher to use the py-exec command.") diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a24001f15fa6b82dd3c41ab172da0df1dbe2970 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__init__.py @@ -0,0 +1,2 @@ +from Cython.Distutils.build_ext import build_ext +from Cython.Distutils.extension import Extension diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91c2f1a0944d89a52bf3559e7c4e3964d8c5c47b Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/build_ext.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/build_ext.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d311ba0a50be45ee3f4d010c27d1b9c7ee0f577 Binary files /dev/null and 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/build_ext.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/extension.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/extension.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..034722938a78b510ba0acb18b22c4c21328d9b93 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/extension.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/old_build_ext.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/old_build_ext.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89377854e75cb49cffadf03a46310eb2923ec9c2 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/__pycache__/old_build_ext.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/build_ext.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/build_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..f04a4e0fa708f8c9e6e5e777d823e6bf415d6dbd --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/build_ext.py @@ -0,0 +1,138 @@ +import sys +import os + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + +# Always inherit from the "build_ext" in distutils since setuptools already imports +# it from Cython if available, and does the proper distutils fallback otherwise. 
+# https://github.com/pypa/setuptools/blob/9f1822ee910df3df930a98ab99f66d18bb70659b/setuptools/command/build_ext.py#L16 + +# setuptools imports Cython's "build_ext", so make sure we go first. +_build_ext_module = sys.modules.get('setuptools.command.build_ext') +if _build_ext_module is None: + try: + import distutils.command.build_ext as _build_ext_module + except ImportError: + # Python 3.12 no longer has distutils, but setuptools can replace it. + try: + import setuptools.command.build_ext as _build_ext_module + except ImportError: + raise ImportError("'distutils' cannot be imported. Please install setuptools.") + + +# setuptools remembers the original distutils "build_ext" as "_du_build_ext" +_build_ext = getattr(_build_ext_module, '_du_build_ext', None) +if _build_ext is None: + _build_ext = getattr(_build_ext_module, 'build_ext', None) +if _build_ext is None: + from distutils.command.build_ext import build_ext as _build_ext + + +class build_ext(_build_ext, object): + + user_options = _build_ext.user_options + [ + ('cython-cplus', None, + "generate C++ source files"), + ('cython-create-listing', None, + "write errors to a listing file"), + ('cython-line-directives', None, + "emit source line directives"), + ('cython-include-dirs=', None, + "path to the Cython include files" + _build_ext.sep_by), + ('cython-c-in-temp', None, + "put generated C files in temp directory"), + ('cython-gen-pxi', None, + "generate .pxi file for public declarations"), + ('cython-directives=', None, + "compiler directive overrides"), + ('cython-gdb', None, + "generate debug information for cygdb"), + ('cython-compile-time-env', None, + "cython compile time environment"), + ] + + boolean_options = _build_ext.boolean_options + [ + 'cython-cplus', 'cython-create-listing', 'cython-line-directives', + 'cython-c-in-temp', 'cython-gdb', + ] + + def initialize_options(self): + super(build_ext, self).initialize_options() + self.cython_cplus = 0 + self.cython_create_listing = 0 + 
self.cython_line_directives = 0 + self.cython_include_dirs = None + self.cython_directives = None + self.cython_c_in_temp = 0 + self.cython_gen_pxi = 0 + self.cython_gdb = False + self.cython_compile_time_env = None + + def finalize_options(self): + super(build_ext, self).finalize_options() + if self.cython_include_dirs is None: + self.cython_include_dirs = [] + elif isinstance(self.cython_include_dirs, basestring): + self.cython_include_dirs = \ + self.cython_include_dirs.split(os.pathsep) + if self.cython_directives is None: + self.cython_directives = {} + + def get_extension_attr(self, extension, option_name, default=False): + return getattr(self, option_name) or getattr(extension, option_name, default) + + def build_extension(self, ext): + from Cython.Build.Dependencies import cythonize + + # Set up the include_path for the Cython compiler: + # 1. Start with the command line option. + # 2. Add in any (unique) paths from the extension + # cython_include_dirs (if Cython.Distutils.extension is used). + # 3. Add in any (unique) paths from the extension include_dirs + includes = list(self.cython_include_dirs) + for include_dir in getattr(ext, 'cython_include_dirs', []): + if include_dir not in includes: + includes.append(include_dir) + + # In case extension.include_dirs is a generator, evaluate it and keep + # result + ext.include_dirs = list(ext.include_dirs) + for include_dir in ext.include_dirs + list(self.include_dirs): + if include_dir not in includes: + includes.append(include_dir) + + # Set up Cython compiler directives: + # 1. Start with the command line option. + # 2. Add in any (unique) entries from the extension + # cython_directives (if Cython.Distutils.extension is used). 
+ directives = dict(self.cython_directives) + if hasattr(ext, "cython_directives"): + directives.update(ext.cython_directives) + + if self.get_extension_attr(ext, 'cython_cplus'): + ext.language = 'c++' + + options = { + 'use_listing_file': self.get_extension_attr(ext, 'cython_create_listing'), + 'emit_linenums': self.get_extension_attr(ext, 'cython_line_directives'), + 'include_path': includes, + 'compiler_directives': directives, + 'build_dir': self.build_temp if self.get_extension_attr(ext, 'cython_c_in_temp') else None, + 'generate_pxi': self.get_extension_attr(ext, 'cython_gen_pxi'), + 'gdb_debug': self.get_extension_attr(ext, 'cython_gdb'), + 'c_line_in_traceback': not getattr(ext, 'no_c_in_traceback', 0), + 'compile_time_env': self.get_extension_attr(ext, 'cython_compile_time_env', default=None), + } + + new_ext = cythonize( + ext,force=self.force, quiet=self.verbose == 0, **options + )[0] + + ext.sources = new_ext.sources + super(build_ext, self).build_extension(ext) + +# backward compatibility +new_build_ext = build_ext diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/old_build_ext.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/old_build_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..cec54d93d0a7c1f6b3c15eac1348578d7aada93f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/old_build_ext.py @@ -0,0 +1,357 @@ +"""Cython.Distutils.old_build_ext + +Implements a version of the Distutils 'build_ext' command, for +building Cython extension modules. + +Note that this module is deprecated. Use cythonize() instead. 
+""" + +__revision__ = "$Id:$" + +import sys +import os +from distutils.errors import DistutilsPlatformError +from distutils.dep_util import newer, newer_group +from distutils import log +from distutils.command import build_ext as _build_ext +from distutils import sysconfig + + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + + +# FIXME: the below does not work as intended since importing 'Cython.Distutils' already +# imports this module through 'Cython/Distutils/build_ext.py', so the condition is +# always false and never prints the warning. +""" +import inspect +import warnings + +def _check_stack(path): + try: + for frame in inspect.getouterframes(inspect.currentframe(), 0): + if path in frame[1].replace(os.sep, '/'): + return True + except Exception: + pass + return False + + +if (not _check_stack('setuptools/extensions.py') + and not _check_stack('pyximport/pyxbuild.py') + and not _check_stack('Cython/Distutils/build_ext.py')): + warnings.warn( + "Cython.Distutils.old_build_ext does not properly handle dependencies " + "and is deprecated.") +""" + +extension_name_re = _build_ext.extension_name_re + +show_compilers = _build_ext.show_compilers + +class Optimization(object): + def __init__(self): + self.flags = ( + 'OPT', + 'CFLAGS', + 'CPPFLAGS', + 'EXTRA_CFLAGS', + 'BASECFLAGS', + 'PY_CFLAGS', + ) + self.state = sysconfig.get_config_vars(*self.flags) + self.config_vars = sysconfig.get_config_vars() + + + def disable_optimization(self): + "disable optimization for the C or C++ compiler" + badoptions = ('-O1', '-O2', '-O3') + + for flag, option in zip(self.flags, self.state): + if option is not None: + L = [opt for opt in option.split() if opt not in badoptions] + self.config_vars[flag] = ' '.join(L) + + def restore_state(self): + "restore the original state" + for flag, option in zip(self.flags, self.state): + if option is not None: + self.config_vars[flag] = option + + +optimization = Optimization() + + +class 
old_build_ext(_build_ext.build_ext): + + description = "build C/C++ and Cython extensions (compile/link to build directory)" + + sep_by = _build_ext.build_ext.sep_by + user_options = _build_ext.build_ext.user_options[:] + boolean_options = _build_ext.build_ext.boolean_options[:] + help_options = _build_ext.build_ext.help_options[:] + + # Add the pyrex specific data. + user_options.extend([ + ('cython-cplus', None, + "generate C++ source files"), + ('cython-create-listing', None, + "write errors to a listing file"), + ('cython-line-directives', None, + "emit source line directives"), + ('cython-include-dirs=', None, + "path to the Cython include files" + sep_by), + ('cython-c-in-temp', None, + "put generated C files in temp directory"), + ('cython-gen-pxi', None, + "generate .pxi file for public declarations"), + ('cython-directives=', None, + "compiler directive overrides"), + ('cython-gdb', None, + "generate debug information for cygdb"), + ('cython-compile-time-env', None, + "cython compile time environment"), + + # For backwards compatibility. + ('pyrex-cplus', None, + "generate C++ source files"), + ('pyrex-create-listing', None, + "write errors to a listing file"), + ('pyrex-line-directives', None, + "emit source line directives"), + ('pyrex-include-dirs=', None, + "path to the Cython include files" + sep_by), + ('pyrex-c-in-temp', None, + "put generated C files in temp directory"), + ('pyrex-gen-pxi', None, + "generate .pxi file for public declarations"), + ('pyrex-directives=', None, + "compiler directive overrides"), + ('pyrex-gdb', None, + "generate debug information for cygdb"), + ]) + + boolean_options.extend([ + 'cython-cplus', 'cython-create-listing', 'cython-line-directives', + 'cython-c-in-temp', 'cython-gdb', + + # For backwards compatibility. 
+ 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives', + 'pyrex-c-in-temp', 'pyrex-gdb', + ]) + + def initialize_options(self): + _build_ext.build_ext.initialize_options(self) + self.cython_cplus = 0 + self.cython_create_listing = 0 + self.cython_line_directives = 0 + self.cython_include_dirs = None + self.cython_directives = None + self.cython_c_in_temp = 0 + self.cython_gen_pxi = 0 + self.cython_gdb = False + self.no_c_in_traceback = 0 + self.cython_compile_time_env = None + + def __getattr__(self, name): + if name[:6] == 'pyrex_': + return getattr(self, 'cython_' + name[6:]) + else: + return _build_ext.build_ext.__getattr__(self, name) + + def __setattr__(self, name, value): + if name[:6] == 'pyrex_': + return setattr(self, 'cython_' + name[6:], value) + else: + # _build_ext.build_ext.__setattr__(self, name, value) + self.__dict__[name] = value + + def finalize_options(self): + _build_ext.build_ext.finalize_options(self) + if self.cython_include_dirs is None: + self.cython_include_dirs = [] + elif isinstance(self.cython_include_dirs, basestring): + self.cython_include_dirs = \ + self.cython_include_dirs.split(os.pathsep) + if self.cython_directives is None: + self.cython_directives = {} + # finalize_options () + + def run(self): + # We have one shot at this before build_ext initializes the compiler. + # If --pyrex-gdb is in effect as a command line option or as option + # of any Extension module, disable optimization for the C or C++ + # compiler. + if self.cython_gdb or [1 for ext in self.extensions + if getattr(ext, 'cython_gdb', False)]: + optimization.disable_optimization() + + _build_ext.build_ext.run(self) + + def check_extensions_list(self, extensions): + # Note: might get called multiple times. 
+ _build_ext.build_ext.check_extensions_list(self, extensions) + for ext in self.extensions: + ext.sources = self.cython_sources(ext.sources, ext) + + def cython_sources(self, sources, extension): + """ + Walk the list of source files in 'sources', looking for Cython + source files (.pyx and .py). Run Cython on all that are + found, and return a modified 'sources' list with Cython source + files replaced by the generated C (or C++) files. + """ + new_sources = [] + cython_sources = [] + cython_targets = {} + + # Setup create_list and cplus from the extension options if + # Cython.Distutils.extension.Extension is used, otherwise just + # use what was parsed from the command-line or the configuration file. + # cplus will also be set to true is extension.language is equal to + # 'C++' or 'c++'. + #try: + # create_listing = self.cython_create_listing or \ + # extension.cython_create_listing + # cplus = self.cython_cplus or \ + # extension.cython_cplus or \ + # (extension.language != None and \ + # extension.language.lower() == 'c++') + #except AttributeError: + # create_listing = self.cython_create_listing + # cplus = self.cython_cplus or \ + # (extension.language != None and \ + # extension.language.lower() == 'c++') + + create_listing = self.cython_create_listing or \ + getattr(extension, 'cython_create_listing', 0) + line_directives = self.cython_line_directives or \ + getattr(extension, 'cython_line_directives', 0) + no_c_in_traceback = self.no_c_in_traceback or \ + getattr(extension, 'no_c_in_traceback', 0) + cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \ + (extension.language and extension.language.lower() == 'c++') + cython_gen_pxi = self.cython_gen_pxi or getattr(extension, 'cython_gen_pxi', 0) + cython_gdb = self.cython_gdb or getattr(extension, 'cython_gdb', False) + cython_compile_time_env = self.cython_compile_time_env or \ + getattr(extension, 'cython_compile_time_env', None) + + # Set up the include_path for the Cython compiler: + 
# 1. Start with the command line option. + # 2. Add in any (unique) paths from the extension + # cython_include_dirs (if Cython.Distutils.extension is used). + # 3. Add in any (unique) paths from the extension include_dirs + includes = list(self.cython_include_dirs) + try: + for i in extension.cython_include_dirs: + if i not in includes: + includes.append(i) + except AttributeError: + pass + + # In case extension.include_dirs is a generator, evaluate it and keep + # result + extension.include_dirs = list(extension.include_dirs) + for i in extension.include_dirs: + if i not in includes: + includes.append(i) + + # Set up Cython compiler directives: + # 1. Start with the command line option. + # 2. Add in any (unique) entries from the extension + # cython_directives (if Cython.Distutils.extension is used). + directives = dict(self.cython_directives) + if hasattr(extension, "cython_directives"): + directives.update(extension.cython_directives) + + # Set the target file extension for C/C++ mode. + if cplus: + target_ext = '.cpp' + else: + target_ext = '.c' + + # Decide whether to drop the generated C files into the temp dir + # or the source tree. 
+ + if not self.inplace and (self.cython_c_in_temp + or getattr(extension, 'cython_c_in_temp', 0)): + target_dir = os.path.join(self.build_temp, "pyrex") + for package_name in extension.name.split('.')[:-1]: + target_dir = os.path.join(target_dir, package_name) + else: + target_dir = None + + newest_dependency = None + for source in sources: + (base, ext) = os.path.splitext(os.path.basename(source)) + if ext == ".py": + # FIXME: we might want to special case this some more + ext = '.pyx' + if ext == ".pyx": # Cython source file + output_dir = target_dir or os.path.dirname(source) + new_sources.append(os.path.join(output_dir, base + target_ext)) + cython_sources.append(source) + cython_targets[source] = new_sources[-1] + elif ext == '.pxi' or ext == '.pxd': + if newest_dependency is None \ + or newer(source, newest_dependency): + newest_dependency = source + else: + new_sources.append(source) + + if not cython_sources: + return new_sources + + try: + from Cython.Compiler.Main \ + import CompilationOptions, \ + default_options as cython_default_options, \ + compile as cython_compile + from Cython.Compiler.Errors import PyrexError + except ImportError: + e = sys.exc_info()[1] + print("failed to import Cython: %s" % e) + raise DistutilsPlatformError("Cython does not appear to be installed") + + module_name = extension.name + + for source in cython_sources: + target = cython_targets[source] + depends = [source] + list(extension.depends or ()) + if source[-4:].lower() == ".pyx" and os.path.isfile(source[:-3] + "pxd"): + depends += [source[:-3] + "pxd"] + rebuild = self.force or newer_group(depends, target, 'newer') + if not rebuild and newest_dependency is not None: + rebuild = newer(newest_dependency, target) + if rebuild: + log.info("cythoning %s to %s", source, target) + self.mkpath(os.path.dirname(target)) + if self.inplace: + output_dir = os.curdir + else: + output_dir = self.build_lib + options = CompilationOptions(cython_default_options, + use_listing_file = 
create_listing, + include_path = includes, + compiler_directives = directives, + output_file = target, + cplus = cplus, + emit_linenums = line_directives, + c_line_in_traceback = not no_c_in_traceback, + generate_pxi = cython_gen_pxi, + output_dir = output_dir, + gdb_debug = cython_gdb, + compile_time_env = cython_compile_time_env) + result = cython_compile(source, options=options, + full_module_name=module_name) + else: + log.info("skipping '%s' Cython extension (up-to-date)", target) + + return new_sources + + # cython_sources () + +# class build_ext diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/bytearray.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/bytearray.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1af4a6c427bd6355ecaafcdef256907b3b98adc6 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/bytearray.pxd @@ -0,0 +1,33 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + bint PyByteArray_Check(object o) + # Return true if the object o is a bytearray object or an instance of a subtype of the bytearray type. + + bint PyByteArray_CheckExact(object o) + # Return true if the object o is a bytearray object, but not an instance of a subtype of the bytearray type. + + bytearray PyByteArray_FromObject(object o) + # Return a new bytearray object from any object, o, that implements the buffer protocol. + + bytearray PyByteArray_FromStringAndSize(char *string, Py_ssize_t len) + # Create a new bytearray object from string and its length, len. On failure, NULL is returned. + + bytearray PyByteArray_Concat(object a, object b) + # Concat bytearrays a and b and return a new bytearray with the result. + + Py_ssize_t PyByteArray_Size(object bytearray) + # Return the size of bytearray after checking for a NULL pointer. 
+ + char* PyByteArray_AsString(object bytearray) + # Return the contents of bytearray as a char array after checking for a NULL pointer. + # The returned array always has an extra null byte appended. + + int PyByteArray_Resize(object bytearray, Py_ssize_t len) + # Resize the internal buffer of bytearray to len. + + char* PyByteArray_AS_STRING(object bytearray) + # Macro version of PyByteArray_AsString(). + + Py_ssize_t PyByteArray_GET_SIZE(object bytearray) + # Macro version of PyByteArray_Size(). diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/bytes.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/bytes.pxd new file mode 100644 index 0000000000000000000000000000000000000000..8998770d8df8cfa9436d70c41bda4c9b4428f58b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/bytes.pxd @@ -0,0 +1,200 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + ctypedef struct va_list + + ############################################################################ + # 7.3.1 String Objects + ############################################################################ + + # These functions raise TypeError when expecting a string + # parameter and are called with a non-string parameter. + # PyStringObject + # This subtype of PyObject represents a Python bytes object. + # PyTypeObject PyBytes_Type + # This instance of PyTypeObject represents the Python bytes type; + # it is the same object as bytes and types.BytesType in the Python + # layer. + + bint PyBytes_Check(object o) + # Return true if the object o is a string object or an instance of + # a subtype of the string type. + + bint PyBytes_CheckExact(object o) + # Return true if the object o is a string object, but not an instance of a subtype of the string type. + + bytes PyBytes_FromString(char *v) + # Return value: New reference. 
+ # Return a new string object with the value v on success, and NULL + # on failure. The parameter v must not be NULL; it will not be + # checked. + + bytes PyBytes_FromStringAndSize(char *v, Py_ssize_t len) + # Return value: New reference. + # Return a new string object with the value v and length len on + # success, and NULL on failure. If v is NULL, the contents of the + # string are uninitialized. + + bytes PyBytes_FromFormat(char *format, ...) + # Return value: New reference. + # Take a C printf()-style format string and a variable number of + # arguments, calculate the size of the resulting Python string and + # return a string with the values formatted into it. The variable + # arguments must be C types and must correspond exactly to the + # format characters in the format string. The following format + # characters are allowed: + # Format Characters Type Comment + # %% n/a The literal % character. + # %c int A single character, represented as an C int. + # %d int Exactly equivalent to printf("%d"). + # %u unsigned int Exactly equivalent to printf("%u"). + # %ld long Exactly equivalent to printf("%ld"). + # %lu unsigned long Exactly equivalent to printf("%lu"). + # %zd Py_ssize_t Exactly equivalent to printf("%zd"). + # %zu size_t Exactly equivalent to printf("%zu"). + # %i int Exactly equivalent to printf("%i"). + # %x int Exactly equivalent to printf("%x"). + # %s char* A null-terminated C character array. + + # %p void* The hex representation of a C pointer. + # Mostly equivalent to printf("%p") except that it is guaranteed to + # start with the literal 0x regardless of what the platform's printf + # yields. + # An unrecognized format character causes all the rest of the + # format string to be copied as-is to the result string, and any + # extra arguments discarded. + + bytes PyBytes_FromFormatV(char *format, va_list vargs) + # Return value: New reference. + # Identical to PyBytes_FromFormat() except that it takes exactly two arguments. 
+ + bytes PyBytes_FromObject(object o) + # Return value: New reference. + # Return the bytes representation of object o that implements the buffer protocol. + + Py_ssize_t PyBytes_Size(object string) except -1 + # Return the length of the string in string object string. + + Py_ssize_t PyBytes_GET_SIZE(object string) + # Macro form of PyBytes_Size() but without error checking. + + char* PyBytes_AsString(object string) except NULL + # Return a NUL-terminated representation of the contents of + # string. The pointer refers to the internal buffer of string, not + # a copy. The data must not be modified in any way, unless the + # string was just created using PyBytes_FromStringAndSize(NULL, + # size). It must not be deallocated. If string is a Unicode + # object, this function computes the default encoding of string + # and operates on that. If string is not a string object at all, + # PyBytes_AsString() returns NULL and raises TypeError. + + char* PyBytes_AS_STRING(object string) + # Macro form of PyBytes_AsString() but without error + # checking. Only string objects are supported; no Unicode objects + # should be passed. + + int PyBytes_AsStringAndSize(object obj, char **buffer, Py_ssize_t *length) except -1 + # Return a NULL-terminated representation of the contents of the + # object obj through the output variables buffer and length. + # + # The function accepts both string and Unicode objects as + # input. For Unicode objects it returns the default encoded + # version of the object. If length is NULL, the resulting buffer + # may not contain NUL characters; if it does, the function returns + # -1 and a TypeError is raised. + + # The buffer refers to an internal string buffer of obj, not a + # copy. The data must not be modified in any way, unless the + # string was just created using PyBytes_FromStringAndSize(NULL, + # size). It must not be deallocated. 
If string is a Unicode + # object, this function computes the default encoding of string + # and operates on that. If string is not a string object at all, + # PyBytes_AsStringAndSize() returns -1 and raises TypeError. + + void PyBytes_Concat(PyObject **string, object newpart) + # Create a new string object in *string containing the contents of + # newpart appended to string; the caller will own the new + # reference. The reference to the old value of string will be + # stolen. If the new string cannot be created, the old reference + # to string will still be discarded and the value of *string will + # be set to NULL; the appropriate exception will be set. + + void PyBytes_ConcatAndDel(PyObject **string, object newpart) + # Create a new string object in *string containing the contents of + # newpart appended to string. This version decrements the + # reference count of newpart. + + int _PyBytes_Resize(PyObject **string, Py_ssize_t newsize) except -1 + # A way to resize a string object even though it is + # ``immutable''. Only use this to build up a brand new string + # object; don't use this if the string may already be known in + # other parts of the code. It is an error to call this function if + # the refcount on the input string object is not one. Pass the + # address of an existing string object as an lvalue (it may be + # written into), and the new size desired. On success, *string + # holds the resized string object and 0 is returned; the address + # in *string may differ from its input value. If the reallocation + # fails, the original string object at *string is deallocated, + # *string is set to NULL, a memory exception is set, and -1 is + # returned. + + bytes PyBytes_Format(object format, object args) + # Return value: New reference. Return a new string object from + # format and args. Analogous to format % args. The args argument + # must be a tuple. + + void PyBytes_InternInPlace(PyObject **string) + # Intern the argument *string in place. 
The argument must be the + # address of a pointer variable pointing to a Python string + # object. If there is an existing interned string that is the same + # as *string, it sets *string to it (decrementing the reference + # count of the old string object and incrementing the reference + # count of the interned string object), otherwise it leaves + # *string alone and interns it (incrementing its reference + # count). (Clarification: even though there is a lot of talk about + # reference counts, think of this function as + # reference-count-neutral; you own the object after the call if + # and only if you owned it before the call.) + + bytes PyBytes_InternFromString(char *v) + # Return value: New reference. + # A combination of PyBytes_FromString() and + # PyBytes_InternInPlace(), returning either a new string object + # that has been interned, or a new (``owned'') reference to an + # earlier interned string object with the same value. + + object PyBytes_Decode(char *s, Py_ssize_t size, char *encoding, char *errors) + # Return value: New reference. + # Create an object by decoding size bytes of the encoded buffer s + # using the codec registered for encoding. encoding and errors + # have the same meaning as the parameters of the same name in the + # unicode() built-in function. The codec to be used is looked up + # using the Python codec registry. Return NULL if an exception was + # raised by the codec. + + object PyBytes_AsDecodedObject(object str, char *encoding, char *errors) + # Return value: New reference. + # Decode a string object by passing it to the codec registered for + # encoding and return the result as Python object. encoding and + # errors have the same meaning as the parameters of the same name + # in the string encode() method. The codec to be used is looked up + # using the Python codec registry. Return NULL if an exception was + # raised by the codec. 
+ + object PyBytes_Encode(char *s, Py_ssize_t size, char *encoding, char *errors) + # Return value: New reference. + # Encode the char buffer of the given size by passing it to the + # codec registered for encoding and return a Python + # object. encoding and errors have the same meaning as the + # parameters of the same name in the string encode() method. The + # codec to be used is looked up using the Python codec + # registry. Return NULL if an exception was raised by the codec. + + object PyBytes_AsEncodedObject(object str, char *encoding, char *errors) + # Return value: New reference. + # Encode a string object using the codec registered for encoding + # and return the result as Python object. encoding and errors have + # the same meaning as the parameters of the same name in the + # string encode() method. The codec to be used is looked up using + # the Python codec registry. Return NULL if an exception was + # raised by the codec. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/contextvars.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/contextvars.pxd new file mode 100644 index 0000000000000000000000000000000000000000..aa80026642f38a917ca9b1a61050dd18690dce92 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/contextvars.pxd @@ -0,0 +1,140 @@ +from cpython.object cimport PyObject +from cpython.ref cimport Py_XDECREF + +cdef extern from "Python.h": + # Defining PyContextVar_Get() below to always return the default value for Py<3.7 and PyPy<7.3.6 + # to make the inline functions sort-of work. + """ + #if (PY_VERSION_HEX < 0x030700b1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030600)) && !defined(PyContextVar_Get) + #define PyContextVar_Get(var, d, v) \ + ((d) ? 
\ + ((void)(var), Py_INCREF(d), (v)[0] = (d), 0) : \ + ((v)[0] = NULL, 0) \ + ) + #endif + """ + + ############################################################################ + # Context Variables Objects + ############################################################################ + + # PyContext + # The C structure used to represent a `contextvars.Context` object. + + # PyContextVar + # The C structure used to represent a `contextvars.ContextVar` object. + + # PyContextToken + # The C structure used to represent a `contextvars.Token` object. + + # PyTypeObject PyContext_Type + # Type object representing the `contextvars.Context` type. + + # PyTypeObject PyContextVar_Type + # Type object representing the `contextvars.ContextVar` type. + + # PyTypeObject PyContextToken_Type + # Type object representing the `contextvars.Token` type. + + bint PyContext_CheckExact(object obj) + # Return `true` if `obj` is of type `PyContext_Type`. + # `obj` must not be NULL. This function always succeeds. + + bint PyContextVar_CheckExact(object obj) + # Return `true` if `obj` is of type `PyContextVar_Type`. + # `obj` must not be NULL. This function always succeeds. + + bint PyContextToken_CheckExact(object obj) + # Return `true` if `obj` is of type `PyContextToken_Type`. + # `obj` must not be NULL. This function always succeeds. + + object PyContext_New() + # Return value: New reference. + # Create a new empty context object. + # Returns NULL if an error has occurred. + + object PyContext_Copy(object ctx) + # Return value: New reference. + # Create a shallow copy of the passed `ctx` context object. + # Returns NULL if an error has occurred. + + object PyContext_CopyCurrent() + # Return value: New reference. + # Create a shallow copy of the current thread context. + # Returns NULL if an error has occurred. + + int PyContext_Enter(object ctx) except -1 + # Set `ctx` as the current context for the current thread. + # Returns 0 on success, and -1 on error. 
+ + int PyContext_Exit(object ctx) except -1 + # Deactivate the `ctx` context and restore the previous context + # as the current context for the current thread. + # Returns 0 on success, and -1 on error. + + object PyContextVar_New(const char* name, PyObject* default_value) + # Return value: New reference. + # Create a new ContextVar object. The `name` parameter is used + # for introspection and debug purposes. The `default_value` parameter + # may optionally specify the default value for the context variable. + # If an error has occurred, this function returns NULL. + + object PyContextVar_New_with_default "PyContextVar_New" (const char* name, object default_value) + # A different declaration of PyContextVar_New that requires a default value + # to be passed on call. + + int PyContextVar_Get(object var, PyObject* default_value, PyObject** value) except -1 + # Get the value of a context variable. + # Returns -1 if an error has occurred during lookup, and 0 if no error + # occurred, whether or not a value was found. + # + # If the context variable was found, `value` will be a pointer to it. + # If the context variable was not found, `value` will point to: + # + # • `default_value`, if not NULL; + # • the default value of `var`, if not NULL; + # • NULL + int PyContextVar_Get_with_default "PyContextVar_Get" (object var, object default_value, PyObject** value) except -1 + # A different declaration of PyContextVar_Get that requires a default value + # to be passed on call. + + object PyContextVar_Set(object var, object value) + # Return value: New reference. + # Set the value of `var` to `value` in the current context. + # Returns a token object for this value change, or NULL if an error has occurred. + + int PyContextVar_Reset(object var, object token) except -1 + # Reset the state of the `var` context variable to that it was in + # before `PyContextVar_Set()` that returned `token` was called. + # This function returns 0 on success and -1 on error. 
cdef inline object get_value(var, default_value=None):
    """Return a new reference to the value of the context variable,
    or the default value of the context variable,
    or None if no such value or default was found.
    """
    cdef PyObject *value = NULL
    PyContextVar_Get(var, NULL, &value)
    if value is NULL:
        # context variable does not have a default
        pyvalue = default_value
    else:
        # value or default value of context variable
        # Cast back to 'object': PyContextVar_Get() filled in an owned
        # 'PyObject*' reference, so balance it with Py_XDECREF().
        pyvalue = <object>value
        Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'
    return pyvalue


cdef inline object get_value_no_default(var, default_value=None):
    """Return a new reference to the value of the context variable,
    or the provided default value if no such value was found.

    Ignores the default value of the context variable, if any.
    """
    cdef PyObject *value = NULL
    # Passing a non-NULL default means PyContextVar_Get() always fills in
    # 'value' (either the variable's value or 'default_value'), bypassing
    # the variable's own default.
    PyContextVar_Get(var, <PyObject*>default_value, &value)
    # value of context variable or 'default_value'
    pyvalue = <object>value
    Py_XDECREF(value)  # PyContextVar_Get() returned an owned reference as 'PyObject*'
    return pyvalue
(((PyDateTime_Delta*)o)->seconds) + #endif + #ifndef PyDateTime_DELTA_GET_MICROSECONDS + #define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds) + #endif + #endif + + /* Backport for Python < 3.6 */ + #if PY_VERSION_HEX < 0x030600a4 + #ifndef PyDateTime_TIME_GET_FOLD + #define PyDateTime_TIME_GET_FOLD(o) ((void)(o), 0) + #endif + #ifndef PyDateTime_DATE_GET_FOLD + #define PyDateTime_DATE_GET_FOLD(o) ((void)(o), 0) + #endif + #endif + + /* Backport for Python < 3.6 */ + #if PY_VERSION_HEX < 0x030600a4 + #define __Pyx_DateTime_DateTimeWithFold(year, month, day, hour, minute, second, microsecond, tz, fold) \ + ((void)(fold), PyDateTimeAPI->DateTime_FromDateAndTime(year, month, day, hour, minute, second, \ + microsecond, tz, PyDateTimeAPI->DateTimeType)) + #define __Pyx_DateTime_TimeWithFold(hour, minute, second, microsecond, tz, fold) \ + ((void)(fold), PyDateTimeAPI->Time_FromTime(hour, minute, second, microsecond, tz, PyDateTimeAPI->TimeType)) + #else /* For Python 3.6+ so that we can pass tz */ + #define __Pyx_DateTime_DateTimeWithFold(year, month, day, hour, minute, second, microsecond, tz, fold) \ + PyDateTimeAPI->DateTime_FromDateAndTimeAndFold(year, month, day, hour, minute, second, \ + microsecond, tz, fold, PyDateTimeAPI->DateTimeType) + #define __Pyx_DateTime_TimeWithFold(hour, minute, second, microsecond, tz, fold) \ + PyDateTimeAPI->Time_FromTimeAndFold(hour, minute, second, microsecond, tz, fold, PyDateTimeAPI->TimeType) + #endif + + /* Backport for Python < 3.7 */ + #if PY_VERSION_HEX < 0x030700b1 + #define __Pyx_TimeZone_UTC NULL + #define __Pyx_TimeZone_FromOffsetAndName(offset, name) ((void)(offset), (void)(name), (PyObject*)NULL) + #else + #define __Pyx_TimeZone_UTC PyDateTime_TimeZone_UTC + #define __Pyx_TimeZone_FromOffsetAndName(offset, name) PyTimeZone_FromOffsetAndName(offset, name) + #endif + + /* Backport for Python < 3.10 */ + #if PY_VERSION_HEX < 0x030a00a1 + #ifndef PyDateTime_TIME_GET_TZINFO + #define 
PyDateTime_TIME_GET_TZINFO(o) \ + ((((PyDateTime_Time*)o)->hastzinfo) ? ((PyDateTime_Time*)o)->tzinfo : Py_None) + #endif + #ifndef PyDateTime_DATE_GET_TZINFO + #define PyDateTime_DATE_GET_TZINFO(o) \ + ((((PyDateTime_DateTime*)o)->hastzinfo) ? ((PyDateTime_DateTime*)o)->tzinfo : Py_None) + #endif + #endif + """ + + ctypedef extern class datetime.date[object PyDateTime_Date]: + @property + cdef inline int year(self) noexcept: + return PyDateTime_GET_YEAR(self) + + @property + cdef inline int month(self) noexcept: + return PyDateTime_GET_MONTH(self) + + @property + cdef inline int day(self) noexcept: + return PyDateTime_GET_DAY(self) + + ctypedef extern class datetime.time[object PyDateTime_Time]: + @property + cdef inline int hour(self) noexcept: + return PyDateTime_TIME_GET_HOUR(self) + + @property + cdef inline int minute(self) noexcept: + return PyDateTime_TIME_GET_MINUTE(self) + + @property + cdef inline int second(self) noexcept: + return PyDateTime_TIME_GET_SECOND(self) + + @property + cdef inline int microsecond(self) noexcept: + return PyDateTime_TIME_GET_MICROSECOND(self) + + @property + cdef inline object tzinfo(self): + return PyDateTime_TIME_GET_TZINFO(self) + + @property + cdef inline int fold(self) noexcept: + # For Python < 3.6 this returns 0 no matter what + return PyDateTime_TIME_GET_FOLD(self) + + ctypedef extern class datetime.datetime[object PyDateTime_DateTime]: + @property + cdef inline int year(self) noexcept: + return PyDateTime_GET_YEAR(self) + + @property + cdef inline int month(self) noexcept: + return PyDateTime_GET_MONTH(self) + + @property + cdef inline int day(self) noexcept: + return PyDateTime_GET_DAY(self) + + @property + cdef inline int hour(self) noexcept: + return PyDateTime_DATE_GET_HOUR(self) + + @property + cdef inline int minute(self) noexcept: + return PyDateTime_DATE_GET_MINUTE(self) + + @property + cdef inline int second(self) noexcept: + return PyDateTime_DATE_GET_SECOND(self) + + @property + cdef inline int 
microsecond(self) noexcept: + return PyDateTime_DATE_GET_MICROSECOND(self) + + @property + cdef inline object tzinfo(self): + return PyDateTime_DATE_GET_TZINFO(self) + + @property + cdef inline int fold(self) noexcept: + # For Python < 3.6 this returns 0 no matter what + return PyDateTime_DATE_GET_FOLD(self) + + ctypedef extern class datetime.timedelta[object PyDateTime_Delta]: + @property + cdef inline int day(self) noexcept: + return PyDateTime_DELTA_GET_DAYS(self) + + @property + cdef inline int second(self) noexcept: + return PyDateTime_DELTA_GET_SECONDS(self) + + @property + cdef inline int microsecond(self) noexcept: + return PyDateTime_DELTA_GET_MICROSECONDS(self) + + ctypedef extern class datetime.tzinfo[object PyDateTime_TZInfo]: + pass + + ctypedef struct PyDateTime_Date: + pass + + ctypedef struct PyDateTime_Time: + unsigned char fold + char hastzinfo + PyObject *tzinfo + + ctypedef struct PyDateTime_DateTime: + unsigned char fold + char hastzinfo + PyObject *tzinfo + + ctypedef struct PyDateTime_Delta: + int days + int seconds + int microseconds + + # Define structure for C API. + ctypedef struct PyDateTime_CAPI: + # type objects + PyTypeObject *DateType + PyTypeObject *DateTimeType + PyTypeObject *TimeType + PyTypeObject *DeltaType + PyTypeObject *TZInfoType + + # constructors + date (*Date_FromDate)(int, int, int, PyTypeObject*) + datetime (*DateTime_FromDateAndTime)(int, int, int, int, int, int, int, object, PyTypeObject*) + time (*Time_FromTime)(int, int, int, int, object, PyTypeObject*) + timedelta (*Delta_FromDelta)(int, int, int, int, PyTypeObject*) + + # constructors for the DB API + datetime (*DateTime_FromTimestamp)(PyObject*, object, PyObject*) + date (*Date_FromTimestamp)(PyObject*, object) + + # We cannot use the following because they do not compile in older Python versions. + # Instead, we use datetime.h's macros here that we can backport in C. 
+ + # Python 3.7+ constructors + object (*TimeZone_FromTimeZone)(object offset, PyObject *name) + + # Python 3.7+ singletons + PyObject *TimeZone_UTC + + # Python 3.6+ PEP 495 constructors + datetime (*DateTime_FromDateAndTimeAndFold)(int, int, int, int, int, int, int, object, int, PyTypeObject*) + time (*Time_FromTimeAndFold)(int, int, int ,int, object, int, PyTypeObject*) + + # Check type of the object. + bint PyDate_Check(object op) + bint PyDate_CheckExact(object op) + + bint PyDateTime_Check(object op) + bint PyDateTime_CheckExact(object op) + + bint PyTime_Check(object op) + bint PyTime_CheckExact(object op) + + bint PyDelta_Check(object op) + bint PyDelta_CheckExact(object op) + + bint PyTZInfo_Check(object op) + bint PyTZInfo_CheckExact(object op) + + # Getters for date and datetime (C macros). + int PyDateTime_GET_YEAR(object o) + int PyDateTime_GET_MONTH(object o) + int PyDateTime_GET_DAY(object o) + + # Getters for datetime (C macros). + int PyDateTime_DATE_GET_HOUR(object o) + int PyDateTime_DATE_GET_MINUTE(object o) + int PyDateTime_DATE_GET_SECOND(object o) + int PyDateTime_DATE_GET_MICROSECOND(object o) + int PyDateTime_DATE_GET_FOLD(object o) + PyObject* PyDateTime_DATE_GET_TZINFO(object o) # returns a borrowed reference + + # Getters for time (C macros). + int PyDateTime_TIME_GET_HOUR(object o) + int PyDateTime_TIME_GET_MINUTE(object o) + int PyDateTime_TIME_GET_SECOND(object o) + int PyDateTime_TIME_GET_MICROSECOND(object o) + int PyDateTime_TIME_GET_FOLD(object o) + PyObject* PyDateTime_TIME_GET_TZINFO(object o) # returns a borrowed reference + + # Getters for timedelta (C macros). 
# Datetime C API initialization function.
# You have to call it before any usage of DateTime CAPI functions.
cdef inline void import_datetime() noexcept:
    PyDateTime_IMPORT

# Create date object using DateTime CAPI factory function.
# Note, there are no range checks for any of the arguments.
cdef inline date date_new(int year, int month, int day):
    return PyDateTimeAPI.Date_FromDate(year, month, day, PyDateTimeAPI.DateType)

# Create time object using DateTime CAPI factory function.
# Note, there are no range checks for any of the arguments.
cdef inline time time_new(int hour, int minute, int second, int microsecond, object tz, int fold=0):
    return __Pyx_DateTime_TimeWithFold(hour, minute, second, microsecond, tz, fold)

# Create datetime object using DateTime CAPI factory function.
# Note, there are no range checks for any of the arguments.
cdef inline datetime datetime_new(int year, int month, int day, int hour, int minute, int second, int microsecond, object tz, int fold=0):
    return __Pyx_DateTime_DateTimeWithFold(year, month, day, hour, minute, second, microsecond, tz, fold)

# Create timedelta object using DateTime CAPI factory function.
# Note, there are no range checks for any of the arguments.
cdef inline timedelta timedelta_new(int days, int seconds, int useconds):
    return PyDateTimeAPI.Delta_FromDelta(days, seconds, useconds, 1, PyDateTimeAPI.DeltaType)

# Create timezone object using DateTime CAPI factory function (Python 3.7+).
cdef inline object timezone_new(object offset, object name=None):
    if PY_VERSION_HEX < 0x030700b1:
        raise RuntimeError('Time zones are not available from the C-API.')
    # PyTimeZone_FromOffsetAndName() takes the name as a borrowed
    # 'PyObject*' that may be NULL, hence the explicit cast.
    return __Pyx_TimeZone_FromOffsetAndName(offset, <PyObject*>name if name is not None else NULL)

# Create datetime object using DB API constructor.
cdef inline datetime datetime_from_timestamp(timestamp, tz=None):
    # DateTime_FromTimestamp() takes the type as 'PyObject*', not
    # 'PyTypeObject*', hence the cast. Third argument (kwargs) is unused.
    return PyDateTimeAPI.DateTime_FromTimestamp(
        <PyObject*>PyDateTimeAPI.DateTimeType, (timestamp, tz) if tz is not None else (timestamp,), NULL)

# Create date object using DB API constructor.
cdef inline date date_from_timestamp(timestamp):
    return PyDateTimeAPI.Date_FromTimestamp(<PyObject*>PyDateTimeAPI.DateType, (timestamp,))
# Get UTC singleton (Python 3.7+ only).
cdef inline object get_utc():
    if PY_VERSION_HEX < 0x030700b1:
        raise RuntimeError('Time zones are not available from the C-API.')
    # __Pyx_TimeZone_UTC is a borrowed 'PyObject*'; cast back to 'object'
    # so Cython takes a new reference for the caller.
    return <object>__Pyx_TimeZone_UTC

# Get tzinfo of time
cdef inline object time_tzinfo(object o):
    # The macro returns a borrowed 'PyObject*'; the cast makes Cython
    # produce a properly owned 'object' reference.
    return <object>PyDateTime_TIME_GET_TZINFO(o)

# Get tzinfo of datetime
cdef inline object datetime_tzinfo(object o):
    return <object>PyDateTime_DATE_GET_TZINFO(o)

# Get year of date
cdef inline int date_year(object o) noexcept:
    return PyDateTime_GET_YEAR(o)

# Get month of date
cdef inline int date_month(object o) noexcept:
    return PyDateTime_GET_MONTH(o)

# Get day of date
cdef inline int date_day(object o) noexcept:
    return PyDateTime_GET_DAY(o)

# Get year of datetime
cdef inline int datetime_year(object o) noexcept:
    return PyDateTime_GET_YEAR(o)

# Get month of datetime
cdef inline int datetime_month(object o) noexcept:
    return PyDateTime_GET_MONTH(o)

# Get day of datetime
cdef inline int datetime_day(object o) noexcept:
    return PyDateTime_GET_DAY(o)

# Get hour of time
cdef inline int time_hour(object o) noexcept:
    return PyDateTime_TIME_GET_HOUR(o)

# Get minute of time
cdef inline int time_minute(object o) noexcept:
    return PyDateTime_TIME_GET_MINUTE(o)

# Get second of time
cdef inline int time_second(object o) noexcept:
    return PyDateTime_TIME_GET_SECOND(o)

# Get microsecond of time
cdef inline int time_microsecond(object o) noexcept:
    return PyDateTime_TIME_GET_MICROSECOND(o)

# Get fold of time
cdef inline int time_fold(object o) noexcept:
    # For Python < 3.6 this returns 0 no matter what
    return PyDateTime_TIME_GET_FOLD(o)

# Get hour of datetime
cdef inline int datetime_hour(object o) noexcept:
    return PyDateTime_DATE_GET_HOUR(o)

# Get minute of datetime
cdef inline int datetime_minute(object o) noexcept:
    return PyDateTime_DATE_GET_MINUTE(o)

# Get second of datetime
cdef inline int datetime_second(object o) noexcept:
    return PyDateTime_DATE_GET_SECOND(o)

# Get microsecond of datetime
cdef inline int datetime_microsecond(object o) noexcept:
    return PyDateTime_DATE_GET_MICROSECOND(o)

# Get fold of datetime
cdef inline int datetime_fold(object o) noexcept:
    # For Python < 3.6 this returns 0 no matter what
    return PyDateTime_DATE_GET_FOLD(o)

# Get days of timedelta
cdef inline int timedelta_days(object o) noexcept:
    # Direct struct access; the cast to the C struct type is required.
    return (<PyDateTime_Delta*>o).days

# Get seconds of timedelta
cdef inline int timedelta_seconds(object o) noexcept:
    return (<PyDateTime_Delta*>o).seconds

# Get microseconds of timedelta
cdef inline int timedelta_microseconds(object o) noexcept:
    return (<PyDateTime_Delta*>o).microseconds

cdef inline double total_seconds(timedelta obj) noexcept:
    # Mirrors the "timedelta.total_seconds()" method.
    # Note that this implementation is not guaranteed to give *exactly* the same
    # result as the original method, due to potential differences in floating point rounding.
    cdef:
        double days, seconds, micros
    days = <double>PyDateTime_DELTA_GET_DAYS(obj)
    seconds = <double>PyDateTime_DELTA_GET_SECONDS(obj)
    micros = <double>PyDateTime_DELTA_GET_MICROSECONDS(obj)
    return days * 24 * 3600 + seconds + micros / 1_000_000
Dictionary Objects + ############################################################################ + + # PyDictObject + # + # This subtype of PyObject represents a Python dictionary object + # (i.e. the 'dict' type). + + # PyTypeObject PyDict_Type + # + # This instance of PyTypeObject represents the Python dictionary + # type. This is exposed to Python programs as dict and + # types.DictType. + + bint PyDict_Check(object p) + # Return true if p is a dict object or an instance of a subtype of + # the dict type. + + bint PyDict_CheckExact(object p) + # Return true if p is a dict object, but not an instance of a + # subtype of the dict type. + + dict PyDict_New() + # Return value: New reference. + # Return a new empty dictionary, or NULL on failure. + + object PyDictProxy_New(object dict) + # Return value: New reference. + # Return a proxy object for a mapping which enforces read-only + # behavior. This is normally used to create a proxy to prevent + # modification of the dictionary for non-dynamic class types. + + void PyDict_Clear(object p) + # Empty an existing dictionary of all key-value pairs. + + int PyDict_Contains(object p, object key) except -1 + # Determine if dictionary p contains key. If an item in p is + # matches key, return 1, otherwise return 0. On error, return + # -1. This is equivalent to the Python expression "key in p". + + dict PyDict_Copy(object p) + # Return value: New reference. + # Return a new dictionary that contains the same key-value pairs as p. + + int PyDict_SetItem(object p, object key, object val) except -1 + # Insert value into the dictionary p with a key of key. key must + # be hashable; if it isn't, TypeError will be raised. Return 0 on + # success or -1 on failure. + + int PyDict_SetItemString(object p, const char *key, object val) except -1 + # Insert value into the dictionary p using key as a key. key + # should be a char*. The key object is created using + # PyString_FromString(key). Return 0 on success or -1 on failure. 
+ + int PyDict_DelItem(object p, object key) except -1 + # Remove the entry in dictionary p with key key. key must be + # hashable; if it isn't, TypeError is raised. Return 0 on success + # or -1 on failure. + + int PyDict_DelItemString(object p, const char *key) except -1 + # Remove the entry in dictionary p which has a key specified by + # the string key. Return 0 on success or -1 on failure. + + PyObject* PyDict_GetItem(object p, object key) + # Return value: Borrowed reference. + # Return the object from dictionary p which has a key key. Return + # NULL if the key key is not present, but without setting an + # exception. + + PyObject* PyDict_GetItemWithError(object p, object key) except? NULL + # Return value: Borrowed reference. + # Variant of PyDict_GetItem() that does not suppress exceptions. Return + # NULL with an exception set if an exception occurred. Return NULL + # without an exception set if the key wasn’t present. + + PyObject* PyDict_GetItemString(object p, const char *key) + # Return value: Borrowed reference. + # This is the same as PyDict_GetItem(), but key is specified as a + # char*, rather than a PyObject*. + + PyObject* PyDict_SetDefault(object p, object key, object default) except NULL + # Return value: Borrowed reference. + # This is the same as the Python-level dict.setdefault(). If present, it + # returns the value corresponding to key from the dictionary p. If the key + # is not in the dict, it is inserted with value defaultobj and defaultobj + # is returned. This function evaluates the hash function of key only once, + # instead of evaluating it independently for the lookup and the insertion. + + list PyDict_Items(object p) + # Return value: New reference. + # Return a PyListObject containing all the items from the + # dictionary, as in the dictionary method items() (see the Python + # Library Reference). + + list PyDict_Keys(object p) + # Return value: New reference. 
+ # Return a PyListObject containing all the keys from the + # dictionary, as in the dictionary method keys() (see the Python + # Library Reference). + + list PyDict_Values(object p) + # Return value: New reference. + # Return a PyListObject containing all the values from the + # dictionary p, as in the dictionary method values() (see the + # Python Library Reference). + + Py_ssize_t PyDict_Size(object p) except -1 + # Return the number of items in the dictionary. This is equivalent + # to "len(p)" on a dictionary. + + int PyDict_Next(object p, Py_ssize_t *ppos, PyObject* *pkey, PyObject* *pvalue) + # Iterate over all key-value pairs in the dictionary p. The int + # referred to by ppos must be initialized to 0 prior to the first + # call to this function to start the iteration; the function + # returns true for each pair in the dictionary, and false once all + # pairs have been reported. The parameters pkey and pvalue should + # either point to PyObject* variables that will be filled in with + # each key and value, respectively, or may be NULL. Any references + # returned through them are borrowed. ppos should not be altered + # during iteration. Its value represents offsets within the + # internal dictionary structure, and since the structure is + # sparse, the offsets are not consecutive. + # For example: + # + #object key, *value; + #int pos = 0; + # + #while (PyDict_Next(self->dict, &pos, &key, &value)) { + # /* do something interesting with the values... */ + # ... + #} + # The dictionary p should not be mutated during iteration. It is + # safe (since Python 2.1) to modify the values of the keys as you + # iterate over the dictionary, but only so long as the set of keys + # does not change. 
For example: + # object key, *value; + # int pos = 0; + # while (PyDict_Next(self->dict, &pos, &key, &value)) { + # int i = PyInt_AS_LONG(value) + 1; + # object o = PyInt_FromLong(i); + # if (o == NULL) + # return -1; + # if (PyDict_SetItem(self->dict, key, o) < 0) { + # Py_DECREF(o); + # return -1; + # } + # Py_DECREF(o); + # } + + int PyDict_Merge(object a, object b, int override) except -1 + # Iterate over mapping object b adding key-value pairs to + # dictionary a. b may be a dictionary, or any object supporting + # PyMapping_Keys() and PyObject_GetItem(). If override is true, + # existing pairs in a will be replaced if a matching key is found + # in b, otherwise pairs will only be added if there is not a + # matching key in a. Return 0 on success or -1 if an exception was + # raised. + + int PyDict_Update(object a, object b) except -1 + # This is the same as PyDict_Merge(a, b, 1) in C, or a.update(b) + # in Python. Return 0 on success or -1 if an exception was raised. + + int PyDict_MergeFromSeq2(object a, object seq2, int override) except -1 + # Update or merge into dictionary a, from the key-value pairs in + # seq2. seq2 must be an iterable object producing iterable objects + # of length 2, viewed as key-value pairs. In case of duplicate + # keys, the last wins if override is true, else the first + # wins. Return 0 on success or -1 if an exception was + # raised. 
Equivalent Python (except for the return value): + # + #def PyDict_MergeFromSeq2(a, seq2, override): + # for key, value in seq2: + # if override or key not in a: + # a[key] = value diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/genobject.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/genobject.pxd new file mode 100644 index 0000000000000000000000000000000000000000..337b3cc0adc012a658e819fdbc8b67f8fb473e00 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/genobject.pxd @@ -0,0 +1,25 @@ +from .pystate cimport PyFrameObject + +cdef extern from "Python.h": + + ########################################################################### + # Generator Objects + ########################################################################### + + bint PyGen_Check(object ob) + # Return true if ob is a generator object; ob must not be NULL. + + bint PyGen_CheckExact(object ob) + # Return true if ob's type is PyGen_Type; ob must not be NULL. + + object PyGen_New(PyFrameObject *frame) + # Return value: New reference. + # Create and return a new generator object based on the frame object. A + # reference to frame is stolen by this function. The argument must not be + # NULL. + + object PyGen_NewWithQualName(PyFrameObject *frame, object name, object qualname) + # Return value: New reference. + # Create and return a new generator object based on the frame object, with + # __name__ and __qualname__ set to name and qualname. A reference to frame + # is stolen by this function. The frame argument must not be NULL. 
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/iterator.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/iterator.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0e10907f7f1e673b3e0dd2f6d363c80206b6cde2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/iterator.pxd @@ -0,0 +1,36 @@ +cdef extern from "Python.h": + + ############################################################################ + # 6.5 Iterator Protocol + ############################################################################ + bint PyIter_Check(object o) + # Return true if the object o supports the iterator protocol. + + object PyIter_Next(object o) + # Return value: New reference. + # Return the next value from the iteration o. If the object is an + # iterator, this retrieves the next value from the iteration, and + # returns NULL with no exception set if there are no remaining + # items. If the object is not an iterator, TypeError is raised, or + # if there is an error in retrieving the item, returns NULL and + # passes along the exception. + + # To write a loop which iterates over an iterator, the C code should look something like this: + # PyObject *iterator = PyObject_GetIter(obj); + # PyObject *item; + # if (iterator == NULL) { + # /* propagate error */ + # } + # while (item = PyIter_Next(iterator)) { + # /* do something with item */ + # ... 
+ # /* release reference when done */ + # Py_DECREF(item); + # } + # Py_DECREF(iterator); + # if (PyErr_Occurred()) { + # /* propagate error */ + # } + # else { + # /* continue doing useful work */ + # } diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/method.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/method.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f51ebcc7c75b41477211925f13e87a1bea7dacb3 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/method.pxd @@ -0,0 +1,49 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + ############################################################################ + # 7.5.4 Method Objects + ############################################################################ + + # There are some useful functions that are useful for working with method objects. + # PyTypeObject PyMethod_Type + # This instance of PyTypeObject represents the Python method type. This is exposed to Python programs as types.MethodType. + + bint PyMethod_Check(object o) + # Return true if o is a method object (has type + # PyMethod_Type). The parameter must not be NULL. + + object PyMethod_New(object func, object self, object cls) + # Return value: New reference. + # Return a new method object, with func being any callable object; + # this is the function that will be called when the method is + # called. If this method should be bound to an instance, self + # should be the instance and class should be the class of self, + # otherwise self should be NULL and class should be the class + # which provides the unbound method.. + + PyObject* PyMethod_Class(object meth) except NULL + # Return value: Borrowed reference. + # Return the class object from which the method meth was created; + # if this was created from an instance, it will be the class of + # the instance. 
+ + PyObject* PyMethod_GET_CLASS(object meth) + # Return value: Borrowed reference. + # Macro version of PyMethod_Class() which avoids error checking. + + PyObject* PyMethod_Function(object meth) except NULL + # Return value: Borrowed reference. + # Return the function object associated with the method meth. + + PyObject* PyMethod_GET_FUNCTION(object meth) + # Return value: Borrowed reference. + # Macro version of PyMethod_Function() which avoids error checking. + + PyObject* PyMethod_Self(object meth) except? NULL + # Return value: Borrowed reference. + # Return the instance associated with the method meth if it is bound, otherwise return NULL. + + PyObject* PyMethod_GET_SELF(object meth) + # Return value: Borrowed reference. + # Macro version of PyMethod_Self() which avoids error checking. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pylifecycle.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pylifecycle.pxd new file mode 100644 index 0000000000000000000000000000000000000000..2c71e371634cf239ffca29e986eae2f7dc239f4f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pylifecycle.pxd @@ -0,0 +1,68 @@ +# Interfaces to configure, query, create & destroy the Python runtime + +from libc.stdio cimport FILE +from .pystate cimport PyThreadState + + +cdef extern from "Python.h": + ctypedef int wchar_t + + void Py_SetProgramName(wchar_t *) + wchar_t *Py_GetProgramName() + + void Py_SetPythonHome(wchar_t *) + wchar_t *Py_GetPythonHome() + + # Only used by applications that embed the interpreter and need to + # override the standard encoding determination mechanism + int Py_SetStandardStreamEncoding(const char *encoding, const char *errors) + + void Py_Initialize() + void Py_InitializeEx(int) + void _Py_InitializeEx_Private(int, int) + void Py_Finalize() + int Py_FinalizeEx() + int Py_IsInitialized() + PyThreadState 
*Py_NewInterpreter() + void Py_EndInterpreter(PyThreadState *) + + + # _Py_PyAtExit is for the atexit module, Py_AtExit is for low-level + # exit functions. + void _Py_PyAtExit(void (*func)(object), object) + int Py_AtExit(void (*func)()) + + void Py_Exit(int) + + # Restore signals that the interpreter has called SIG_IGN on to SIG_DFL. + void _Py_RestoreSignals() + + int Py_FdIsInteractive(FILE *, const char *) + + # Bootstrap __main__ (defined in Modules/main.c) + int Py_Main(int argc, wchar_t **argv) + + # In getpath.c + wchar_t *Py_GetProgramFullPath() + wchar_t *Py_GetPrefix() + wchar_t *Py_GetExecPrefix() + wchar_t *Py_GetPath() + void Py_SetPath(const wchar_t *) + int _Py_CheckPython3() + + # In their own files + const char *Py_GetVersion() + const char *Py_GetPlatform() + const char *Py_GetCopyright() + const char *Py_GetCompiler() + const char *Py_GetBuildInfo() + const char *_Py_gitidentifier() + const char *_Py_gitversion() + + ctypedef void (*PyOS_sighandler_t)(int) + PyOS_sighandler_t PyOS_getsig(int) + PyOS_sighandler_t PyOS_setsig(int, PyOS_sighandler_t) + + # Random + int _PyOS_URandom(void *buffer, Py_ssize_t size) + int _PyOS_URandomNonblock(void *buffer, Py_ssize_t size) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/time.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/time.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f1a9f2967cd50b750b6cebd62c593a49ddd735d2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/time.pxd @@ -0,0 +1,79 @@ +""" +Cython implementation of (parts of) the standard library time module. 
+""" + +from libc.stdint cimport int64_t +from cpython.exc cimport PyErr_SetFromErrno + +cdef extern from *: + """ + #if PY_VERSION_HEX >= 0x030d00b1 || defined(PyTime_t) + #define __Pyx_PyTime_t PyTime_t + #else + #define __Pyx_PyTime_t _PyTime_t + #endif + + #if PY_VERSION_HEX >= 0x030d00b1 || defined(PyTime_TimeRaw) + static CYTHON_INLINE __Pyx_PyTime_t __Pyx_PyTime_TimeUnchecked(void) { + __Pyx_PyTime_t tic; + (void) PyTime_TimeRaw(&tic); + return tic; + } + #else + #define __Pyx_PyTime_TimeUnchecked() _PyTime_GetSystemClock() + #endif + + #if PY_VERSION_HEX >= 0x030d00b1 || defined(PyTime_AsSecondsDouble) + #define __Pyx_PyTime_AsSecondsDouble(t) PyTime_AsSecondsDouble(t) + #else + #define __Pyx_PyTime_AsSecondsDouble(t) _PyTime_AsSecondsDouble(t) + #endif + """ + ctypedef int64_t PyTime_t "__Pyx_PyTime_t" + ctypedef int64_t _PyTime_t "__Pyx_PyTime_t" + + _PyTime_t _PyTime_GetSystemClock "__Pyx_PyTime_TimeUnchecked" () nogil + _PyTime_t PyTime_TimeUnchecked "__Pyx_PyTime_TimeUnchecked" () nogil + + double _PyTime_AsSecondsDouble "__Pyx_PyTime_AsSecondsDouble" (_PyTime_t t) nogil + double PyTime_AsSecondsDouble "__Pyx_PyTime_AsSecondsDouble" (_PyTime_t t) nogil + +from libc.time cimport ( + tm, + time_t, + localtime as libc_localtime, +) + + +cdef inline double time() noexcept nogil: + cdef: + _PyTime_t tic + + tic = PyTime_TimeUnchecked() + return PyTime_AsSecondsDouble(tic) + + +cdef inline int _raise_from_errno() except -1 with gil: + PyErr_SetFromErrno(RuntimeError) + return -1 # Let the C compiler know that this function always raises. + + +cdef inline tm localtime() except * nogil: + """ + Analogue to the stdlib time.localtime. The returned struct + has some entries that the stdlib version does not: tm_gmtoff, tm_zone + """ + cdef: + time_t tic = time() + tm* result + + result = libc_localtime(&tic) + if result is NULL: + _raise_from_errno() + # Fix 0-based date values (and the 1900-based year). 
+ # See tmtotuple() in https://github.com/python/cpython/blob/master/Modules/timemodule.c + result.tm_year += 1900 + result.tm_mon += 1 + result.tm_wday = ((result.tm_wday + 6) % 7) + result.tm_yday += 1 + return result[0] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/tuple.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/tuple.pxd new file mode 100644 index 0000000000000000000000000000000000000000..907033fe45367a0d0b54618948f2bb7f400533bc --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/tuple.pxd @@ -0,0 +1,72 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + + ############################################################################ + # Tuples + ############################################################################ + + bint PyTuple_Check(object p) + # Return true if p is a tuple object or an instance of a subtype + # of the tuple type. + + bint PyTuple_CheckExact(object p) + # Return true if p is a tuple object, but not an instance of a subtype of the tuple type. + + tuple PyTuple_New(Py_ssize_t len) + # Return value: New reference. + # Return a new tuple object of size len, or NULL on failure. + + tuple PyTuple_Pack(Py_ssize_t n, ...) + # Return value: New reference. + # Return a new tuple object of size n, or NULL on failure. The + # tuple values are initialized to the subsequent n C arguments + # pointing to Python objects. "PyTuple_Pack(2, a, b)" is + # equivalent to "Py_BuildValue("(OO)", a, b)". + + Py_ssize_t PyTuple_Size(object p) except -1 + # Take a pointer to a tuple object, and return the size of that tuple. + + Py_ssize_t PyTuple_GET_SIZE(object p) + # Return the size of the tuple p, which must be non-NULL and point + # to a tuple; no error checking is performed. + + PyObject* PyTuple_GetItem(object p, Py_ssize_t pos) except NULL + # Return value: Borrowed reference. 
+ # Return the object at position pos in the tuple pointed to by + # p. If pos is out of bounds, return NULL and sets an IndexError + # exception. + + PyObject* PyTuple_GET_ITEM(object p, Py_ssize_t pos) + # Return value: Borrowed reference. + # Like PyTuple_GetItem(), but does no checking of its arguments. + + tuple PyTuple_GetSlice(object p, Py_ssize_t low, Py_ssize_t high) + # Return value: New reference. + # Take a slice of the tuple pointed to by p from low to high and return it as a new tuple. + + int PyTuple_SetItem(object p, Py_ssize_t pos, object o) except -1 + # Insert a reference to object o at position pos of the tuple + # pointed to by p. Return 0 on success. + # + # WARNING: This function _steals_ a reference to o. + + void PyTuple_SET_ITEM(object p, Py_ssize_t pos, object o) + # Like PyTuple_SetItem(), but does no error checking, and should + # only be used to fill in brand new tuples. + # + # WARNING: This function _steals_ a reference to o. + + int _PyTuple_Resize(PyObject **p, Py_ssize_t newsize) except -1 + # Can be used to resize a tuple. newsize will be the new length of + # the tuple. Because tuples are supposed to be immutable, this + # should only be used if there is only one reference to the + # object. Do not use this if the tuple may already be known to + # some other part of the code. The tuple will always grow or + # shrink at the end. Think of this as destroying the old tuple and + # creating a new one, only more efficiently. Returns 0 on + # success. Client code should never assume that the resulting + # value of *p will be the same as before calling this function. If + # the object referenced by *p is replaced, the original *p is + # destroyed. On failure, returns -1 and sets *p to NULL, and + # raises MemoryError or SystemError. 
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/unicode.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/unicode.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c798ae2b644b4f039d3cf36bdb278af1623401ab --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/unicode.pxd @@ -0,0 +1,639 @@ +from libc.stddef cimport wchar_t + +cdef extern from *: + ctypedef unsigned char Py_UCS1 # uint8_t + ctypedef unsigned short Py_UCS2 # uint16_t + + # Return true if the object o is a Unicode object or an instance + # of a Unicode subtype. Changed in version 2.2: Allowed subtypes + # to be accepted. + bint PyUnicode_Check(object o) + + # Return true if the object o is a Unicode object, but not an + # instance of a subtype. New in version 2.2. + bint PyUnicode_CheckExact(object o) + + # Return the size of the object. o has to be a PyUnicodeObject + # (not checked). + # + # Deprecated since version 3.3, will be removed in version 3.10: + # Part of the old-style Unicode API, please migrate to using + # PyUnicode_GET_LENGTH(). + Py_ssize_t PyUnicode_GET_SIZE(object o) + + # Return the length of the Unicode string, in code points. o has + # to be a Unicode object in the “canonical” representation (not + # checked). + # + # New in version 3.3. + Py_ssize_t PyUnicode_GET_LENGTH(object o) + + Py_UCS1 *PyUnicode_1BYTE_DATA(object o) + Py_UCS2 *PyUnicode_2BYTE_DATA(object o) + Py_UCS4 *PyUnicode_4BYTE_DATA(object o) + + int PyUnicode_WCHAR_KIND # Deprecated since Python 3.10, removed in 3.12. 
+ int PyUnicode_1BYTE_KIND + int PyUnicode_2BYTE_KIND + int PyUnicode_4BYTE_KIND + void PyUnicode_WRITE(int kind, void *data, Py_ssize_t index, Py_UCS4 value) + Py_UCS4 PyUnicode_READ(int kind, void *data, Py_ssize_t index) + Py_UCS4 PyUnicode_READ_CHAR(object o, Py_ssize_t index) + + unsigned int PyUnicode_KIND(object o) + void *PyUnicode_DATA(object o) + + # Return the size of the object's internal buffer in bytes. o has + # to be a PyUnicodeObject (not checked). + Py_ssize_t PyUnicode_GET_DATA_SIZE(object o) + + # Return a pointer to the internal Py_UNICODE buffer of the + # object. o has to be a PyUnicodeObject (not checked). + Py_UNICODE* PyUnicode_AS_UNICODE(object o) + + # Return a pointer to the internal buffer of the object. o has to + # be a PyUnicodeObject (not checked). + char* PyUnicode_AS_DATA(object o) + + bint PyUnicode_IsIdentifier(object o) + + # Return 1 or 0 depending on whether ch is a whitespace character. + bint Py_UNICODE_ISSPACE(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is a lowercase character. + bint Py_UNICODE_ISLOWER(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is an uppercase character. + bint Py_UNICODE_ISUPPER(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is a titlecase character. + bint Py_UNICODE_ISTITLE(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is a linebreak character. + bint Py_UNICODE_ISLINEBREAK(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is a decimal character. + bint Py_UNICODE_ISDECIMAL(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is a digit character. + bint Py_UNICODE_ISDIGIT(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is a numeric character. + bint Py_UNICODE_ISNUMERIC(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is an alphabetic character. + bint Py_UNICODE_ISALPHA(Py_UCS4 ch) + + # Return 1 or 0 depending on whether ch is an alphanumeric character. 
+ bint Py_UNICODE_ISALNUM(Py_UCS4 ch) + + bint Py_UNICODE_ISPRINTABLE(Py_UCS4 ch) + + # Return the character ch converted to lower case. + # Used to return a Py_UNICODE value before Py3.3. + Py_UCS4 Py_UNICODE_TOLOWER(Py_UCS4 ch) + + # Return the character ch converted to upper case. + # Used to return a Py_UNICODE value before Py3.3. + Py_UCS4 Py_UNICODE_TOUPPER(Py_UCS4 ch) + + # Return the character ch converted to title case. + # Used to return a Py_UNICODE value before Py3.3. + Py_UCS4 Py_UNICODE_TOTITLE(Py_UCS4 ch) + + # Return the character ch converted to a decimal positive + # integer. Return -1 if this is not possible. This macro does not + # raise exceptions. + int Py_UNICODE_TODECIMAL(Py_UCS4 ch) + + # Return the character ch converted to a single digit + # integer. Return -1 if this is not possible. This macro does not + # raise exceptions. + int Py_UNICODE_TODIGIT(Py_UCS4 ch) + + # Return the character ch converted to a double. Return -1.0 if + # this is not possible. This macro does not raise exceptions. + double Py_UNICODE_TONUMERIC(Py_UCS4 ch) + + # To create Unicode objects and access their basic sequence + # properties, use these APIs: + + # Create a Unicode Object from the Py_UNICODE buffer u of the + # given size. u may be NULL which causes the contents to be + # undefined. It is the user's responsibility to fill in the needed + # data. The buffer is copied into the new object. If the buffer is + # not NULL, the return value might be a shared object. Therefore, + # modification of the resulting Unicode object is only allowed + # when u is NULL. + unicode PyUnicode_FromUnicode(Py_UNICODE *u, Py_ssize_t size) + + # Similar to PyUnicode_FromUnicode(), but u points to UTF-8 encoded + # bytes + unicode PyUnicode_FromStringAndSize(const char *u, Py_ssize_t size) + + # Similar to PyUnicode_FromUnicode(), but u points to null-terminated + # UTF-8 encoded bytes. The size is determined with strlen(). 
+ unicode PyUnicode_FromString(const char *u) + + unicode PyUnicode_New(Py_ssize_t size, Py_UCS4 maxchar) + unicode PyUnicode_FromKindAndData(int kind, const void *buffer, Py_ssize_t size) + unicode PyUnicode_FromFormat(const char *format, ...) + Py_ssize_t PyUnicode_GetLength(object unicode) except -1 + Py_ssize_t PyUnicode_CopyCharacters(object to, Py_ssize_t to_start, object from_, Py_ssize_t from_start, Py_ssize_t how_many) except -1 + Py_ssize_t PyUnicode_Fill(object unicode, Py_ssize_t start, Py_ssize_t length, Py_UCS4 fill_char) except -1 + int PyUnicode_WriteChar(object unicode, Py_ssize_t index, Py_UCS4 character) except -1 + Py_UCS4 PyUnicode_ReadChar(object unicode, Py_ssize_t index) except -1 + unicode PyUnicode_Substring(object str, Py_ssize_t start, Py_ssize_t end) + Py_UCS4 *PyUnicode_AsUCS4(object u, Py_UCS4 *buffer, Py_ssize_t buflen, int copy_null) except NULL + Py_UCS4 *PyUnicode_AsUCS4Copy(object u) except NULL + + # Create a Unicode Object from the given Unicode code point ordinal. + # + # The ordinal must be in range(0x10000) on narrow Python builds + # (UCS2), and range(0x110000) on wide builds (UCS4). A ValueError + # is raised in case it is not. + unicode PyUnicode_FromOrdinal(int ordinal) + + # Return a read-only pointer to the Unicode object's internal + # Py_UNICODE buffer, NULL if unicode is not a Unicode object. + Py_UNICODE* PyUnicode_AsUnicode(object o) except NULL + + # Return the length of the Unicode object. + Py_ssize_t PyUnicode_GetSize(object o) except -1 + + # Coerce an encoded object obj to an Unicode object and return a + # reference with incremented refcount. + # String and other char buffer compatible objects are decoded + # according to the given encoding and using the error handling + # defined by errors. Both can be NULL to have the interface use + # the default values (see the next section for details). + # All other objects, including Unicode objects, cause a TypeError + # to be set. 
+ object PyUnicode_FromEncodedObject(object o, char *encoding, char *errors)
+
+ # Shortcut for PyUnicode_FromEncodedObject(obj, NULL, "strict")
+ # which is used throughout the interpreter whenever coercion to
+ # Unicode is needed.
+ object PyUnicode_FromObject(object obj)
+
+ # If the platform supports wchar_t and provides a header file
+ # wchar.h, Python can interface directly to this type using the
+ # following functions. Support is optimized if Python's own
+ # Py_UNICODE type is identical to the system's wchar_t.
+
+ # Create a Unicode object from the wchar_t buffer w of the given
+ # size. Return NULL on failure.
+ object PyUnicode_FromWideChar(wchar_t *w, Py_ssize_t size)
+
+ # Copy the Unicode object contents into the wchar_t buffer w.
+ # At most size wchar_t characters are copied (excluding a possibly
+ # trailing null termination character). Return the number of wchar_t
+ # characters copied or -1 in case of an error. Note that the
+ # resulting wchar_t* string may or may not be null-terminated.
+ # It is the responsibility of the caller to make sure that the wchar_t*
+ # string is null-terminated in case this is required by the application.
+ # Also, note that the wchar_t* string might contain null characters,
+ # which would cause the string to be truncated when used with most C functions.
+ Py_ssize_t PyUnicode_AsWideChar(object o, wchar_t *w, Py_ssize_t size) except -1
+
+ # Convert the Unicode object to a wide character string. The output
+ # string always ends with a null character. If size is not NULL,
+ # write the number of wide characters (excluding the trailing null
+ # termination character) into *size. Note that the resulting wchar_t
+ # string might contain null characters, which would cause the string
+ # to be truncated when used with most C functions. If size is NULL and
+ # the wchar_t* string contains null characters a ValueError is raised.
+
+ # Returns a buffer allocated by PyMem_New (use PyMem_Free() to free it)
+ # on success. 
On error, returns NULL and *size is undefined. Raises a + # MemoryError if memory allocation is failed. + wchar_t *PyUnicode_AsWideCharString(object o, Py_ssize_t *size) except NULL + +# Unicode Methods + + # Concat two strings giving a new Unicode string. + # Return value: New reference. + unicode PyUnicode_Concat(object left, object right) + + # Split a string giving a list of Unicode strings. If sep is NULL, + # splitting will be done at all whitespace substrings. Otherwise, + # splits occur at the given separator. At most maxsplit splits will + # be done. If negative, no limit is set. Separators are not included + # in the resulting list. + # Return value: New reference. + list PyUnicode_Split(object s, object sep, Py_ssize_t maxsplit) + + # Split a Unicode string at line breaks, returning a list of Unicode + # strings. CRLF is considered to be one line break. If keepend is 0, + # the Line break characters are not included in the resulting strings. + # Return value: New reference. + list PyUnicode_Splitlines(object s, bint keepend) + + # Translate a string by applying a character mapping table to it and + # return the resulting Unicode object. + # + # The mapping table must map Unicode ordinal integers to Unicode ordinal + # integers or None (causing deletion of the character). + # + # Mapping tables need only provide the __getitem__() interface; + # dictionaries and sequences work well. Unmapped character ordinals (ones + # which cause a LookupError) are left untouched and are copied as-is. + # + # errors has the usual meaning for codecs. It may be NULL which indicates + # to use the default error handling. + # Return value: New reference. + unicode PyUnicode_Translate(object str, object table, const char *errors) + + # Join a sequence of strings using the given separator and return the + # resulting Unicode string. + # Return value: New reference. 
+ unicode PyUnicode_Join(object separator, object seq) + + # Return 1 if substr matches str[start:end] at the given tail end + # (direction == -1 means to do a prefix match, direction == 1 a + # suffix match), 0 otherwise. + # Return -1 if an error occurred. + Py_ssize_t PyUnicode_Tailmatch(object str, object substr, + Py_ssize_t start, Py_ssize_t end, int direction) except -1 + + # Return the first position of substr in str[start:end] using the given + # direction (direction == 1 means to do a forward search, direction == -1 + # a backward search). The return value is the index of the first match; + # a value of -1 indicates that no match was found, and -2 indicates that an + # error occurred and an exception has been set. + Py_ssize_t PyUnicode_Find(object str, object substr, Py_ssize_t start, Py_ssize_t end, int direction) except -2 + + # Return the first position of the character ch in str[start:end] using + # the given direction (direction == 1 means to do a forward search, + # direction == -1 a backward search). The return value is the index of + # the first match; a value of -1 indicates that no match was found, and + # -2 indicates that an error occurred and an exception has been set. + # New in version 3.3. + Py_ssize_t PyUnicode_FindChar(object str, Py_UCS4 ch, Py_ssize_t start, Py_ssize_t end, int direction) except -2 + + # Return the number of non-overlapping occurrences of substr in + # str[start:end]. Return -1 if an error occurred. + Py_ssize_t PyUnicode_Count(object str, object substr, Py_ssize_t start, Py_ssize_t end) except -1 + + # Replace at most maxcount occurrences of substr in str with replstr and + # return the resulting Unicode object. maxcount == -1 means replace all + # occurrences. + # Return value: New reference. + unicode PyUnicode_Replace(object str, object substr, object replstr, Py_ssize_t maxcount) + + # Compare two strings and return -1, 0, 1 for less than, + # equal, and greater than, respectively. 
+ int PyUnicode_Compare(object left, object right) except? -1 + + # Compare a unicode object, uni, with string and return -1, 0, 1 for less than, + # equal, and greater than, respectively. It is best to pass only ASCII-encoded + # strings, but the function interprets the input string as ISO-8859-1 if it + # contains non-ASCII characters. + int PyUnicode_CompareWithASCIIString(object uni, const char *string) + + # Rich compare two unicode strings and return one of the following: + # + # NULL in case an exception was raised + # Py_True or Py_False for successful comparisons + # Py_NotImplemented in case the type combination is unknown + # + # Note that Py_EQ and Py_NE comparisons can cause a UnicodeWarning in case + # the conversion of the arguments to Unicode fails with a UnicodeDecodeError. + # + # Possible values for op are Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, and Py_LE. + object PyUnicode_RichCompare(object left, object right, int op) + + # Return a new string object from format and args; this is analogous to + # format % args. + # Return value: New reference. + unicode PyUnicode_Format(object format, object args) + + # Check whether element is contained in container and return true or false + # accordingly. + # + # element has to coerce to a one element Unicode string. -1 is returned + # if there was an error. + int PyUnicode_Contains(object container, object element) except -1 + + # Intern the argument *string in place. The argument must be the address + # of a pointer variable pointing to a Python unicode string object. If + # there is an existing interned string that is the same as *string, it sets + # *string to it (decrementing the reference count of the old string object + # and incrementing the reference count of the interned string object), + # otherwise it leaves *string alone and interns it (incrementing its reference + # count). 
(Clarification: even though there is a lot of talk about reference + # counts, think of this function as reference-count-neutral; you own the object + # after the call if and only if you owned it before the call.) + #void PyUnicode_InternInPlace(PyObject **string) + + # A combination of PyUnicode_FromString() and PyUnicode_InternInPlace(), + # returning either a new unicode string object that has been interned, or + # a new ("owned") reference to an earlier interned string object with the + # same value. + unicode PyUnicode_InternFromString(const char *v) + + +# Codecs + + # Create a Unicode object by decoding size bytes of the encoded + # string s. encoding and errors have the same meaning as the + # parameters of the same name in the unicode() builtin + # function. The codec to be used is looked up using the Python + # codec registry. Return NULL if an exception was raised by the + # codec. + object PyUnicode_Decode(char *s, Py_ssize_t size, char *encoding, char *errors) + + # Encode the Py_UNICODE buffer of the given size and return a + # Python string object. encoding and errors have the same meaning + # as the parameters of the same name in the Unicode encode() + # method. The codec to be used is looked up using the Python codec + # registry. Return NULL if an exception was raised by the codec. + object PyUnicode_Encode(Py_UNICODE *s, Py_ssize_t size, + char *encoding, char *errors) + + # Encode a Unicode object and return the result as Python string + # object. encoding and errors have the same meaning as the + # parameters of the same name in the Unicode encode() method. The + # codec to be used is looked up using the Python codec + # registry. Return NULL if an exception was raised by the codec. + object PyUnicode_AsEncodedString(object unicode, char *encoding, char *errors) + +# These are the UTF-8 codec APIs: + + # Create a Unicode object by decoding size bytes of the UTF-8 + # encoded string s. Return NULL if an exception was raised by the + # codec. 
+ unicode PyUnicode_DecodeUTF8(char *s, Py_ssize_t size, char *errors) + + # If consumed is NULL, behave like PyUnicode_DecodeUTF8(). If + # consumed is not NULL, trailing incomplete UTF-8 byte sequences + # will not be treated as an error. Those bytes will not be decoded + # and the number of bytes that have been decoded will be stored in + # consumed. New in version 2.4. + unicode PyUnicode_DecodeUTF8Stateful(char *s, Py_ssize_t size, char *errors, Py_ssize_t *consumed) + + # Encode the Py_UNICODE buffer of the given size using UTF-8 and + # return a Python string object. Return NULL if an exception was + # raised by the codec. + bytes PyUnicode_EncodeUTF8(Py_UNICODE *s, Py_ssize_t size, char *errors) + + # Encode a Unicode objects using UTF-8 and return the result as Python bytes object. Error handling is ``strict''. Return NULL if an exception was raised by the codec. + bytes PyUnicode_AsUTF8String(object unicode) + + + # Return a pointer to the UTF-8 encoding of the Unicode object, + # and store the size of the encoded representation (in bytes) in size. + # The size argument can be NULL; in this case no size will be stored. + # The returned buffer always has an extra null byte appended + # (not included in size), regardless of whether there are any + # other null code points. + + # In the case of an error, NULL is returned with an exception set and + # no size is stored. + + # This caches the UTF-8 representation of the string in the Unicode + # object, and subsequent calls will return a pointer to the same buffer. + # The caller is not responsible for deallocating the buffer + const char* PyUnicode_AsUTF8AndSize(object unicode, Py_ssize_t *size) except NULL + + + # As PyUnicode_AsUTF8AndSize(), but does not store the size. + const char *PyUnicode_AsUTF8(object unicode) except NULL + +# These are the UTF-16 codec APIs: + + # Decode length bytes from a UTF-16 encoded buffer string and + # return the corresponding Unicode object. 
errors (if non-NULL)
+ # defines the error handling. It defaults to ``strict''.
+ #
+ # If byteorder is non-NULL, the decoder starts decoding using the
+ # given byte order:
+ #
+ # *byteorder == -1: little endian
+ # *byteorder == 0: native order
+ # *byteorder == 1: big endian
+ #
+ # and then switches if the first two bytes of the input data are a
+ # byte order mark (BOM) and the specified byte order is native
+ # order. This BOM is not copied into the resulting Unicode
+ # string. After completion, *byteorder is set to the current byte
+ # order at the end of the input data.
+ #
+ # If byteorder is NULL, the codec starts in native order mode.
+ unicode PyUnicode_DecodeUTF16(char *s, Py_ssize_t size, char *errors, int *byteorder)
+
+ # If consumed is NULL, behave like PyUnicode_DecodeUTF16(). If
+ # consumed is not NULL, PyUnicode_DecodeUTF16Stateful() will not
+ # treat trailing incomplete UTF-16 byte sequences (such as an odd
+ # number of bytes or a split surrogate pair) as an error. Those
+ # bytes will not be decoded and the number of bytes that have been
+ # decoded will be stored in consumed. New in version 2.4.
+ unicode PyUnicode_DecodeUTF16Stateful(char *s, Py_ssize_t size, char *errors, int *byteorder, Py_ssize_t *consumed)
+
+ # Return a Python string object holding the UTF-16 encoded value
+ # of the Unicode data in s. If byteorder is not 0, output is
+ # written according to the following byte order:
+ #
+ # byteorder == -1: little endian
+ # byteorder == 0: native byte order (writes a BOM mark)
+ # byteorder == 1: big endian
+ #
+ # If byteorder is 0, the output string will always start with the
+ # Unicode BOM mark (U+FEFF). In the other two modes, no BOM mark
+ # is prepended.
+ #
+ # If Py_UNICODE_WIDE is defined, a single Py_UNICODE value may get
+ # represented as a surrogate pair. If it is not defined, each
+ # Py_UNICODE values is interpreted as an UCS-2 character. 
+ bytes PyUnicode_EncodeUTF16(Py_UNICODE *s, Py_ssize_t size, char *errors, int byteorder) + + # Return a Python string using the UTF-16 encoding in native byte + # order. The string always starts with a BOM mark. Error handling + # is ``strict''. Return NULL if an exception was raised by the + # codec. + bytes PyUnicode_AsUTF16String(object unicode) + +# These are the ``Unicode Escape'' codec APIs: + + # Create a Unicode object by decoding size bytes of the + # Unicode-Escape encoded string s. Return NULL if an exception was + # raised by the codec. + object PyUnicode_DecodeUnicodeEscape(char *s, Py_ssize_t size, char *errors) + + # Encode the Py_UNICODE buffer of the given size using + # Unicode-Escape and return a Python string object. Return NULL if + # an exception was raised by the codec. + object PyUnicode_EncodeUnicodeEscape(Py_UNICODE *s, Py_ssize_t size) + + # Encode a Unicode objects using Unicode-Escape and return the + # result as Python string object. Error handling is + # ``strict''. Return NULL if an exception was raised by the codec. + object PyUnicode_AsUnicodeEscapeString(object unicode) + +# These are the ``Raw Unicode Escape'' codec APIs: + + # Create a Unicode object by decoding size bytes of the + # Raw-Unicode-Escape encoded string s. Return NULL if an exception + # was raised by the codec. + object PyUnicode_DecodeRawUnicodeEscape(char *s, Py_ssize_t size, char *errors) + + # Encode the Py_UNICODE buffer of the given size using + # Raw-Unicode-Escape and return a Python string object. Return + # NULL if an exception was raised by the codec. + object PyUnicode_EncodeRawUnicodeEscape(Py_UNICODE *s, Py_ssize_t size, char *errors) + + # Encode a Unicode objects using Raw-Unicode-Escape and return the + # result as Python string object. Error handling is + # ``strict''. Return NULL if an exception was raised by the codec. 
+ object PyUnicode_AsRawUnicodeEscapeString(object unicode) + +# These are the Latin-1 codec APIs: Latin-1 corresponds to the first 256 Unicode ordinals and only these are accepted by the codecs during encoding. + + # Create a Unicode object by decoding size bytes of the Latin-1 + # encoded string s. Return NULL if an exception was raised by the + # codec. + unicode PyUnicode_DecodeLatin1(char *s, Py_ssize_t size, char *errors) + + # Encode the Py_UNICODE buffer of the given size using Latin-1 and + # return a Python bytes object. Return NULL if an exception was + # raised by the codec. + bytes PyUnicode_EncodeLatin1(Py_UNICODE *s, Py_ssize_t size, char *errors) + + # Encode a Unicode objects using Latin-1 and return the result as + # Python bytes object. Error handling is ``strict''. Return NULL + # if an exception was raised by the codec. + bytes PyUnicode_AsLatin1String(object unicode) + +# These are the ASCII codec APIs. Only 7-bit ASCII data is +# accepted. All other codes generate errors. + + # Create a Unicode object by decoding size bytes of the ASCII + # encoded string s. Return NULL if an exception was raised by the + # codec. + unicode PyUnicode_DecodeASCII(char *s, Py_ssize_t size, char *errors) + + # Encode the Py_UNICODE buffer of the given size using ASCII and + # return a Python bytes object. Return NULL if an exception was + # raised by the codec. + bytes PyUnicode_EncodeASCII(Py_UNICODE *s, Py_ssize_t size, char *errors) + + # Encode a Unicode objects using ASCII and return the result as + # Python bytes object. Error handling is ``strict''. Return NULL + # if an exception was raised by the codec. + bytes PyUnicode_AsASCIIString(object o) + +# These are the mapping codec APIs: +# +# This codec is special in that it can be used to implement many +# different codecs (and this is in fact what was done to obtain most +# of the standard codecs included in the encodings package). The codec +# uses mapping to encode and decode characters. 
+# +# Decoding mappings must map single string characters to single +# Unicode characters, integers (which are then interpreted as Unicode +# ordinals) or None (meaning "undefined mapping" and causing an +# error). +# +# Encoding mappings must map single Unicode characters to single +# string characters, integers (which are then interpreted as Latin-1 +# ordinals) or None (meaning "undefined mapping" and causing an +# error). +# +# The mapping objects provided must only support the __getitem__ +# mapping interface. +# +# If a character lookup fails with a LookupError, the character is +# copied as-is meaning that its ordinal value will be interpreted as +# Unicode or Latin-1 ordinal resp. Because of this, mappings only need +# to contain those mappings which map characters to different code +# points. + + # Create a Unicode object by decoding size bytes of the encoded + # string s using the given mapping object. Return NULL if an + # exception was raised by the codec. If mapping is NULL latin-1 + # decoding will be done. Else it can be a dictionary mapping byte + # or a unicode string, which is treated as a lookup table. Byte + # values greater that the length of the string and U+FFFE + # "characters" are treated as "undefined mapping". Changed in + # version 2.4: Allowed unicode string as mapping argument. + object PyUnicode_DecodeCharmap(char *s, Py_ssize_t size, object mapping, char *errors) + + # Encode the Py_UNICODE buffer of the given size using the given + # mapping object and return a Python string object. Return NULL if + # an exception was raised by the codec. + # + # Deprecated since version 3.3, will be removed in version 4.0. + object PyUnicode_EncodeCharmap(Py_UNICODE *s, Py_ssize_t size, object mapping, char *errors) + + # Encode a Unicode objects using the given mapping object and + # return the result as Python string object. Error handling is + # ``strict''. Return NULL if an exception was raised by the codec. 
+ object PyUnicode_AsCharmapString(object o, object mapping) + +# The following codec API is special in that maps Unicode to Unicode. + + # Translate a Py_UNICODE buffer of the given length by applying a + # character mapping table to it and return the resulting Unicode + # object. Return NULL when an exception was raised by the codec. + # + # The mapping table must map Unicode ordinal integers to Unicode + # ordinal integers or None (causing deletion of the character). + # + # Mapping tables need only provide the __getitem__() interface; + # dictionaries and sequences work well. Unmapped character + # ordinals (ones which cause a LookupError) are left untouched and + # are copied as-is. + # + # Deprecated since version 3.3, will be removed in version 4.0. + object PyUnicode_TranslateCharmap(Py_UNICODE *s, Py_ssize_t size, + object table, char *errors) + +# These are the MBCS codec APIs. They are currently only available on +# Windows and use the Win32 MBCS converters to implement the +# conversions. Note that MBCS (or DBCS) is a class of encodings, not +# just one. The target encoding is defined by the user settings on the +# machine running the codec. + + # Create a Unicode object by decoding size bytes of the MBCS + # encoded string s. Return NULL if an exception was raised by the + # codec. + unicode PyUnicode_DecodeMBCS(char *s, Py_ssize_t size, char *errors) + + # If consumed is NULL, behave like PyUnicode_DecodeMBCS(). If + # consumed is not NULL, PyUnicode_DecodeMBCSStateful() will not + # decode trailing lead byte and the number of bytes that have been + # decoded will be stored in consumed. New in version 2.5. + # NOTE: Python 2.x uses 'int' values for 'size' and 'consumed' (changed in 3.0) + unicode PyUnicode_DecodeMBCSStateful(char *s, Py_ssize_t size, char *errors, Py_ssize_t *consumed) + + # Encode the Py_UNICODE buffer of the given size using MBCS and + # return a Python string object. Return NULL if an exception was + # raised by the codec. 
+ bytes PyUnicode_EncodeMBCS(Py_UNICODE *s, Py_ssize_t size, char *errors) + + # Encode a Unicode objects using MBCS and return the result as + # Python string object. Error handling is ``strict''. Return NULL + # if an exception was raised by the codec. + bytes PyUnicode_AsMBCSString(object o) + + # Encode the Unicode object using the specified code page and return + # a Python bytes object. Return NULL if an exception was raised by the + # codec. Use CP_ACP code page to get the MBCS encoder. + # + # New in version 3.3. + bytes PyUnicode_EncodeCodePage(int code_page, object unicode, const char *errors) + + +# Py_UCS4 helpers (new in CPython 3.3) + + # These utility functions work on strings of Py_UCS4 characters and + # otherwise behave like the C standard library functions with the same name. + + size_t Py_UCS4_strlen(const Py_UCS4 *u) + Py_UCS4* Py_UCS4_strcpy(Py_UCS4 *s1, const Py_UCS4 *s2) + Py_UCS4* Py_UCS4_strncpy(Py_UCS4 *s1, const Py_UCS4 *s2, size_t n) + Py_UCS4* Py_UCS4_strcat(Py_UCS4 *s1, const Py_UCS4 *s2) + int Py_UCS4_strcmp(const Py_UCS4 *s1, const Py_UCS4 *s2) + int Py_UCS4_strncmp(const Py_UCS4 *s1, const Py_UCS4 *s2, size_t n) + Py_UCS4* Py_UCS4_strchr(const Py_UCS4 *s, Py_UCS4 c) + Py_UCS4* Py_UCS4_strrchr(const Py_UCS4 *s, Py_UCS4 c) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/__init__.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/__init__.pxd @@ -0,0 +1 @@ +# empty file diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/complex.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/complex.pxd new file mode 100644 index 
0000000000000000000000000000000000000000..7cd740cb8ec2cefae042264425ec90bf31aa8b90 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/complex.pxd @@ -0,0 +1,35 @@ +cdef extern from "" nogil: + # Trigonometric functions. + double complex cacos(double complex z) + double complex casin(double complex z) + double complex catan(double complex z) + double complex ccos(double complex z) + double complex csin(double complex z) + double complex ctan(double complex z) + + # Hyperbolic functions. + double complex cacosh(double complex z) + double complex casinh(double complex z) + double complex catanh(double complex z) + double complex ccosh(double complex z) + double complex csinh(double complex z) + double complex ctanh(double complex z) + + # Exponential and logarithmic functions. + double complex cexp(double complex z) + double complex clog(double complex z) + double complex clog10(double complex z) + + # Power functions. + double complex cpow(double complex x, double complex y) + double complex csqrt(double complex z) + + # Absolute value, conjugates, and projection. + double cabs(double complex z) + double carg(double complex z) + double complex conj(double complex z) + double complex cproj(double complex z) + + # Decomposing complex values. 
+ double cimag(double complex z) + double creal(double complex z) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/errno.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/errno.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9803a25fe3d12e70d71964b35dbddc8ffc4de8d9 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/errno.pxd @@ -0,0 +1,127 @@ +# 7.5 Errors + +cdef extern from "" nogil: + enum: + EPERM + ENOENT + ESRCH + EINTR + EIO + ENXIO + E2BIG + ENOEXEC + EBADF + ECHILD + EAGAIN + ENOMEM + EACCES + EFAULT + ENOTBLK + EBUSY + EEXIST + EXDEV + ENODEV + ENOTDIR + EISDIR + EINVAL + ENFILE + EMFILE + ENOTTY + ETXTBSY + EFBIG + ENOSPC + ESPIPE + EROFS + EMLINK + EPIPE + EDOM + ERANGE + EDEADLOCK + ENAMETOOLONG + ENOLCK + ENOSYS + ENOTEMPTY + ELOOP + ENOMSG + EIDRM + ECHRNG + EL2NSYNC + EL3HLT + EL3RST + ELNRNG + EUNATCH + ENOCSI + EL2HLT + EBADE + EBADR + EXFULL + ENOANO + EBADRQC + EBADSLT + EBFONT + ENOSTR + ENODATA + ENOATTR + ETIME + ENOSR + ENONET + ENOPKG + EREMOTE + ENOLINK + EADV + ESRMNT + ECOMM + EPROTO + EMULTIHOP + EDOTDOT + EBADMSG + EOVERFLOW + ENOTUNIQ + EBADFD + EREMCHG + ELIBACC + ELIBBAD + ELIBSCN + ELIBMAX + ELIBEXEC + EILSEQ + ERESTART + ESTRPIPE + EUSERS + ENOTSOCK + EDESTADDRREQ + EMSGSIZE + EPROTOTYPE + ENOPROTOOPT + EPROTONOSUPPORT + ESOCKTNOSUPPORT + EOPNOTSUPP + EPFNOSUPPORT + EAFNOSUPPORT + EADDRINUSE + EADDRNOTAVAIL + ENETDOWN + ENETUNREACH + ENETRESET + ECONNABORTED + ECONNRESET + ENOBUFS + EISCONN + ENOTCONN + ESHUTDOWN + ETOOMANYREFS + ETIMEDOUT + ECONNREFUSED + EHOSTDOWN + EHOSTUNREACH + EALREADY + EINPROGRESS + ESTALE + EUCLEAN + ENOTNAM + ENAVAIL + EISNAM + EREMOTEIO + EDQUOT + + int errno diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/float.pxd 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/float.pxd new file mode 100644 index 0000000000000000000000000000000000000000..5e4e12d4f456af070239b0618643945a94756f7b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/float.pxd @@ -0,0 +1,43 @@ +# 5.2.4.2.2 Characteristics of floating types + +cdef extern from "": + + const float FLT_RADIX + + const float FLT_MANT_DIG + const double DBL_MANT_DIG + const long double LDBL_MANT_DIG + + const double DECIMAL_DIG + + const float FLT_DIG + const double DBL_DIG + const long double LDBL_DIG + + const float FLT_MIN_EXP + const double DBL_MIN_EXP + const long double LDBL_MIN_EXP + + const float FLT_MIN_10_EXP + const double DBL_MIN_10_EXP + const long double LDBL_MIN_10_EXP + + const float FLT_MAX_EXP + const double DBL_MAX_EXP + const long double LDBL_MAX_EXP + + const float FLT_MAX_10_EXP + const double DBL_MAX_10_EXP + const long double LDBL_MAX_10_EXP + + const float FLT_MAX + const double DBL_MAX + const long double LDBL_MAX + + const float FLT_EPSILON + const double DBL_EPSILON + const long double LDBL_EPSILON + + const float FLT_MIN + const double DBL_MIN + const long double LDBL_MIN diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/limits.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/limits.pxd new file mode 100644 index 0000000000000000000000000000000000000000..39d10a1ff9aea248331b8435ff0cf1127dc25ea0 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/limits.pxd @@ -0,0 +1,28 @@ +# 5.2.4.2.1 Sizes of integer types + +cdef extern from "": + const int CHAR_BIT + const int MB_LEN_MAX + + const char CHAR_MIN + const char CHAR_MAX + + const signed char SCHAR_MIN + const signed char SCHAR_MAX + const unsigned char UCHAR_MAX + + const short SHRT_MIN + const short SHRT_MAX + const unsigned 
short USHRT_MAX + + const int INT_MIN + const int INT_MAX + const unsigned int UINT_MAX + + const long LONG_MIN + const long LONG_MAX + const unsigned long ULONG_MAX + + const long long LLONG_MIN + const long long LLONG_MAX + const unsigned long long ULLONG_MAX diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/locale.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/locale.pxd new file mode 100644 index 0000000000000000000000000000000000000000..5cbec953ef591c0788478bcbec2e00936f31cc3b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/locale.pxd @@ -0,0 +1,46 @@ +# 7.11 Localization + +# deprecated cimport for backwards compatibility: +from libc.string cimport const_char + + +cdef extern from "" nogil: + + struct lconv: + char *decimal_point + char *thousands_sep + char *grouping + char *mon_decimal_point + char *mon_thousands_sep + char *mon_grouping + char *positive_sign + char *negative_sign + char *currency_symbol + char frac_digits + char p_cs_precedes + char n_cs_precedes + char p_sep_by_space + char n_sep_by_space + char p_sign_posn + char n_sign_posn + char *int_curr_symbol + char int_frac_digits + char int_p_cs_precedes + char int_n_cs_precedes + char int_p_sep_by_space + char int_n_sep_by_space + char int_p_sign_posn + char int_n_sign_posn + + enum: LC_ALL + enum: LC_COLLATE + enum: LC_CTYPE + enum: LC_MONETARY + enum: LC_NUMERIC + enum: LC_TIME + + # 7.11.1 Locale control + char *setlocale (int category, const char *locale) + + # 7.11.2 Numeric formatting convention inquiry + lconv *localeconv () diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/math.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/math.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4a9858c2af4bb625bbd20dfb7ff415d33481c7f6 --- /dev/null 
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/math.pxd @@ -0,0 +1,209 @@ +cdef extern from "" nogil: + const double M_E + const double e "M_E" # as in Python's math module + const double M_LOG2E + const double M_LOG10E + const double M_LN2 + const double M_LN10 + const double M_PI + const double pi "M_PI" # as in Python's math module + const double M_PI_2 + const double M_PI_4 + const double M_1_PI + const double M_2_PI + const double M_2_SQRTPI + const double M_SQRT2 + const double M_SQRT1_2 + + # C99 constants + const float INFINITY + const float NAN + # note: not providing "nan" and "inf" aliases here as nan() is a function in C + const double HUGE_VAL + const float HUGE_VALF + const long double HUGE_VALL + + # All C99 functions in alphabetical order + double acos(double x) + float acosf(float) + double acosh(double x) + float acoshf(float) + long double acoshl(long double) + long double acosl(long double) + double asin(double x) + float asinf(float) + double asinh(double x) + float asinhf(float) + long double asinhl(long double) + long double asinl(long double) + double atan(double x) + double atan2(double y, double x) + float atan2f(float, float) + long double atan2l(long double, long double) + float atanf(float) + double atanh(double x) + float atanhf(float) + long double atanhl(long double) + long double atanl(long double) + double cbrt(double x) + float cbrtf(float) + long double cbrtl(long double) + double ceil(double x) + float ceilf(float) + long double ceill(long double) + double copysign(double, double) + float copysignf(float, float) + long double copysignl(long double, long double) + double cos(double x) + float cosf(float) + double cosh(double x) + float coshf(float) + long double coshl(long double) + long double cosl(long double) + double erf(double) + double erfc(double) + float erfcf(float) + long double erfcl(long double) + float erff(float) + long double erfl(long double) + double exp(double x) + 
double exp2(double x) + float exp2f(float) + long double exp2l(long double) + float expf(float) + long double expl(long double) + double expm1(double x) + float expm1f(float) + long double expm1l(long double) + double fabs(double x) + float fabsf(float) + long double fabsl(long double) + double fdim(double x, double y) + float fdimf(float, float) + long double fdiml(long double, long double) + double floor(double x) + float floorf(float) + long double floorl(long double) + double fma(double x, double y, double z) + float fmaf(float, float, float) + long double fmal(long double, long double, long double) + double fmax(double x, double y) + float fmaxf(float, float) + long double fmaxl(long double, long double) + double fmin(double x, double y) + float fminf(float, float) + long double fminl(long double, long double) + double fmod(double x, double y) + float fmodf(float, float) + long double fmodl(long double, long double) + double frexp(double x, int* exponent) + float frexpf(float, int* exponent) + long double frexpl(long double, int*) + double hypot(double x, double y) + float hypotf(float, float) + long double hypotl(long double, long double) + int ilogb(double x) + int ilogbf(float) + int ilogbl(long double) + double ldexp(double x, int exponent) + float ldexpf(float, int exponent) + long double ldexpl(long double, int exponent) + double lgamma(double x) + float lgammaf(float) + long double lgammal(long double) + long long llrint(double) + long long llrintf(float) + long long llrintl(long double) + long long llround(double) + long long llroundf(float) + long long llroundl(long double) + double log(double x) + double log10(double x) + float log10f(float) + long double log10l(long double) + double log1p(double x) + float log1pf(float) + long double log1pl(long double) + double log2(double x) + float log2f(float) + long double log2l(long double) + double logb(double x) + float logbf(float) + long double logbl(long double) + float logf(float) + long double logl(long 
double) + long lrint(double) + long lrintf(float) + long lrintl(long double) + long lround(double) + long lroundf(float) + long lroundl(long double) + double modf(double x, double* iptr) + float modff(float, float* iptr) + long double modfl(long double, long double* iptr) + double nan(const char*) + float nanf(const char*) + long double nanl(const char*) + double nearbyint(double x) + float nearbyintf(float) + long double nearbyintl(long double) + double nextafter(double, double) + float nextafterf(float, float) + long double nextafterl(long double, long double) + double nexttoward(double, long double) + float nexttowardf(float, long double) + long double nexttowardl(long double, long double) + double pow(double x, double y) + float powf(float, float) + long double powl(long double, long double) + double remainder(double x, double y) + float remainderf(float, float) + long double remainderl(long double, long double) + double remquo(double x, double y, int* quot) + float remquof(float, float, int* quot) + long double remquol(long double, long double, int* quot) + double rint(double x) + float rintf(float) + long double rintl(long double) + double round(double x) + float roundf(float) + long double roundl(long double) + double scalbln(double x, long n) + float scalblnf(float, long) + long double scalblnl(long double, long) + double scalbn(double x, int n) + float scalbnf(float, int) + long double scalbnl(long double, int) + double sin(double x) + float sinf(float) + double sinh(double x) + float sinhf(float) + long double sinhl(long double) + long double sinl(long double) + double sqrt(double x) + float sqrtf(float) + long double sqrtl(long double) + double tan(double x) + float tanf(float) + double tanh(double x) + float tanhf(float) + long double tanhl(long double) + long double tanl(long double) + double tgamma(double x) + float tgammaf(float) + long double tgammal(long double) + double trunc(double x) + float truncf(float) + long double truncl(long double) + + 
int isinf(long double) # -1 / 0 / 1 + bint isfinite(long double) + bint isnan(long double) + bint isnormal(long double) + bint signbit(long double) + int fpclassify(long double) + const int FP_NAN + const int FP_INFINITE + const int FP_ZERO + const int FP_SUBNORMAL + const int FP_NORMAL diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/setjmp.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/setjmp.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6c11a534d49b946180771976588f7fabb0cfdfaf --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/setjmp.pxd @@ -0,0 +1,10 @@ +cdef extern from "" nogil: + ctypedef struct jmp_buf: + pass + int setjmp(jmp_buf state) + void longjmp(jmp_buf state, int value) + + ctypedef struct sigjmp_buf: + pass + int sigsetjmp(sigjmp_buf state, int savesigs) + void siglongjmp(sigjmp_buf state, int value) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/signal.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/signal.pxd new file mode 100644 index 0000000000000000000000000000000000000000..fd392adea943b8cdf55f8829da51364a0ddb89b9 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/signal.pxd @@ -0,0 +1,64 @@ +# 7.14 Signal handling + +ctypedef void (*sighandler_t)(int SIGNUM) noexcept nogil + +cdef extern from "" nogil: + + ctypedef int sig_atomic_t + + sighandler_t SIG_DFL + sighandler_t SIG_IGN + sighandler_t SIG_ERR + + sighandler_t signal (int signum, sighandler_t action) + int raise_"raise" (int signum) + + # Signals + enum: + # Program Error + SIGFPE + SIGILL + SIGSEGV + SIGBUS + SIGABRT + SIGIOT + SIGTRAP + SIGEMT + SIGSYS + SIGSTKFLT + # Termination + SIGTERM + SIGINT + SIGQUIT + SIGKILL + SIGHUP + # Alarm + SIGALRM + SIGVTALRM + SIGPROF + # 
Asynchronous I/O + SIGIO + SIGURG + SIGPOLL + # Job Control + SIGCHLD + SIGCLD + SIGCONT + SIGSTOP + SIGTSTP + SIGTTIN + SIGTTOU + # Operation Error + SIGPIPE + SIGLOST + SIGXCPU + SIGXFSZ + SIGPWR + # Miscellaneous + SIGUSR1 + SIGUSR2 + SIGWINCH + SIGINFO + # Real-time signals + SIGRTMIN + SIGRTMAX diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stddef.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stddef.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9b0f4c5fd2c08c80d3a33bc899cb9878e4ec26f3 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stddef.pxd @@ -0,0 +1,9 @@ +# 7.17 Common definitions + +cdef extern from "": + + ctypedef signed int ptrdiff_t + + ctypedef unsigned int size_t + + ctypedef int wchar_t diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdint.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdint.pxd new file mode 100644 index 0000000000000000000000000000000000000000..ced3d46adda26bcfbde31c8e89f4fb5f26ca1660 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdint.pxd @@ -0,0 +1,105 @@ +# Longness only used for type promotion. +# Actual compile time size used for conversions. 
+ +# 7.18 Integer types +cdef extern from "" nogil: + + # 7.18.1 Integer types + # 7.18.1.1 Exact-width integer types + ctypedef signed char int8_t + ctypedef signed short int16_t + ctypedef signed int int32_t + ctypedef signed long int64_t + ctypedef unsigned char uint8_t + ctypedef unsigned short uint16_t + ctypedef unsigned int uint32_t + ctypedef unsigned long long uint64_t + # 7.18.1.2 Minimum-width integer types + ctypedef signed char int_least8_t + ctypedef signed short int_least16_t + ctypedef signed int int_least32_t + ctypedef signed long int_least64_t + ctypedef unsigned char uint_least8_t + ctypedef unsigned short uint_least16_t + ctypedef unsigned int uint_least32_t + ctypedef unsigned long long uint_least64_t + # 7.18.1.3 Fastest minimum-width integer types + ctypedef signed char int_fast8_t + ctypedef signed short int_fast16_t + ctypedef signed int int_fast32_t + ctypedef signed long int_fast64_t + ctypedef unsigned char uint_fast8_t + ctypedef unsigned short uint_fast16_t + ctypedef unsigned int uint_fast32_t + ctypedef unsigned long long uint_fast64_t + # 7.18.1.4 Integer types capable of holding object pointers + ctypedef ssize_t intptr_t + ctypedef size_t uintptr_t + # 7.18.1.5 Greatest-width integer types + ctypedef signed long long intmax_t + ctypedef unsigned long long uintmax_t + + # 7.18.2 Limits of specified-width integer types + # 7.18.2.1 Limits of exact-width integer types + int8_t INT8_MIN + int16_t INT16_MIN + int32_t INT32_MIN + int64_t INT64_MIN + int8_t INT8_MAX + int16_t INT16_MAX + int32_t INT32_MAX + int64_t INT64_MAX + uint8_t UINT8_MAX + uint16_t UINT16_MAX + uint32_t UINT32_MAX + uint64_t UINT64_MAX + #7.18.2.2 Limits of minimum-width integer types + int_least8_t INT_LEAST8_MIN + int_least16_t INT_LEAST16_MIN + int_least32_t INT_LEAST32_MIN + int_least64_t INT_LEAST64_MIN + int_least8_t INT_LEAST8_MAX + int_least16_t INT_LEAST16_MAX + int_least32_t INT_LEAST32_MAX + int_least64_t INT_LEAST64_MAX + uint_least8_t UINT_LEAST8_MAX 
+ uint_least16_t UINT_LEAST16_MAX + uint_least32_t UINT_LEAST32_MAX + uint_least64_t UINT_LEAST64_MAX + #7.18.2.3 Limits of fastest minimum-width integer types + int_fast8_t INT_FAST8_MIN + int_fast16_t INT_FAST16_MIN + int_fast32_t INT_FAST32_MIN + int_fast64_t INT_FAST64_MIN + int_fast8_t INT_FAST8_MAX + int_fast16_t INT_FAST16_MAX + int_fast32_t INT_FAST32_MAX + int_fast64_t INT_FAST64_MAX + uint_fast8_t UINT_FAST8_MAX + uint_fast16_t UINT_FAST16_MAX + uint_fast32_t UINT_FAST32_MAX + uint_fast64_t UINT_FAST64_MAX + #7.18.2.4 Limits of integer types capable of holding object pointers + enum: INTPTR_MIN + enum: INTPTR_MAX + enum: UINTPTR_MAX + # 7.18.2.5 Limits of greatest-width integer types + enum: INTMAX_MAX + enum: INTMAX_MIN + enum: UINTMAX_MAX + + # 7.18.3 Limits of other integer types + # ptrdiff_t + enum: PTRDIFF_MIN + enum: PTRDIFF_MAX + # sig_atomic_t + enum: SIG_ATOMIC_MIN + enum: SIG_ATOMIC_MAX + # size_t + size_t SIZE_MAX + # wchar_t + enum: WCHAR_MIN + enum: WCHAR_MAX + # wint_t + enum: WINT_MIN + enum: WINT_MAX diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdio.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdio.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1644a5a0ab02b6948efc10ad8ae4a1296390a905 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdio.pxd @@ -0,0 +1,80 @@ +# 7.19 Input/output + + +# deprecated cimports for backwards compatibility: +from libc.string cimport const_char, const_void + + +cdef extern from "" nogil: + + ctypedef struct FILE + cdef FILE *stdin + cdef FILE *stdout + cdef FILE *stderr + + enum: FOPEN_MAX + enum: FILENAME_MAX + FILE *fopen (const char *filename, const char *opentype) + FILE *freopen (const char *filename, const char *opentype, FILE *stream) + FILE *fdopen (int fdescriptor, const char *opentype) + int fclose (FILE *stream) + int remove 
(const char *filename) + int rename (const char *oldname, const char *newname) + FILE *tmpfile () + + int remove (const char *pathname) + int rename (const char *oldpath, const char *newpath) + + enum: _IOFBF + enum: _IOLBF + enum: _IONBF + int setvbuf (FILE *stream, char *buf, int mode, size_t size) + enum: BUFSIZ + void setbuf (FILE *stream, char *buf) + + size_t fread (void *data, size_t size, size_t count, FILE *stream) + size_t fwrite (const void *data, size_t size, size_t count, FILE *stream) + int fflush (FILE *stream) + + enum: EOF + void clearerr (FILE *stream) + int feof (FILE *stream) + int ferror (FILE *stream) + + enum: SEEK_SET + enum: SEEK_CUR + enum: SEEK_END + int fseek (FILE *stream, long int offset, int whence) + void rewind (FILE *stream) + long int ftell (FILE *stream) + + ctypedef struct fpos_t + ctypedef const fpos_t const_fpos_t "const fpos_t" + int fgetpos (FILE *stream, fpos_t *position) + int fsetpos (FILE *stream, const fpos_t *position) + + int scanf (const char *template, ...) + int sscanf (const char *s, const char *template, ...) + int fscanf (FILE *stream, const char *template, ...) + + int printf (const char *template, ...) + int sprintf (char *s, const char *template, ...) + int snprintf (char *s, size_t size, const char *template, ...) + int fprintf (FILE *stream, const char *template, ...) 
+ + void perror (const char *message) + + char *gets (char *s) + char *fgets (char *s, int count, FILE *stream) + int getchar () + int fgetc (FILE *stream) + int getc (FILE *stream) + int ungetc (int c, FILE *stream) + + int puts (const char *s) + int fputs (const char *s, FILE *stream) + int putchar (int c) + int fputc (int c, FILE *stream) + int putc (int c, FILE *stream) + + size_t getline(char **lineptr, size_t *n, FILE *stream) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdlib.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdlib.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e6fac821c78eb3a176e3ea5ad46ed8dfc4b0186d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/stdlib.pxd @@ -0,0 +1,72 @@ +# 7.20 General utilities + +# deprecated cimports for backwards compatibility: +from libc.string cimport const_char, const_void + + +cdef extern from "" nogil: + + # 7.20.1 Numeric conversion functions + int atoi (const char *string) + long atol (const char *string) + long long atoll (const char *string) + double atof (const char *string) + long strtol (const char *string, char **tailptr, int base) + unsigned long int strtoul (const char *string, char **tailptr, int base) + long long int strtoll (const char *string, char **tailptr, int base) + unsigned long long int strtoull (const char *string, char **tailptr, int base) + float strtof (const char *string, char **tailptr) + double strtod (const char *string, char **tailptr) + long double strtold (const char *string, char **tailptr) + + # 7.20.2 Pseudo-random sequence generation functions + enum: RAND_MAX + int rand () + void srand (unsigned int seed) + + # 7.20.3 Memory management functions + void *calloc (size_t count, size_t eltsize) + void free (void *ptr) + void *malloc (size_t size) + void *realloc (void *ptr, size_t newsize) + + # 7.20.4 
Communication with the environment + enum: EXIT_FAILURE + enum: EXIT_SUCCESS + void exit (int status) + void _exit (int status) + int atexit (void (*function) ()) + void abort () + char *getenv (const char *name) + int system (const char *command) + + #7.20.5 Searching and sorting utilities + void *bsearch (const void *key, const void *array, + size_t count, size_t size, + int (*compare)(const void *, const void *)) + void qsort (void *array, size_t count, size_t size, + int (*compare)(const void *, const void *)) + + # 7.20.6 Integer arithmetic functions + int abs (int number) + long int labs (long int number) + long long int llabs (long long int number) + ctypedef struct div_t: + int quot + int rem + div_t div (int numerator, int denominator) + ctypedef struct ldiv_t: + long int quot + long int rem + ldiv_t ldiv (long int numerator, long int denominator) + ctypedef struct lldiv_t: + long long int quot + long long int rem + lldiv_t lldiv (long long int numerator, long long int denominator) + + + # 7.20.7 Multibyte/wide character conversion functions + # XXX TODO + + # 7.20.8 Multibyte/wide string conversion functions + # XXX TODO diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/string.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/string.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e6d96183f2048c1acb1f23a8b0d667af9ace64e3 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/string.pxd @@ -0,0 +1,50 @@ +# 7.21 String handling + +cdef extern from *: + # deprecated backwards compatibility declarations + ctypedef const char const_char "const char" + ctypedef const signed char const_schar "const signed char" + ctypedef const unsigned char const_uchar "const unsigned char" + ctypedef const void const_void "const void" + +cdef extern from "" nogil: + + void *memcpy (void *pto, const void *pfrom, size_t 
size) + void *memmove (void *pto, const void *pfrom, size_t size) + void *memset (void *block, int c, size_t size) + int memcmp (const void *a1, const void *a2, size_t size) + void *memchr (const void *block, int c, size_t size) + + void *memchr (const void *block, int c, size_t size) + void *memrchr (const void *block, int c, size_t size) + + size_t strlen (const char *s) + char *strcpy (char *pto, const char *pfrom) + char *strncpy (char *pto, const char *pfrom, size_t size) + char *strdup (const char *s) + char *strndup (const char *s, size_t size) + char *strcat (char *pto, const char *pfrom) + char *strncat (char *pto, const char *pfrom, size_t size) + + int strcmp (const char *s1, const char *s2) + int strcasecmp (const char *s1, const char *s2) + int strncmp (const char *s1, const char *s2, size_t size) + int strncasecmp (const char *s1, const char *s2, size_t n) + + int strcoll (const char *s1, const char *s2) + size_t strxfrm (char *pto, const char *pfrom, size_t size) + + char *strerror (int errnum) + + char *strchr (const char *string, int c) + char *strrchr (const char *string, int c) + + char *strstr (const char *haystack, const char *needle) + char *strcasestr (const char *haystack, const char *needle) + + size_t strcspn (const char *string, const char *stopset) + size_t strspn (const char *string, const char *set) + char * strpbrk (const char *string, const char *stopset) + + char *strtok (char *newstring, const char *delimiters) + char *strsep (char **string_ptr, const char *delimiter) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/time.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/time.pxd new file mode 100644 index 0000000000000000000000000000000000000000..318212eea915e5d06d5dae73ef5d6aefc8f82fdc --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libc/time.pxd @@ -0,0 +1,47 @@ +# 
https://en.wikipedia.org/wiki/C_date_and_time_functions + +from libc.stddef cimport wchar_t + +cdef extern from "" nogil: + ctypedef long clock_t + ctypedef long time_t + + enum: CLOCKS_PER_SEC + clock_t clock() # CPU time + time_t time(time_t *) # wall clock time since Unix epoch + + cdef struct tm: + int tm_sec + int tm_min + int tm_hour + int tm_mday + int tm_mon + int tm_year + int tm_wday + int tm_yday + int tm_isdst + # GNU specific extensions + #char *tm_zone + #long tm_gmtoff + + int daylight # global state + long timezone + char *tzname[2] + void tzset() + + char *asctime(const tm *) + char *asctime_r(const tm *, char *) + char *ctime(const time_t *) + char *ctime_r(const time_t *, char *) + double difftime(time_t, time_t) + tm *getdate(const char *) + tm *gmtime(const time_t *) + tm *gmtime_r(const time_t *, tm *) + tm *localtime(const time_t *) + tm *localtime_r(const time_t *, tm *) + time_t mktime(tm *) + size_t strftime(char *, size_t, const char *, const tm *) + size_t wcsftime(wchar_t *str, size_t cnt, const wchar_t *fmt, tm *time) + + # POSIX not stdC + char *strptime(const char *, const char *, tm *) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/__init__.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/__init__.pxd @@ -0,0 +1 @@ +# empty file diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/fcntl.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/fcntl.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f7bec9e37530510819740d70d082e0f04a9aaac9 --- /dev/null +++ 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/fcntl.pxd @@ -0,0 +1,86 @@ +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/fcntl.h.html + +from posix.types cimport mode_t, off_t, pid_t + +cdef extern from "" nogil: + + enum: F_DUPFD + enum: F_DUPFD_CLOEXEC + enum: F_GETFD + enum: F_SETFD + enum: F_GETFL + enum: F_SETFL + enum: F_GETLK + enum: F_SETLK + enum: F_SETLKW + enum: F_GETOWN + enum: F_SETOWN + + enum: FD_CLOEXEC + + enum: F_RDLCK + enum: F_UNLCK + enum: F_WRLCK + + enum: SEEK_SET + enum: SEEK_CUR + enum: SEEK_END + + enum: O_CLOEXEC + enum: O_CREAT + enum: O_DIRECT + enum: O_DIRECTORY + enum: O_EXCL + enum: O_NOCTTY + enum: O_TRUNC + enum: O_TTY_INIT + + enum: O_APPEND + enum: O_DSYNC + enum: O_NONBLOCK + enum: O_RSYNC + enum: O_SYNC + + enum: O_ACCMODE # O_RDONLY|O_WRONLY|O_RDWR + + enum: O_EXEC + enum: O_RDONLY + enum: O_WRONLY + enum: O_RDWR + enum: O_SEARCH + + enum: AT_FDCWD + enum: AT_EACCESS + enum: AT_SYMLINK_NOFOLLOW + enum: AT_SYMLINK_FOLLOW + enum: AT_REMOVEDIR + + enum: S_IFMT + enum: S_IFBLK + enum: S_IFCHR + enum: S_IFIFO + enum: S_IFREG + enum: S_IFDIR + enum: S_IFLNK + enum: S_IFSOCK + + enum: POSIX_FADV_DONTNEED + enum: POSIX_FADV_NOREUSE + enum: POSIX_FADV_NORMAL + enum: POSIX_FADV_RANDOM + enum: POSIX_FADV_SEQUENTIAL + enum: POSIX_FADV_WILLNEED + + struct flock: + short l_type + short l_whence + off_t l_start + off_t l_len + pid_t l_pid + + int creat(const char *, mode_t) + int fcntl(int, int, ...) + int open(const char *, int, ...) + int openat(int, const char *, int, ...) 
+ int posix_fadvise(int, off_t, off_t, int) + int posix_fallocate(int, off_t, off_t) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/mman.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/mman.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9f26f7615f752c7c5ad4406d76f827f01d447b3b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/mman.pxd @@ -0,0 +1,101 @@ +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_mman.h.html +# https://man7.org/linux/man-pages/man2/mmap.2.html +# https://www.freebsd.org/cgi/man.cgi?query=mmap&sektion=2 + +from posix.types cimport off_t, mode_t + +cdef extern from "" nogil: + enum: PROT_EXEC # protection bits for mmap/mprotect + enum: PROT_READ + enum: PROT_WRITE + enum: PROT_NONE + + enum: MAP_PRIVATE # flag bits for mmap + enum: MAP_SHARED + enum: MAP_FIXED + enum: MAP_ANON # These three are not in POSIX, but are + enum: MAP_ANONYMOUS # fairly common in spelling/semantics + enum: MAP_STACK + + enum: MAP_LOCKED # Typically available only on Linux + enum: MAP_HUGETLB + enum: MAP_POPULATE + enum: MAP_NORESERVE + enum: MAP_GROWSDOWN + + enum: MAP_NOCORE # Typically available only on BSD + enum: MAP_NOSYNC + + void *MAP_FAILED + + void *mmap(void *addr, size_t Len, int prot, int flags, int fd, off_t off) + int munmap(void *addr, size_t Len) + int mprotect(void *addr, size_t Len, int prot) + + enum: MS_ASYNC + enum: MS_SYNC + enum: MS_INVALIDATE + int msync(void *addr, size_t Len, int flags) + + enum: POSIX_MADV_NORMAL # POSIX advice flags + enum: POSIX_MADV_SEQUENTIAL + enum: POSIX_MADV_RANDOM + enum: POSIX_MADV_WILLNEED + enum: POSIX_MADV_DONTNEED + int posix_madvise(void *addr, size_t Len, int advice) + + enum: MCL_CURRENT + enum: MCL_FUTURE + int mlock(const void *addr, size_t Len) + int munlock(const void *addr, size_t Len) + int mlockall(int flags) + int 
munlockall() + # Linux-specific + enum: MLOCK_ONFAULT + enum: MCL_ONFAULT + int mlock2(const void *addr, size_t len, int flags) + + int shm_open(const char *name, int oflag, mode_t mode) + int shm_unlink(const char *name) + + # often available + enum: MADV_NORMAL # pre-POSIX advice flags; should translate 1-1 to POSIX_* + enum: MADV_RANDOM # but in practice it is not always the same. + enum: MADV_SEQUENTIAL + enum: MADV_WILLNEED + enum: MADV_DONTNEED + enum: MADV_REMOVE # other pre-POSIX advice flags; often available + enum: MADV_DONTFORK + enum: MADV_DOFORK + enum: MADV_HWPOISON + enum: MADV_MERGEABLE, + enum: MADV_UNMERGEABLE + enum: MADV_SOFT_OFFLINE + enum: MADV_HUGEPAGE + enum: MADV_NOHUGEPAGE + enum: MADV_DONTDUMP + enum: MADV_DODUMP + enum: MADV_FREE + enum: MADV_WIPEONFORK + enum: MADV_KEEPONFORK + int madvise(void *addr, size_t Len, int advice) + + # sometimes available + int mincore(void *addr, size_t Len, unsigned char *vec) + + # These two are Linux specific but sometimes very efficient + void *mremap(void *old_addr, size_t old_len, size_t new_len, int flags, ...) 
+ int remap_file_pages(void *addr, size_t Len, int prot, + size_t pgoff, int flags) + + # The rare but standardized typed memory option + enum: POSIX_TYPED_MEM_ALLOCATE + enum: POSIX_TYPED_MEM_ALLOCATE_CONTIG + enum: POSIX_TYPED_MEM_MAP_ALLOCATABLE + int posix_typed_mem_open(const char *name, int oflag, int tflag) + int posix_mem_offset(const void *addr, size_t Len, off_t *off, + size_t *contig_len, int *fildes) + + cdef struct posix_typed_mem_info: + size_t posix_tmi_length + int posix_typed_mem_get_info(int fildes, posix_typed_mem_info *info) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/signal.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/signal.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9fe7d9c36c7f9c5eefa055608365856631caaeec --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/signal.pxd @@ -0,0 +1,73 @@ +# 7.14 Signal handling + +from posix.types cimport pid_t, sigset_t, uid_t + +cdef extern from "" nogil: + + cdef union sigval: + int sival_int + void *sival_ptr + + cdef struct sigevent: + int sigev_notify + int sigev_signo + sigval sigev_value + void sigev_notify_function(sigval) + + ctypedef struct siginfo_t: + int si_signo + int si_code + int si_errno + pid_t si_pid + uid_t si_uid + void *si_addr + int si_status + long si_band + sigval si_value + + cdef struct sigaction_t "sigaction": + void sa_handler(int) + void sa_sigaction(int, siginfo_t *, void *) + sigset_t sa_mask + int sa_flags + + ctypedef struct stack_t: + void *ss_sp + int ss_flags + size_t ss_size + + enum: SA_NOCLDSTOP + enum: SIG_BLOCK + enum: SIG_UNBLOCK + enum: SIG_SETMASK + enum: SA_ONSTACK + enum: SA_RESETHAND + enum: SA_RESTART + enum: SA_SIGINFO + enum: SA_NOCLDWAIT + enum: SA_NODEFER + enum: SS_ONSTACK + enum: SS_DISABLE + enum: MINSIGSTKSZ + enum: SIGSTKSZ + + enum: SIGEV_NONE + enum: SIGEV_SIGNAL + enum: 
SIGEV_THREAD + enum: SIGEV_THREAD_ID + + + int kill (pid_t, int) + int killpg (pid_t, int) + int sigaction (int, const sigaction_t *, sigaction_t *) + int sigpending (sigset_t *) + int sigprocmask (int, const sigset_t *, sigset_t *) + int sigsuspend (const sigset_t *) + + int sigaddset (sigset_t *, int) + int sigdelset (sigset_t *, int) + int sigemptyset (sigset_t *) + int sigfillset (sigset_t *) + int sigismember (const sigset_t *, int) + + int sigaltstack(const stack_t *, stack_t *) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stdio.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stdio.pxd new file mode 100644 index 0000000000000000000000000000000000000000..38b81555932249f3d6bcafdbf91e98470caa0a13 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stdio.pxd @@ -0,0 +1,37 @@ +# POSIX additions to . +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/stdio.h.html + +from libc.stdio cimport FILE +from libc.stddef cimport wchar_t +from posix.types cimport off_t + +cdef extern from "" nogil: + # File descriptors + FILE *fdopen(int, const char *) + int fileno(FILE *) + + # Pipes + FILE *popen(const char *, const char *) + int pclose(FILE *) + + # Memory streams (POSIX.2008) + FILE *fmemopen(void *, size_t, const char *) + FILE *open_memstream(char **, size_t *) + FILE *open_wmemstream(wchar_t **, size_t *) + + # Seek and tell with off_t + int fseeko(FILE *, off_t, int) + off_t ftello(FILE *) + + # Locking (for multithreading) + void flockfile(FILE *) + int ftrylockfile(FILE *) + void funlockfile(FILE *) + int getc_unlocked(FILE *) + int getchar_unlocked() + int putc_unlocked(int, FILE *) + int putchar_unlocked(int) + + # Reading lines and records (POSIX.2008) + ssize_t getline(char **, size_t *, FILE *) + ssize_t getdelim(char **, size_t *, int, FILE *) diff --git 
a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stdlib.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stdlib.pxd new file mode 100644 index 0000000000000000000000000000000000000000..188e2e50103207a05d2c15d3e643ff92ce6ab614 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stdlib.pxd @@ -0,0 +1,29 @@ +# POSIX additions to +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/stdlib.h.html + +cdef extern from "" nogil: + void _Exit(int) + double drand48() + double erand48(unsigned short *) + int getsubopt(char **, char *const *, char **) + void lcong48(unsigned short *) + long lrand() + char *mkdtemp(char *) + int mkstemp(char *) + long mrand() + long nrand48(unsigned short *) + int posix_memalign(void **, size_t, size_t) + int posix_openpt(int) + char *ptsname(int) + int putenv(char *) + int rand_r(unsigned *) + long random() + char *realpath(const char *, char *) + unsigned short *seed48(unsigned short *) + int setenv(const char *, const char *, int) + void setkey(const char *) + char *setstate(char *) + void srand48(long) + void srandom(unsigned) + int unlockpt(int) + int unsetenv(const char *) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/time.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/time.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a90cab577a249c758f71d2f86ee9dadf1d9456dd --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/time.pxd @@ -0,0 +1,71 @@ +# https://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/time.h.html + +from posix.types cimport suseconds_t, time_t, clockid_t, timer_t +from posix.signal cimport sigevent + +cdef extern from "" nogil: + enum: CLOCK_REALTIME + enum: TIMER_ABSTIME + enum: CLOCK_MONOTONIC + + # 
FreeBSD-specific clocks + enum: CLOCK_UPTIME + enum: CLOCK_UPTIME_PRECISE + enum: CLOCK_UPTIME_FAST + enum: CLOCK_REALTIME_PRECISE + enum: CLOCK_REALTIME_FAST + enum: CLOCK_MONOTONIC_PRECISE + enum: CLOCK_MONOTONIC_FAST + enum: CLOCK_SECOND + + # Linux-specific clocks + enum: CLOCK_PROCESS_CPUTIME_ID + enum: CLOCK_THREAD_CPUTIME_ID + enum: CLOCK_MONOTONIC_RAW + enum: CLOCK_REALTIME_COARSE + enum: CLOCK_MONOTONIC_COARSE + enum: CLOCK_BOOTTIME + enum: CLOCK_REALTIME_ALARM + enum: CLOCK_BOOTTIME_ALARM + + enum: ITIMER_REAL + enum: ITIMER_VIRTUAL + enum: ITIMER_PROF + + cdef struct timezone: + int tz_minuteswest + int dsttime + + cdef struct timeval: + time_t tv_sec + suseconds_t tv_usec + + cdef struct timespec: + time_t tv_sec + long tv_nsec + + cdef struct itimerval: + timeval it_interval + timeval it_value + + cdef struct itimerspec: + timespec it_interval + timespec it_value + + int nanosleep(const timespec *, timespec *) + + int getitimer(int, itimerval *) + int gettimeofday(timeval *tp, timezone *tzp) + int setitimer(int, const itimerval *, itimerval *) + + int clock_getcpuclockid(pid_t, clockid_t *) + int clock_getres(clockid_t, timespec *) + int clock_gettime(clockid_t, timespec *) + int clock_nanosleep(clockid_t, int, const timespec *, timespec *) + int clock_settime(clockid_t, const timespec *) + + int timer_create(clockid_t, sigevent *, timer_t *) + int timer_delete(timer_t) + int timer_gettime(timer_t, itimerspec *) + int timer_getoverrun(timer_t) + int timer_settime(timer_t, int, const itimerspec *, itimerspec *) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/types.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/types.pxd new file mode 100644 index 0000000000000000000000000000000000000000..308f2954ee2058796fb03ecc58fc2abda85d4ffb --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/types.pxd @@ -0,0 +1,30 @@ +# 
Note that the actual size of these types is system-dependent, and +# can't be detected at C compile time. However, the generated C code +# will correctly use the actual size of these types *except* for +# determining promotion in binary arithmetic expressions involving +# mixed types. In this case, operands are promoted to the declared +# larger type, with a bias towards typedef types. Thus, with the +# declarations below, long + time_t will result in a time_t whereas +# long long + time_t will result in a long long which should be +# acceptable for either 32-bit or 64-bit signed time_t (though admittedly +# the POSIX standard doesn't even specify that time_t must be an integral +# type). + +cdef extern from "": + ctypedef long blkcnt_t + ctypedef long blksize_t + ctypedef long clockid_t + ctypedef long dev_t + ctypedef long gid_t + ctypedef long id_t + ctypedef unsigned long ino_t + ctypedef long mode_t + ctypedef long nlink_t + ctypedef long off_t + ctypedef long pid_t + ctypedef struct sigset_t: + pass + ctypedef long suseconds_t + ctypedef long time_t + ctypedef long timer_t + ctypedef long uid_t diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.cpython-311-x86_64-linux-gnu.so b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0a0e56d0cda19e27eb1484b462dffaa93277f273 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.cpython-311-x86_64-linux-gnu.so differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.pxd new file mode 100644 index 0000000000000000000000000000000000000000..cd884ced8674d2443efda6a9c2097bde54066d77 --- /dev/null +++ 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.pxd @@ -0,0 +1,26 @@ +# cython: language_level=3 + +cdef class Action: + cdef perform(self, token_stream, text) + +cdef class Return(Action): + cdef object value + cdef perform(self, token_stream, text) + +cdef class Call(Action): + cdef object function + cdef perform(self, token_stream, text) + +cdef class Method(Action): + cdef str name + cdef dict kwargs + +cdef class Begin(Action): + cdef object state_name + cdef perform(self, token_stream, text) + +cdef class Ignore(Action): + cdef perform(self, token_stream, text) + +cdef class Text(Action): + cdef perform(self, token_stream, text) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.py new file mode 100644 index 0000000000000000000000000000000000000000..725278ddf8bb51786a4cf368e3b80ad4250030ab --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Actions.py @@ -0,0 +1,121 @@ +# cython: language_level=3str +# cython: auto_pickle=False +""" +Python Lexical Analyser + +Actions for use in token specifications +""" + +class Action(object): + def perform(self, token_stream, text): + pass # abstract + + def __copy__(self): + return self # immutable, no need to copy + + def __deepcopy__(self, memo): + return self # immutable, no need to copy + + +class Return(Action): + """ + Internal Plex action which causes |value| to + be returned as the value of the associated token + """ + + def __init__(self, value): + self.value = value + + def perform(self, token_stream, text): + return self.value + + def __repr__(self): + return "Return(%r)" % self.value + + +class Call(Action): + """ + Internal Plex action which causes a function to be called. 
+ """ + + def __init__(self, function): + self.function = function + + def perform(self, token_stream, text): + return self.function(token_stream, text) + + def __repr__(self): + return "Call(%s)" % self.function.__name__ + + +class Method(Action): + """ + Plex action that calls a specific method on the token stream, + passing the matched text and any provided constant keyword arguments. + """ + + def __init__(self, name, **kwargs): + self.name = name + self.kwargs = kwargs or None + + def perform(self, token_stream, text): + method = getattr(token_stream, self.name) + # self.kwargs is almost always unused => avoid call overhead + return method(text, **self.kwargs) if self.kwargs is not None else method(text) + + def __repr__(self): + kwargs = ( + ', '.join(sorted(['%s=%r' % item for item in self.kwargs.items()])) + if self.kwargs is not None else '') + return "Method(%s%s%s)" % (self.name, ', ' if kwargs else '', kwargs) + + +class Begin(Action): + """ + Begin(state_name) is a Plex action which causes the Scanner to + enter the state |state_name|. See the docstring of Plex.Lexicon + for more information. + """ + + def __init__(self, state_name): + self.state_name = state_name + + def perform(self, token_stream, text): + token_stream.begin(self.state_name) + + def __repr__(self): + return "Begin(%s)" % self.state_name + + +class Ignore(Action): + """ + IGNORE is a Plex action which causes its associated token + to be ignored. See the docstring of Plex.Lexicon for more + information. + """ + + def perform(self, token_stream, text): + return None + + def __repr__(self): + return "IGNORE" + + +IGNORE = Ignore() + + +class Text(Action): + """ + TEXT is a Plex action which causes the text of a token to + be returned as the value of the token. See the docstring of + Plex.Lexicon for more information. 
+ """ + + def perform(self, token_stream, text): + return text + + def __repr__(self): + return "TEXT" + + +TEXT = Text() diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Errors.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Errors.py new file mode 100644 index 0000000000000000000000000000000000000000..fa10374f89500c749586b3a9546b2d14e37e38c8 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Errors.py @@ -0,0 +1,48 @@ +""" +Python Lexical Analyser + +Exception classes +""" + + +class PlexError(Exception): + message = "" + + +class PlexTypeError(PlexError, TypeError): + pass + + +class PlexValueError(PlexError, ValueError): + pass + + +class InvalidToken(PlexError): + def __init__(self, token_number, message): + PlexError.__init__(self, "Token number %d: %s" % (token_number, message)) + + +class InvalidScanner(PlexError): + pass + + +class AmbiguousAction(PlexError): + message = "Two tokens with different actions can match the same string" + + def __init__(self): + pass + + +class UnrecognizedInput(PlexError): + scanner = None + position = None + state_name = None + + def __init__(self, scanner, state_name): + self.scanner = scanner + self.position = scanner.get_position() + self.state_name = state_name + + def __str__(self): + return ("'%s', line %d, char %d: Token not recognised in state %r" % ( + self.position + (self.state_name,))) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Machines.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Machines.pxd new file mode 100644 index 0000000000000000000000000000000000000000..13b43a2342ec8bf2674bf3324eaeaddbdf3204ba --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Machines.pxd @@ -0,0 +1,33 @@ +cimport cython + +from .Actions cimport Action +from .Transitions cimport TransitionMap + +cdef 
int maxint + + +@cython.final +cdef class Machine: + cdef readonly list states + cdef readonly dict initial_states + cdef readonly Py_ssize_t next_state_number + + cpdef new_state(self) + cpdef new_initial_state(self, name) + + +@cython.final +cdef class Node: + cdef readonly TransitionMap transitions + cdef readonly Action action + cdef public dict epsilon_closure + cdef readonly Py_ssize_t number + cdef readonly int action_priority + + +@cython.final +cdef class FastMachine: + cdef readonly dict initial_states + cdef readonly dict new_state_template + cdef readonly list states + cdef readonly Py_ssize_t next_number diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Regexps.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Regexps.py new file mode 100644 index 0000000000000000000000000000000000000000..99d8c994a55cbf8b0882e89f6d75cadc8dd82358 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Regexps.py @@ -0,0 +1,540 @@ +""" +Python Lexical Analyser + +Regular Expressions +""" +from __future__ import absolute_import + +import types + +from . import Errors + +maxint = 2**31-1 # sentinel value + +# +# Constants +# + +BOL = 'bol' +EOL = 'eol' +EOF = 'eof' + +nl_code = ord('\n') + + +# +# Helper functions +# + +def chars_to_ranges(s): + """ + Return a list of character codes consisting of pairs + [code1a, code1b, code2a, code2b,...] which cover all + the characters in |s|. + """ + char_list = list(s) + char_list.sort() + i = 0 + n = len(char_list) + result = [] + while i < n: + code1 = ord(char_list[i]) + code2 = code1 + 1 + i += 1 + while i < n and code2 >= ord(char_list[i]): + code2 += 1 + i += 1 + result.append(code1) + result.append(code2) + return result + + +def uppercase_range(code1, code2): + """ + If the range of characters from code1 to code2-1 includes any + lower case letters, return the corresponding upper case range. 
+ """ + code3 = max(code1, ord('a')) + code4 = min(code2, ord('z') + 1) + if code3 < code4: + d = ord('A') - ord('a') + return (code3 + d, code4 + d) + else: + return None + + +def lowercase_range(code1, code2): + """ + If the range of characters from code1 to code2-1 includes any + upper case letters, return the corresponding lower case range. + """ + code3 = max(code1, ord('A')) + code4 = min(code2, ord('Z') + 1) + if code3 < code4: + d = ord('a') - ord('A') + return (code3 + d, code4 + d) + else: + return None + + +def CodeRanges(code_list): + """ + Given a list of codes as returned by chars_to_ranges, return + an RE which will match a character in any of the ranges. + """ + re_list = [CodeRange(code_list[i], code_list[i + 1]) for i in range(0, len(code_list), 2)] + return Alt(*re_list) + + +def CodeRange(code1, code2): + """ + CodeRange(code1, code2) is an RE which matches any character + with a code |c| in the range |code1| <= |c| < |code2|. + """ + if code1 <= nl_code < code2: + return Alt(RawCodeRange(code1, nl_code), + RawNewline, + RawCodeRange(nl_code + 1, code2)) + else: + return RawCodeRange(code1, code2) + + +# +# Abstract classes +# + +class RE(object): + """RE is the base class for regular expression constructors. + The following operators are defined on REs: + + re1 + re2 is an RE which matches |re1| followed by |re2| + re1 | re2 is an RE which matches either |re1| or |re2| + """ + + nullable = 1 # True if this RE can match 0 input symbols + match_nl = 1 # True if this RE can match a string ending with '\n' + str = None # Set to a string to override the class's __str__ result + + def build_machine(self, machine, initial_state, final_state, + match_bol, nocase): + """ + This method should add states to |machine| to implement this + RE, starting at |initial_state| and ending at |final_state|. + If |match_bol| is true, the RE must be able to match at the + beginning of a line. 
If nocase is true, upper and lower case + letters should be treated as equivalent. + """ + raise NotImplementedError("%s.build_machine not implemented" % + self.__class__.__name__) + + def build_opt(self, m, initial_state, c): + """ + Given a state |s| of machine |m|, return a new state + reachable from |s| on character |c| or epsilon. + """ + s = m.new_state() + initial_state.link_to(s) + initial_state.add_transition(c, s) + return s + + def __add__(self, other): + return Seq(self, other) + + def __or__(self, other): + return Alt(self, other) + + def __str__(self): + if self.str: + return self.str + else: + return self.calc_str() + + def check_re(self, num, value): + if not isinstance(value, RE): + self.wrong_type(num, value, "Plex.RE instance") + + def check_string(self, num, value): + if type(value) != type(''): + self.wrong_type(num, value, "string") + + def check_char(self, num, value): + self.check_string(num, value) + if len(value) != 1: + raise Errors.PlexValueError("Invalid value for argument %d of Plex.%s." + "Expected a string of length 1, got: %s" % ( + num, self.__class__.__name__, repr(value))) + + def wrong_type(self, num, value, expected): + if type(value) == types.InstanceType: + got = "%s.%s instance" % ( + value.__class__.__module__, value.__class__.__name__) + else: + got = type(value).__name__ + raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s " + "(expected %s, got %s" % ( + num, self.__class__.__name__, expected, got)) + +# +# Primitive RE constructors +# ------------------------- +# +# These are the basic REs from which all others are built. +# + + +def Char(c): + """ + Char(c) is an RE which matches the character |c|. 
+ """ + if len(c) == 1: + result = CodeRange(ord(c), ord(c) + 1) + else: + result = SpecialSymbol(c) + result.str = "Char(%s)" % repr(c) + return result + + +class RawCodeRange(RE): + """ + RawCodeRange(code1, code2) is a low-level RE which matches any character + with a code |c| in the range |code1| <= |c| < |code2|, where the range + does not include newline. For internal use only. + """ + nullable = 0 + match_nl = 0 + range = None # (code, code) + uppercase_range = None # (code, code) or None + lowercase_range = None # (code, code) or None + + def __init__(self, code1, code2): + self.range = (code1, code2) + self.uppercase_range = uppercase_range(code1, code2) + self.lowercase_range = lowercase_range(code1, code2) + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + if match_bol: + initial_state = self.build_opt(m, initial_state, BOL) + initial_state.add_transition(self.range, final_state) + if nocase: + if self.uppercase_range: + initial_state.add_transition(self.uppercase_range, final_state) + if self.lowercase_range: + initial_state.add_transition(self.lowercase_range, final_state) + + def calc_str(self): + return "CodeRange(%d,%d)" % (self.code1, self.code2) + + +class _RawNewline(RE): + """ + RawNewline is a low-level RE which matches a newline character. + For internal use only. + """ + nullable = 0 + match_nl = 1 + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + if match_bol: + initial_state = self.build_opt(m, initial_state, BOL) + s = self.build_opt(m, initial_state, EOL) + s.add_transition((nl_code, nl_code + 1), final_state) + + +RawNewline = _RawNewline() + + +class SpecialSymbol(RE): + """ + SpecialSymbol(sym) is an RE which matches the special input + symbol |sym|, which is one of BOL, EOL or EOF. 
+ """ + nullable = 0 + match_nl = 0 + sym = None + + def __init__(self, sym): + self.sym = sym + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + # Sequences 'bol bol' and 'bol eof' are impossible, so only need + # to allow for bol if sym is eol + if match_bol and self.sym == EOL: + initial_state = self.build_opt(m, initial_state, BOL) + initial_state.add_transition(self.sym, final_state) + + +class Seq(RE): + """Seq(re1, re2, re3...) is an RE which matches |re1| followed by + |re2| followed by |re3|...""" + + def __init__(self, *re_list): + nullable = 1 + for i, re in enumerate(re_list): + self.check_re(i, re) + nullable = nullable and re.nullable + self.re_list = re_list + self.nullable = nullable + i = len(re_list) + match_nl = 0 + while i: + i -= 1 + re = re_list[i] + if re.match_nl: + match_nl = 1 + break + if not re.nullable: + break + self.match_nl = match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + re_list = self.re_list + if len(re_list) == 0: + initial_state.link_to(final_state) + else: + s1 = initial_state + n = len(re_list) + for i, re in enumerate(re_list): + if i < n - 1: + s2 = m.new_state() + else: + s2 = final_state + re.build_machine(m, s1, s2, match_bol, nocase) + s1 = s2 + match_bol = re.match_nl or (match_bol and re.nullable) + + def calc_str(self): + return "Seq(%s)" % ','.join(map(str, self.re_list)) + + +class Alt(RE): + """Alt(re1, re2, re3...) 
is an RE which matches either |re1| or + |re2| or |re3|...""" + + def __init__(self, *re_list): + self.re_list = re_list + nullable = 0 + match_nl = 0 + nullable_res = [] + non_nullable_res = [] + i = 1 + for re in re_list: + self.check_re(i, re) + if re.nullable: + nullable_res.append(re) + nullable = 1 + else: + non_nullable_res.append(re) + if re.match_nl: + match_nl = 1 + i += 1 + self.nullable_res = nullable_res + self.non_nullable_res = non_nullable_res + self.nullable = nullable + self.match_nl = match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + for re in self.nullable_res: + re.build_machine(m, initial_state, final_state, match_bol, nocase) + if self.non_nullable_res: + if match_bol: + initial_state = self.build_opt(m, initial_state, BOL) + for re in self.non_nullable_res: + re.build_machine(m, initial_state, final_state, 0, nocase) + + def calc_str(self): + return "Alt(%s)" % ','.join(map(str, self.re_list)) + + +class Rep1(RE): + """Rep1(re) is an RE which matches one or more repetitions of |re|.""" + + def __init__(self, re): + self.check_re(1, re) + self.re = re + self.nullable = re.nullable + self.match_nl = re.match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + s1 = m.new_state() + s2 = m.new_state() + initial_state.link_to(s1) + self.re.build_machine(m, s1, s2, match_bol or self.re.match_nl, nocase) + s2.link_to(s1) + s2.link_to(final_state) + + def calc_str(self): + return "Rep1(%s)" % self.re + + +class SwitchCase(RE): + """ + SwitchCase(re, nocase) is an RE which matches the same strings as RE, + but treating upper and lower case letters according to |nocase|. If + |nocase| is true, case is ignored, otherwise it is not. 
+ """ + re = None + nocase = None + + def __init__(self, re, nocase): + self.re = re + self.nocase = nocase + self.nullable = re.nullable + self.match_nl = re.match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + self.re.build_machine(m, initial_state, final_state, match_bol, + self.nocase) + + def calc_str(self): + if self.nocase: + name = "NoCase" + else: + name = "Case" + return "%s(%s)" % (name, self.re) + + +# +# Composite RE constructors +# ------------------------- +# +# These REs are defined in terms of the primitive REs. +# + +Empty = Seq() +Empty.__doc__ = \ + """ + Empty is an RE which matches the empty string. + """ +Empty.str = "Empty" + + +def Str1(s): + """ + Str1(s) is an RE which matches the literal string |s|. + """ + result = Seq(*tuple(map(Char, s))) + result.str = "Str(%s)" % repr(s) + return result + + +def Str(*strs): + """ + Str(s) is an RE which matches the literal string |s|. + Str(s1, s2, s3, ...) is an RE which matches any of |s1| or |s2| or |s3|... + """ + if len(strs) == 1: + return Str1(strs[0]) + else: + result = Alt(*tuple(map(Str1, strs))) + result.str = "Str(%s)" % ','.join(map(repr, strs)) + return result + + +def Any(s): + """ + Any(s) is an RE which matches any character in the string |s|. + """ + result = CodeRanges(chars_to_ranges(s)) + result.str = "Any(%s)" % repr(s) + return result + + +def AnyBut(s): + """ + AnyBut(s) is an RE which matches any character (including + newline) which is not in the string |s|. + """ + ranges = chars_to_ranges(s) + ranges.insert(0, -maxint) + ranges.append(maxint) + result = CodeRanges(ranges) + result.str = "AnyBut(%s)" % repr(s) + return result + + +AnyChar = AnyBut("") +AnyChar.__doc__ = \ + """ + AnyChar is an RE which matches any single character (including a newline). + """ +AnyChar.str = "AnyChar" + + +def Range(s1, s2=None): + """ + Range(c1, c2) is an RE which matches any single character in the range + |c1| to |c2| inclusive. 
+ Range(s) where |s| is a string of even length is an RE which matches + any single character in the ranges |s[0]| to |s[1]|, |s[2]| to |s[3]|,... + """ + if s2: + result = CodeRange(ord(s1), ord(s2) + 1) + result.str = "Range(%s,%s)" % (s1, s2) + else: + ranges = [] + for i in range(0, len(s1), 2): + ranges.append(CodeRange(ord(s1[i]), ord(s1[i + 1]) + 1)) + result = Alt(*ranges) + result.str = "Range(%s)" % repr(s1) + return result + + +def Opt(re): + """ + Opt(re) is an RE which matches either |re| or the empty string. + """ + result = Alt(re, Empty) + result.str = "Opt(%s)" % re + return result + + +def Rep(re): + """ + Rep(re) is an RE which matches zero or more repetitions of |re|. + """ + result = Opt(Rep1(re)) + result.str = "Rep(%s)" % re + return result + + +def NoCase(re): + """ + NoCase(re) is an RE which matches the same strings as RE, but treating + upper and lower case letters as equivalent. + """ + return SwitchCase(re, nocase=1) + + +def Case(re): + """ + Case(re) is an RE which matches the same strings as RE, but treating + upper and lower case letters as distinct, i.e. it cancels the effect + of any enclosing NoCase(). + """ + return SwitchCase(re, nocase=0) + + +# +# RE Constants +# + +Bol = Char(BOL) +Bol.__doc__ = \ + """ + Bol is an RE which matches the beginning of a line. + """ +Bol.str = "Bol" + +Eol = Char(EOL) +Eol.__doc__ = \ + """ + Eol is an RE which matches the end of a line. + """ +Eol.str = "Eol" + +Eof = Char(EOF) +Eof.__doc__ = \ + """ + Eof is an RE which matches the end of the file. 
+ """ +Eof.str = "Eof" diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Scanners.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Scanners.py new file mode 100644 index 0000000000000000000000000000000000000000..ad85f4465ee6b5e6f0d23fc077c6d7a0eef712f4 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Scanners.py @@ -0,0 +1,359 @@ +# cython: language_level=3str +# cython: auto_pickle=False +""" +Python Lexical Analyser + +Scanning an input stream +""" +from __future__ import absolute_import + +import cython + +cython.declare(BOL=object, EOL=object, EOF=object, NOT_FOUND=object) # noqa:E402 + +from . import Errors +from .Regexps import BOL, EOL, EOF + +NOT_FOUND = object() + + +class Scanner(object): + """ + A Scanner is used to read tokens from a stream of characters + using the token set specified by a Plex.Lexicon. + + Constructor: + + Scanner(lexicon, stream, name = '') + + See the docstring of the __init__ method for details. + + Methods: + + See the docstrings of the individual methods for more + information. + + read() --> (value, text) + Reads the next lexical token from the stream. + + position() --> (name, line, col) + Returns the position of the last token read using the + read() method. + + begin(state_name) + Causes scanner to change state. + + produce(value [, text]) + Causes return of a token value to the caller of the + Scanner. 
+ + """ + + # lexicon = None # Lexicon + # stream = None # file-like object + # name = '' + # buffer = '' + # + # These positions are used by the scanner to track its internal state: + # buf_start_pos = 0 # position in input of start of buffer + # next_pos = 0 # position in input of next char to read + # cur_pos = 0 # position in input of current char + # cur_line = 1 # line number of current char + # cur_line_start = 0 # position in input of start of current line + # start_pos = 0 # position in input of start of token + # current_scanner_position_tuple = ("", 0, 0) + # tuple of filename, line number and position in line, really mainly for error reporting + # + # These positions are used to track what was read from the queue + # (which may differ from the internal state when tokens are replaced onto the queue) + # last_token_position_tuple = ("", 0, 0) # tuple of filename, line number and position in line + + # text = None # text of last token read + # initial_state = None # Node + # state_name = '' # Name of initial state + # queue = None # list of tokens and positions to be returned + # trace = 0 + + def __init__(self, lexicon, stream, name='', initial_pos=None): + """ + Scanner(lexicon, stream, name = '') + + |lexicon| is a Plex.Lexicon instance specifying the lexical tokens + to be recognised. + + |stream| can be a file object or anything which implements a + compatible read() method. + + |name| is optional, and may be the name of the file being + scanned or any other identifying string. 
+ """ + self.trace = 0 + + self.buffer = u'' + self.buf_start_pos = 0 + self.next_pos = 0 + self.cur_pos = 0 + self.cur_line = 1 + self.start_pos = 0 + self.current_scanner_position_tuple = ("", 0, 0) + self.last_token_position_tuple = ("", 0, 0) + self.text = None + self.state_name = None + + self.lexicon = lexicon + self.stream = stream + self.name = name + self.queue = [] + self.initial_state = None + self.begin('') + self.next_pos = 0 + self.cur_pos = 0 + self.cur_line_start = 0 + self.cur_char = BOL + self.input_state = 1 + if initial_pos is not None: + self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2] + + def read(self): + """ + Read the next lexical token from the stream and return a + tuple (value, text), where |value| is the value associated with + the token as specified by the Lexicon, and |text| is the actual + string read from the stream. Returns (None, '') on end of file. + """ + queue = self.queue + while not queue: + self.text, action = self.scan_a_token() + if action is None: + self.produce(None) + self.eof() + else: + value = action.perform(self, self.text) + if value is not None: + self.produce(value) + result, self.last_token_position_tuple = queue[0] + del queue[0] + return result + + def unread(self, token, value, position): + self.queue.insert(0, ((token, value), position)) + + def get_current_scan_pos(self): + # distinct from the position of the last token due to the queue + return self.current_scanner_position_tuple + + def scan_a_token(self): + """ + Read the next input sequence recognised by the machine + and return (text, action). Returns ('', None) on end of + file. 
+ """ + self.start_pos = self.cur_pos + self.current_scanner_position_tuple = ( + self.name, self.cur_line, self.cur_pos - self.cur_line_start + ) + action = self.run_machine_inlined() + if action is not None: + if self.trace: + print("Scanner: read: Performing %s %d:%d" % ( + action, self.start_pos, self.cur_pos)) + text = self.buffer[ + self.start_pos - self.buf_start_pos: + self.cur_pos - self.buf_start_pos] + return (text, action) + else: + if self.cur_pos == self.start_pos: + if self.cur_char is EOL: + self.next_char() + if self.cur_char is None or self.cur_char is EOF: + return (u'', None) + raise Errors.UnrecognizedInput(self, self.state_name) + + def run_machine_inlined(self): + """ + Inlined version of run_machine for speed. + """ + state = self.initial_state + cur_pos = self.cur_pos + cur_line = self.cur_line + cur_line_start = self.cur_line_start + cur_char = self.cur_char + input_state = self.input_state + next_pos = self.next_pos + buffer = self.buffer + buf_start_pos = self.buf_start_pos + buf_len = len(buffer) + b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \ + None, 0, 0, 0, u'', 0, 0 + + trace = self.trace + while 1: + if trace: + print("State %d, %d/%d:%s -->" % ( + state['number'], input_state, cur_pos, repr(cur_char))) + + # Begin inlined self.save_for_backup() + action = state['action'] + if action is not None: + b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \ + action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos + # End inlined self.save_for_backup() + + c = cur_char + new_state = state.get(c, NOT_FOUND) + if new_state is NOT_FOUND: + new_state = c and state.get('else') + + if new_state: + if trace: + print("State %d" % new_state['number']) + state = new_state + # Begin inlined: self.next_char() + if input_state == 1: + cur_pos = next_pos + # Begin inlined: c = self.read_char() + buf_index = next_pos - buf_start_pos + if buf_index < 
buf_len: + c = buffer[buf_index] + next_pos += 1 + else: + discard = self.start_pos - buf_start_pos + data = self.stream.read(0x1000) + buffer = self.buffer[discard:] + data + self.buffer = buffer + buf_start_pos += discard + self.buf_start_pos = buf_start_pos + buf_len = len(buffer) + buf_index -= discard + if data: + c = buffer[buf_index] + next_pos += 1 + else: + c = u'' + # End inlined: c = self.read_char() + if c == u'\n': + cur_char = EOL + input_state = 2 + elif not c: + cur_char = EOL + input_state = 4 + else: + cur_char = c + elif input_state == 2: # after EoL (1) -> BoL (3) + cur_char = u'\n' + input_state = 3 + elif input_state == 3: # start new code line + cur_line += 1 + cur_line_start = cur_pos = next_pos + cur_char = BOL + input_state = 1 + elif input_state == 4: # after final line (1) -> EoF (5) + cur_char = EOF + input_state = 5 + else: # input_state == 5 (EoF) + cur_char = u'' + # End inlined self.next_char() + else: # not new_state + if trace: + print("blocked") + # Begin inlined: action = self.back_up() + if b_action is not None: + (action, cur_pos, cur_line, cur_line_start, + cur_char, input_state, next_pos) = \ + (b_action, b_cur_pos, b_cur_line, b_cur_line_start, + b_cur_char, b_input_state, b_next_pos) + else: + action = None + break # while 1 + # End inlined: action = self.back_up() + + self.cur_pos = cur_pos + self.cur_line = cur_line + self.cur_line_start = cur_line_start + self.cur_char = cur_char + self.input_state = input_state + self.next_pos = next_pos + if trace: + if action is not None: + print("Doing %s" % action) + return action + + def next_char(self): + input_state = self.input_state + if self.trace: + print("Scanner: next: %s [%d] %d" % (" " * 20, input_state, self.cur_pos)) + if input_state == 1: + self.cur_pos = self.next_pos + c = self.read_char() + if c == u'\n': + self.cur_char = EOL + self.input_state = 2 + elif not c: + self.cur_char = EOL + self.input_state = 4 + else: + self.cur_char = c + elif input_state == 2: + 
self.cur_char = u'\n' + self.input_state = 3 + elif input_state == 3: + self.cur_line += 1 + self.cur_line_start = self.cur_pos = self.next_pos + self.cur_char = BOL + self.input_state = 1 + elif input_state == 4: + self.cur_char = EOF + self.input_state = 5 + else: # input_state = 5 + self.cur_char = u'' + if self.trace: + print("--> [%d] %d %r" % (input_state, self.cur_pos, self.cur_char)) + + def position(self): + """ + Return a tuple (name, line, col) representing the location of + the last token read using the read() method. |name| is the + name that was provided to the Scanner constructor; |line| + is the line number in the stream (1-based); |col| is the + position within the line of the first character of the token + (0-based). + """ + return self.last_token_position_tuple + + def get_position(self): + """ + Python accessible wrapper around position(), only for error reporting. + """ + return self.position() + + def begin(self, state_name): + """Set the current state of the scanner to the named state.""" + self.initial_state = ( + self.lexicon.get_initial_state(state_name)) + self.state_name = state_name + + def produce(self, value, text=None): + """ + Called from an action procedure, causes |value| to be returned + as the token value from read(). If |text| is supplied, it is + returned in place of the scanned text. + + produce() can be called more than once during a single call to an action + procedure, in which case the tokens are queued up and returned one + at a time by subsequent calls to read(), until the queue is empty, + whereupon scanning resumes. + """ + if text is None: + text = self.text + self.queue.append(((value, text), self.current_scanner_position_tuple)) + + def eof(self): + """ + Override this method if you want something to be done at + end of file. 
+ """ + pass + + @property + def start_line(self): + return self.last_token_position_tuple[1] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.py new file mode 100644 index 0000000000000000000000000000000000000000..f58dd538e26cea710a86c7ab1983650d0b4d15e5 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.py @@ -0,0 +1,234 @@ +# cython: auto_pickle=False +""" +Plex - Transition Maps + +This version represents state sets directly as dicts for speed. +""" + +maxint = 2**31-1 # sentinel value + + +class TransitionMap(object): + """ + A TransitionMap maps an input event to a set of states. + An input event is one of: a range of character codes, + the empty string (representing an epsilon move), or one + of the special symbols BOL, EOL, EOF. + + For characters, this implementation compactly represents + the map by means of a list: + + [code_0, states_0, code_1, states_1, code_2, states_2, + ..., code_n-1, states_n-1, code_n] + + where |code_i| is a character code, and |states_i| is a + set of states corresponding to characters with codes |c| + in the range |code_i| <= |c| <= |code_i+1|. + + The following invariants hold: + n >= 1 + code_0 == -maxint + code_n == maxint + code_i < code_i+1 for i in 0..n-1 + states_0 == states_n-1 + + Mappings for the special events '', BOL, EOL, EOF are + kept separately in a dictionary. + """ + + def __init__(self, map=None, special=None): + if not map: + map = [-maxint, {}, maxint] + if not special: + special = {} + self.map = map # The list of codes and states + self.special = special # Mapping for special events + + def add(self, event, new_state): + """ + Add transition to |new_state| on |event|. 
+ """ + if type(event) is tuple: + code0, code1 = event + i = self.split(code0) + j = self.split(code1) + map = self.map + while i < j: + map[i + 1][new_state] = 1 + i += 2 + else: + self.get_special(event)[new_state] = 1 + + def add_set(self, event, new_set): + """ + Add transitions to the states in |new_set| on |event|. + """ + if type(event) is tuple: + code0, code1 = event + i = self.split(code0) + j = self.split(code1) + map = self.map + while i < j: + map[i + 1].update(new_set) + i += 2 + else: + self.get_special(event).update(new_set) + + def get_epsilon(self): + """ + Return the mapping for epsilon, or None. + """ + return self.special.get('') + + def iteritems(self): + """ + Return the mapping as an iterable of ((code1, code2), state_set) and + (special_event, state_set) pairs. + """ + result = [] + map = self.map + else_set = map[1] + i = 0 + n = len(map) - 1 + code0 = map[0] + while i < n: + set = map[i + 1] + code1 = map[i + 2] + if set or else_set: + result.append(((code0, code1), set)) + code0 = code1 + i += 2 + for event, set in self.special.items(): + if set: + result.append((event, set)) + return iter(result) + + items = iteritems + + # ------------------- Private methods -------------------- + + def split(self, code): + """ + Search the list for the position of the split point for |code|, + inserting a new split point if necessary. Returns index |i| such + that |code| == |map[i]|. + """ + # We use a funky variation on binary search. 
+ map = self.map + hi = len(map) - 1 + # Special case: code == map[-1] + if code == maxint: + return hi + + # General case + lo = 0 + # loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2 + while hi - lo >= 4: + # Find midpoint truncated to even index + mid = ((lo + hi) // 2) & ~1 + if code < map[mid]: + hi = mid + else: + lo = mid + # map[lo] <= code < map[hi] and hi - lo == 2 + if map[lo] == code: + return lo + else: + map[hi:hi] = [code, map[hi - 1].copy()] + return hi + + def get_special(self, event): + """ + Get state set for special event, adding a new entry if necessary. + """ + special = self.special + set = special.get(event, None) + if not set: + set = {} + special[event] = set + return set + + # --------------------- Conversion methods ----------------------- + + def __str__(self): + map_strs = [] + map = self.map + n = len(map) + i = 0 + while i < n: + code = map[i] + if code == -maxint: + code_str = "-inf" + elif code == maxint: + code_str = "inf" + else: + code_str = str(code) + map_strs.append(code_str) + i += 1 + if i < n: + map_strs.append(state_set_str(map[i])) + i += 1 + special_strs = {} + for event, set in self.special.items(): + special_strs[event] = state_set_str(set) + return "[%s]+%s" % ( + ','.join(map_strs), + special_strs + ) + + # --------------------- Debugging methods ----------------------- + + def check(self): + """Check data structure integrity.""" + if not self.map[-3] < self.map[-1]: + print(self) + assert 0 + + def dump(self, file): + map = self.map + i = 0 + n = len(map) - 1 + while i < n: + self.dump_range(map[i], map[i + 2], map[i + 1], file) + i += 2 + for event, set in self.special.items(): + if set: + if not event: + event = 'empty' + self.dump_trans(event, set, file) + + def dump_range(self, code0, code1, set, file): + if set: + if code0 == -maxint: + if code1 == maxint: + k = "any" + else: + k = "< %s" % self.dump_char(code1) + elif code1 == maxint: + k = "> %s" % self.dump_char(code0 - 1) + elif code0 == code1 - 
1: + k = self.dump_char(code0) + else: + k = "%s..%s" % (self.dump_char(code0), + self.dump_char(code1 - 1)) + self.dump_trans(k, set, file) + + def dump_char(self, code): + if 0 <= code <= 255: + return repr(chr(code)) + else: + return "chr(%d)" % code + + def dump_trans(self, key, set, file): + file.write(" %s --> %s\n" % (key, self.dump_set(set))) + + def dump_set(self, set): + return state_set_str(set) + + +# +# State set manipulation functions +# + +def state_set_str(set): + return "[%s]" % ','.join(["S%d" % state.number for state in set]) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Errors.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Errors.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4e2f23367f751be38a62484c9f58752d3bb2731 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/__pycache__/Errors.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_looper.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_looper.py new file mode 100644 index 0000000000000000000000000000000000000000..4010988300ffd12da5dd136f1f173d584261191e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_looper.py @@ -0,0 +1,163 @@ +""" +Helper for looping over sequences, particular in templates. + +Often in a loop in a template it's handy to know what's next up, +previously up, if this is the first or last item in the sequence, etc. +These can be awkward to manage in a normal Python loop, but using the +looper you can get a better sense of the context. Use like:: + + >>> for loop, item in looper(['a', 'b', 'c']): + ... print loop.number, item + ... if not loop.last: + ... 
print '---' + 1 a + --- + 2 b + --- + 3 c + +""" + +import sys +from Cython.Tempita.compat3 import basestring_ + +__all__ = ['looper'] + + +class looper(object): + """ + Helper for looping (particularly in templates) + + Use this like:: + + for loop, item in looper(seq): + if loop.first: + ... + """ + + def __init__(self, seq): + self.seq = seq + + def __iter__(self): + return looper_iter(self.seq) + + def __repr__(self): + return '<%s for %r>' % ( + self.__class__.__name__, self.seq) + + +class looper_iter(object): + + def __init__(self, seq): + self.seq = list(seq) + self.pos = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.pos >= len(self.seq): + raise StopIteration + result = loop_pos(self.seq, self.pos), self.seq[self.pos] + self.pos += 1 + return result + + if sys.version < "3": + next = __next__ + + +class loop_pos(object): + + def __init__(self, seq, pos): + self.seq = seq + self.pos = pos + + def __repr__(self): + return '' % ( + self.seq[self.pos], self.pos) + + def index(self): + return self.pos + index = property(index) + + def number(self): + return self.pos + 1 + number = property(number) + + def item(self): + return self.seq[self.pos] + item = property(item) + + def __next__(self): + try: + return self.seq[self.pos + 1] + except IndexError: + return None + __next__ = property(__next__) + + if sys.version < "3": + next = __next__ + + def previous(self): + if self.pos == 0: + return None + return self.seq[self.pos - 1] + previous = property(previous) + + def odd(self): + return not self.pos % 2 + odd = property(odd) + + def even(self): + return self.pos % 2 + even = property(even) + + def first(self): + return self.pos == 0 + first = property(first) + + def last(self): + return self.pos == len(self.seq) - 1 + last = property(last) + + def length(self): + return len(self.seq) + length = property(length) + + def first_group(self, getter=None): + """ + Returns true if this item is the start of a new group, + where groups mean 
that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.first: + return True + return self._compare_group(self.item, self.previous, getter) + + def last_group(self, getter=None): + """ + Returns true if this item is the end of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.last: + return True + return self._compare_group(self.item, self.__next__, getter) + + def _compare_group(self, item, other, getter): + if getter is None: + return item != other + elif (isinstance(getter, basestring_) + and getter.startswith('.')): + getter = getter[1:] + if getter.endswith('()'): + getter = getter[:-2] + return getattr(item, getter)() != getattr(other, getter)() + else: + return getattr(item, getter) != getattr(other, getter) + elif hasattr(getter, '__call__'): + return getter(item) != getter(other) + else: + return item[getter] != other[getter] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/compat3.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/compat3.py new file mode 100644 index 0000000000000000000000000000000000000000..9905530757ae803920bb0ae6a76415db3e900863 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/compat3.py @@ -0,0 +1,47 @@ +import sys + +__all__ = ['b', 'basestring_', 'bytes', 'unicode_', 'next', 'is_unicode'] + +if sys.version < "3": + b = bytes = str + basestring_ = basestring + unicode_ = unicode +else: + + def b(s): + if isinstance(s, str): + return s.encode('latin1') + return bytes(s) + basestring_ = (bytes, str) + bytes = bytes + unicode_ = str +text = str + +if sys.version < "3": + + def next(obj): + return obj.next() +else: + next = next + +if 
sys.version < "3": + + def is_unicode(obj): + return isinstance(obj, unicode) +else: + + def is_unicode(obj): + return isinstance(obj, str) + + +def coerce_text(v): + if not isinstance(v, basestring_): + if sys.version < "3": + attr = '__unicode__' + else: + attr = '__str__' + if hasattr(v, attr): + return unicode(v) + else: + return bytes(v) + return v diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Coverage.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Coverage.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac375b3e272f3ad6579525c854576a948b2b850c Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Coverage.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Debugging.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Debugging.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7980dd9be0b3b9133ec98e30c08ceb9817d9184b Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Debugging.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c91a904a13496ecab5a3a6c8caa026970d99a540 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__init__.py @@ -0,0 +1,20 @@ +from .betweenness import * +from .betweenness_subset import * +from .closeness import * +from .current_flow_betweenness import * +from .current_flow_betweenness_subset import * +from 
.current_flow_closeness import * +from .degree_alg import * +from .dispersion import * +from .eigenvector import * +from .group import * +from .harmonic import * +from .katz import * +from .load import * +from .percolation import * +from .reaching import * +from .second_order import * +from .subgraph_alg import * +from .trophic import * +from .voterank_alg import * +from .laplacian import * diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..846ae4d0c0c0751b4df41441e8c171418913ffef Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..613ec4eec59dc8e2edc725ba97e583f5eb759cae Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2717d8bd0d80d46d76502c2bef63089c23300bca Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19fde9cad80c26cb70f7aa209be53a1e55a1dc40 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/betweenness_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..e6c1acdf4ffe4d7423a49bcdf8c340886c998b3b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/betweenness_subset.py @@ -0,0 +1,274 @@ +"""Betweenness centrality measures for subsets of nodes.""" +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _add_edge_keys, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = [ + "betweenness_centrality_subset", + "edge_betweenness_centrality_subset", +] + + +@nx._dispatch(edge_attrs="weight") +def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None): + r"""Compute betweenness centrality for a subset of 
nodes. + + .. math:: + + c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|v)$ is the number of those paths + passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, + and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_. + + + Parameters + ---------- + G : graph + A NetworkX graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by $2/((n-1)(n-2))$ + for graphs, and $1/((n-1)(n-2))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is + designed to make betweenness_centrality(G) be the same as + betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? 
+ + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_subset(b, S, P, sigma, s, targets) + b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed()) + return b + + +@nx._dispatch(edge_attrs="weight") +def edge_betweenness_centrality_subset( + G, sources, targets, normalized=False, weight=None +): + r"""Compute betweenness centrality for edges for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|e)$ is the number of those paths + passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A networkx graph. 
+ + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by `2/(n(n-1))` + for graphs, and `1/(n(n-1))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + edges : dictionary + Dictionary of edges with Betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is the same + as in edge_betweenness_centrality() and is designed to make + edge_betweenness_centrality(G) be the same as + edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. 
+ https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + b.update(dict.fromkeys(G.edges(), 0.0)) # b[e] for e in G.edges() + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_edges_subset(b, S, P, sigma, s, targets) + for n in G: # remove nodes to only return edges + del b[n] + b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed()) + if G.is_multigraph(): + b = _add_edge_keys(G, b, weight=weight) + return b + + +def _accumulate_subset(betweenness, S, P, sigma, s, targets): + delta = dict.fromkeys(S, 0.0) + target_set = set(targets) - {s} + while S: + w = S.pop() + if w in target_set: + coeff = (delta[w] + 1.0) / sigma[w] + else: + coeff = delta[w] / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets): + """edge_betweenness_centrality_subset helper.""" + delta = dict.fromkeys(S, 0) + target_set = set(targets) + while S: + w = S.pop() + for v in P[w]: + if w in target_set: + c = (sigma[v] / sigma[w]) * (1.0 + delta[w]) + else: + c = delta[w] / len(P[w]) + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False): + """betweenness_centrality_subset helper.""" + if normalized: + if n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, 
directed=False): + """edge_betweenness_centrality_subset helper.""" + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py new file mode 100644 index 0000000000000000000000000000000000000000..ea1b2c8f2f49f97020adf100b495f20ec3f19ce1 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py @@ -0,0 +1,343 @@ +"""Current-flow betweenness centrality measures.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, + flow_matrix_row, +) +from networkx.utils import ( + not_implemented_for, + py_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "current_flow_betweenness_centrality", + "approximate_current_flow_betweenness_centrality", + "edge_current_flow_betweenness_centrality", +] + + +@not_implemented_for("directed") +@py_random_state(7) +@nx._dispatch(edge_attrs="weight") +def approximate_current_flow_betweenness_centrality( + G, + normalized=True, + weight=None, + dtype=float, + solver="full", + epsilon=0.5, + kmax=10000, + seed=None, +): + r"""Compute the approximate current-flow betweenness centrality for nodes. + + Approximates the current-flow betweenness centrality within absolute + error of epsilon with high probability [1]_. 
+ + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + epsilon: float + Absolute error tolerance. + + kmax: int + Maximum number of sample node pairs to use for approximation. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + current_flow_betweenness_centrality + + Notes + ----- + The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$ + and the space required is $O(m)$ for $n$ nodes and $m$ edges. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer: + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. 
+ https://doi.org/10.1007/978-3-540-31856-9_44 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + betweenness = dict.fromkeys(H, 0.0) + nb = (n - 1.0) * (n - 2.0) # normalization factor + cstar = n * (n - 1) / nb + l = 1 # parameter in approximation, adjustable + k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n))) + if k > kmax: + msg = f"Number random pairs k>kmax ({k}>{kmax}) " + raise nx.NetworkXError(msg, "Increase kmax or epsilon") + cstar2k = cstar / (2 * k) + for _ in range(k): + s, t = pair = seed.sample(range(n), 2) + b = np.zeros(n, dtype=dtype) + b[s] = 1 + b[t] = -1 + p = C.solve(b) + for v in H: + if v in pair: + continue + for nbr in H[v]: + w = H[v][nbr].get(weight, 1.0) + betweenness[v] += w * np.abs(p[v] - p[nbr]) * cstar2k + if normalized: + factor = 1.0 + else: + factor = nb / 2.0 + # remap to original node names and "unnormalize" if required + return {ordering[k]: v * factor for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. 
+ + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. 
[2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(n))) + for i in range(n): + betweenness[s] += (i - pos[i]) * row[i] + betweenness[t] += (n - i - 1 - pos[i]) * row[i] + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for v in H: + betweenness[v] = float((betweenness[v] - v) * 2.0 / nb) + return {ordering[k]: v for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def edge_current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for edges. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (default=float) + Default data type for internal matrices. 
+ Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of edge tuples with betweenness centrality as the value. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraphs. + If the input graph is an instance of DiGraph class, NetworkXError + is raised. + + See Also + -------- + betweenness_centrality + edge_betweenness_centrality + current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). 
+ """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + edges = (tuple(sorted((u, v))) for u, v in H.edges()) + betweenness = dict.fromkeys(edges, 0.0) + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(1, n + 1))) + for i in range(n): + betweenness[e] += (i + 1 - pos[i]) * row[i] + betweenness[e] += (n - i - pos[i]) * row[i] + betweenness[e] /= nb + return {(ordering[s], ordering[t]): v for (s, t), v in betweenness.items()} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/harmonic.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/harmonic.py new file mode 100644 index 0000000000000000000000000000000000000000..86b5020f96c49f4d647bea5d1624b862ee54c849 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/harmonic.py @@ -0,0 +1,80 @@ +"""Functions for computing the harmonic centrality of a graph.""" +from functools import partial + +import networkx as nx + +__all__ = ["harmonic_centrality"] + + +@nx._dispatch(edge_attrs="distance") +def harmonic_centrality(G, nbunch=None, distance=None, sources=None): + r"""Compute harmonic centrality for nodes. + + Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal + of the shortest path distances from all other nodes to `u` + + .. math:: + + C(u) = \sum_{v \neq u} \frac{1}{d(v, u)} + + where `d(v, u)` is the shortest-path distance between `v` and `u`. 
+ + If `sources` is given as an argument, the returned harmonic centrality + values are calculated as the sum of the reciprocals of the shortest + path distances from the nodes specified in `sources` to `u` instead + of from all nodes to `u`. + + Notice that higher values indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : container (default: all nodes in G) + Container of nodes for which harmonic centrality values are calculated. + + sources : container (default: all nodes in G) + Container of nodes `v` over which reciprocal distances are computed. + Nodes not in `G` are silently ignored. + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None`, then each edge will have distance equal to 1. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with harmonic centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight. + + References + ---------- + .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality." + Internet Mathematics 10.3-4 (2014): 222-262. 
+ """ + + nbunch = set(G.nbunch_iter(nbunch)) if nbunch is not None else set(G.nodes) + sources = set(G.nbunch_iter(sources)) if sources is not None else G.nodes + + spl = partial(nx.shortest_path_length, G, weight=distance) + centrality = {u: 0 for u in nbunch} + for v in sources: + dist = spl(v) + for u in nbunch.intersection(dist): + d = dist[u] + if d == 0: # handle u == v and edges with 0 weight + continue + centrality[u] += 1 / d + + return centrality diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/load.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/load.py new file mode 100644 index 0000000000000000000000000000000000000000..9a81cc43282d2cdd19fb365d6265c3d128faddc9 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/load.py @@ -0,0 +1,199 @@ +"""Load centrality.""" +from operator import itemgetter + +import networkx as nx + +__all__ = ["load_centrality", "edge_load_centrality"] + + +@nx._dispatch(edge_attrs="weight") +def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None): + """Compute load centrality for nodes. + + The load centrality of a node is the fraction of all shortest + paths that pass through that node. + + Parameters + ---------- + G : graph + A networkx graph. + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + cutoff : bool, optional (default=None) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with centrality as the value. 
+ + See Also + -------- + betweenness_centrality + + Notes + ----- + Load centrality is slightly different than betweenness. It was originally + introduced by [2]_. For this load algorithm see [1]_. + + References + ---------- + .. [1] Mark E. J. Newman: + Scientific collaboration networks. II. + Shortest paths, weighted networks, and centrality. + Physical Review E 64, 016132, 2001. + http://journals.aps.org/pre/abstract/10.1103/PhysRevE.64.016132 + .. [2] Kwang-Il Goh, Byungnam Kahng and Doochul Kim + Universal behavior of Load Distribution in Scale-Free Networks. + Physical Review Letters 87(27):1–4, 2001. + https://doi.org/10.1103/PhysRevLett.87.278701 + """ + if v is not None: # only one node + betweenness = 0.0 + for source in G: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + betweenness += ubetween[v] if v in ubetween else 0 + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + betweenness *= 1.0 / ((order - 1) * (order - 2)) + else: + betweenness = {}.fromkeys(G, 0.0) + for source in betweenness: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + for vk in ubetween: + betweenness[vk] += ubetween[vk] + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + scale = 1.0 / ((order - 1) * (order - 2)) + for v in betweenness: + betweenness[v] *= scale + return betweenness # all nodes + + +def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None): + """Node betweenness_centrality helper: + + See betweenness_centrality for what you probably want. + This actually computes "load" and not betweenness. + See https://networkx.lanl.gov/ticket/103 + + This calculates the load of each node for paths from a single source. + (The fraction of number of shortests paths from source that go + through each node.) + + To get the load for a node you need to do all-pairs shortest paths. 
+ + If weight is not None then use Dijkstra for finding shortest paths. + """ + # get the predecessor and path length data + if weight is None: + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + else: + (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight) + + # order the nodes by path length + onodes = [(l, vert) for (vert, l) in length.items()] + onodes.sort() + onodes[:] = [vert for (l, vert) in onodes if l > 0] + + # initialize betweenness + between = {}.fromkeys(length, 1.0) + + while onodes: + v = onodes.pop() + if v in pred: + num_paths = len(pred[v]) # Discount betweenness if more than + for x in pred[v]: # one shortest path. + if x == source: # stop if hit source because all remaining v + break # also have pred[v]==[source] + between[x] += between[v] / num_paths + # remove source + for v in between: + between[v] -= 1 + # rescale to be between 0 and 1 + if normalized: + l = len(between) + if l > 2: + # scale by 1/the number of possible paths + scale = 1 / ((l - 1) * (l - 2)) + for v in between: + between[v] *= scale + return between + + +load_centrality = newman_betweenness_centrality + + +@nx._dispatch +def edge_load_centrality(G, cutoff=False): + """Compute edge load. + + WARNING: This concept of edge load has not been analysed + or discussed outside of NetworkX that we know of. + It is based loosely on load_centrality in the sense that + it counts the number of shortest paths which cross each edge. + This function is for demonstration and testing purposes. + + Parameters + ---------- + G : graph + A networkx graph + + cutoff : bool, optional (default=False) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + A dict keyed by edge 2-tuple to the number of shortest paths + which use that edge. Where more than one path is shortest + the count is divided equally among paths. 
+ """ + betweenness = {} + for u, v in G.edges(): + betweenness[(u, v)] = 0.0 + betweenness[(v, u)] = 0.0 + + for source in G: + ubetween = _edge_betweenness(G, source, cutoff=cutoff) + for e, ubetweenv in ubetween.items(): + betweenness[e] += ubetweenv # cumulative total + return betweenness + + +def _edge_betweenness(G, source, nodes=None, cutoff=False): + """Edge betweenness helper.""" + # get the predecessor data + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + # order the nodes by path length + onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))] + # initialize betweenness, doesn't account for any edge weights + between = {} + for u, v in G.edges(nodes): + between[(u, v)] = 1.0 + between[(v, u)] = 1.0 + + while onodes: # work through all paths + v = onodes.pop() + if v in pred: + # Discount betweenness if more than one shortest path. + num_paths = len(pred[v]) + for w in pred[v]: + if w in pred: + # Discount betweenness, mult path + num_paths = len(pred[w]) + for x in pred[w]: + between[(w, x)] += between[(v, w)] / num_paths + between[(x, w)] += between[(w, v)] / num_paths + return between diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/reaching.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/reaching.py new file mode 100644 index 0000000000000000000000000000000000000000..7b9eac564acc0dcde38409007e9df38863ee24de --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/centrality/reaching.py @@ -0,0 +1,206 @@ +"""Functions for computing reaching centrality of a node or a graph.""" + +import networkx as nx +from networkx.utils import pairwise + +__all__ = ["global_reaching_centrality", "local_reaching_centrality"] + + +def _average_weight(G, path, weight=None): + """Returns the average weight of an edge in a weighted path. 
+ + Parameters + ---------- + G : graph + A networkx graph. + + path: list + A list of vertices that define the path. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. Then the average weight of an edge + is assumed to be the multiplicative inverse of the length of the path. + Otherwise holds the name of the edge attribute used as weight. + """ + path_length = len(path) - 1 + if path_length <= 0: + return 0 + if weight is None: + return 1 / path_length + total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path)) + return total_weight / path_length + + +@nx._dispatch(edge_attrs="weight") +def global_reaching_centrality(G, weight=None, normalized=True): + """Returns the global reaching centrality of a directed graph. + + The *global reaching centrality* of a weighted directed graph is the + average over all nodes of the difference between the local reaching + centrality of the node and the greatest local reaching centrality of + any node in the graph [1]_. For more information on the local + reaching centrality, see :func:`local_reaching_centrality`. + Informally, the local reaching centrality is the proportion of the + graph that is reachable from the neighbors of the node. + + Parameters + ---------- + G : DiGraph + A networkx DiGraph. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If ``None``, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The global reaching centrality of the graph. 
+ + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(1, 3) + >>> nx.global_reaching_centrality(G) + 1.0 + >>> G.add_edge(3, 2) + >>> nx.global_reaching_centrality(G) + 0.75 + + See also + -------- + local_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + + # If provided, weights must be interpreted as connection strength + # (so higher weights are more likely to be chosen). However, the + # shortest path algorithms in NetworkX assume the provided "weight" + # is actually a distance (so edges with higher weight are less + # likely to be chosen). Therefore we need to invert the weights when + # computing shortest paths. + # + # If weight is None, we leave it as-is so that the shortest path + # algorithm can use a faster, unweighted algorithm. + if weight is not None: + + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + shortest_paths = nx.shortest_path(G, weight=as_distance) + else: + shortest_paths = nx.shortest_path(G) + + centrality = local_reaching_centrality + # TODO This can be trivially parallelized. + lrc = [ + centrality(G, node, paths=paths, weight=weight, normalized=normalized) + for node, paths in shortest_paths.items() + ] + + max_lrc = max(lrc) + return sum(max_lrc - c for c in lrc) / (len(G) - 1) + + +@nx._dispatch(edge_attrs="weight") +def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True): + """Returns the local reaching centrality of a node in a directed + graph. 
+ + The *local reaching centrality* of a node in a directed graph is the + proportion of other nodes reachable from that node [1]_. + + Parameters + ---------- + G : DiGraph + A NetworkX DiGraph. + + v : node + A node in the directed graph `G`. + + paths : dictionary (default=None) + If this is not `None` it must be a dictionary representation + of single-source shortest paths, as computed by, for example, + :func:`networkx.shortest_path` with source node `v`. Use this + keyword argument if you intend to invoke this function many + times but don't want the paths to be recomputed each time. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If `None`, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The local reaching centrality of the node ``v`` in the graph + ``G``. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (1, 3)]) + >>> nx.local_reaching_centrality(G, 3) + 0.0 + >>> G.add_edge(3, 2) + >>> nx.local_reaching_centrality(G, 3) + 0.5 + + See also + -------- + global_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if paths is None: + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + if weight is not None: + # Interpret weights as lengths. 
+ def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + paths = nx.shortest_path(G, source=v, weight=as_distance) + else: + paths = nx.shortest_path(G, source=v) + # If the graph is unweighted, simply return the proportion of nodes + # reachable from the source node ``v``. + if weight is None and G.is_directed(): + return (len(paths) - 1) / (len(G) - 1) + if normalized and weight is not None: + norm = G.size(weight=weight) / G.size() + else: + norm = 1 + # TODO This can be trivially parallelized. + avgw = (_average_weight(G, path, weight=weight) for path in paths.values()) + sum_avg_weight = sum(avgw) / norm + return sum_avg_weight / (len(G) - 1) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..9351606de26547246c807a6f74ffa81c84448456 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b17e66cdeda8edb8d1dec72626c77f1f65dd4675e3f76dc2fc4fd84aa038e30 +size 18972