diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/__init__.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7ad2684aa79650e5349e97fabb961b477c53f937 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/__init__.pxd @@ -0,0 +1,187 @@ +##################################################################### +# +# These are the Cython pxd files for (most of) the Python/C API. +# +# REFERENCE COUNTING: +# +# JUST TO SCARE YOU: +# If you are going to use any of the Python/C API in your Cython +# program, you might be responsible for doing reference counting. +# Read http://docs.python.org/api/refcounts.html which is so +# important I've copied it below. +# +# For all the declaration below, whenever the Py_ function returns +# a *new reference* to a PyObject*, the return type is "object". +# When the function returns a borrowed reference, the return +# type is PyObject*. When Cython sees "object" as a return type +# it doesn't increment the reference count. When it sees PyObject* +# in order to use the result you must explicitly cast to , +# and when you do that Cython increments the reference count whether +# you want it to or not, forcing you to an explicit DECREF (or leak memory). +# To avoid this we make the above convention. Note, you can +# always locally override this convention by putting something like +# +# cdef extern from "Python.h": +# PyObject* PyNumber_Add(PyObject *o1, PyObject *o2) +# +# in your .pyx file or into a cimported .pxd file. You just have to +# use the one from the right (pxd-)namespace then. +# +# Cython automatically takes care of reference counting for anything +# of type object. +# +## More precisely, I think the correct convention for +## using the Python/C API from Cython is as follows. 
+## +## (1) Declare all input arguments as type "object". This way no explicit +## casting is needed, and moreover Cython doesn't generate +## any funny reference counting. +## (2) Declare output as object if a new reference is returned. +## (3) Declare output as PyObject* if a borrowed reference is returned. +## +## This way when you call objects, no cast is needed, and if the api +## calls returns a new reference (which is about 95% of them), then +## you can just assign to a variable of type object. With borrowed +## references if you do an explicit typecast to , Cython generates an +## INCREF and DECREF so you have to be careful. However, you got a +## borrowed reference in this case, so there's got to be another reference +## to your object, so you're OK, as long as you relealize this +## and use the result of an explicit cast to as a borrowed +## reference (and you can call Py_INCREF if you want to turn it +## into another reference for some reason). +# +# "The reference count is important because today's computers have +# a finite (and often severely limited) memory size; it counts how +# many different places there are that have a reference to an +# object. Such a place could be another object, or a global (or +# static) C variable, or a local variable in some C function. When +# an object's reference count becomes zero, the object is +# deallocated. If it contains references to other objects, their +# reference count is decremented. Those other objects may be +# deallocated in turn, if this decrement makes their reference +# count become zero, and so on. (There's an obvious problem with +# objects that reference each other here; for now, the solution is +# ``don't do that.'') +# +# Reference counts are always manipulated explicitly. The normal +# way is to use the macro Py_INCREF() to increment an object's +# reference count by one, and Py_DECREF() to decrement it by +# one. 
The Py_DECREF() macro is considerably more complex than the +# incref one, since it must check whether the reference count +# becomes zero and then cause the object's deallocator to be +# called. The deallocator is a function pointer contained in the +# object's type structure. The type-specific deallocator takes +# care of decrementing the reference counts for other objects +# contained in the object if this is a compound object type, such +# as a list, as well as performing any additional finalization +# that's needed. There's no chance that the reference count can +# overflow; at least as many bits are used to hold the reference +# count as there are distinct memory locations in virtual memory +# (assuming sizeof(long) >= sizeof(char*)). Thus, the reference +# count increment is a simple operation. +# +# It is not necessary to increment an object's reference count for +# every local variable that contains a pointer to an object. In +# theory, the object's reference count goes up by one when the +# variable is made to point to it and it goes down by one when the +# variable goes out of scope. However, these two cancel each other +# out, so at the end the reference count hasn't changed. The only +# real reason to use the reference count is to prevent the object +# from being deallocated as long as our variable is pointing to +# it. If we know that there is at least one other reference to the +# object that lives at least as long as our variable, there is no +# need to increment the reference count temporarily. An important +# situation where this arises is in objects that are passed as +# arguments to C functions in an extension module that are called +# from Python; the call mechanism guarantees to hold a reference +# to every argument for the duration of the call. +# +# However, a common pitfall is to extract an object from a list +# and hold on to it for a while without incrementing its reference +# count. 
Some other operation might conceivably remove the object +# from the list, decrementing its reference count and possible +# deallocating it. The real danger is that innocent-looking +# operations may invoke arbitrary Python code which could do this; +# there is a code path which allows control to flow back to the +# user from a Py_DECREF(), so almost any operation is potentially +# dangerous. +# +# A safe approach is to always use the generic operations +# (functions whose name begins with "PyObject_", "PyNumber_", +# "PySequence_" or "PyMapping_"). These operations always +# increment the reference count of the object they return. This +# leaves the caller with the responsibility to call Py_DECREF() +# when they are done with the result; this soon becomes second +# nature. +# +# Now you should read http://docs.python.org/api/refcountDetails.html +# just to be sure you understand what is going on. +# +################################################################# + + + +################################################################# +# BIG FAT DEPRECATION WARNING +################################################################# +# Do NOT cimport any names directly from the cpython package, +# despite of the star-imports below. They will be removed at +# some point. +# Instead, use the correct sub-module to draw your cimports from. +# +# A direct cimport from the package will make your code depend on +# all of the existing declarations. This may have side-effects +# and reduces the portability of your code. 
+################################################################# +# START OF DEPRECATED SECTION +################################################################# + +from cpython.version cimport * +from cpython.ref cimport * +from cpython.exc cimport * +from cpython.module cimport * +from cpython.mem cimport * +from cpython.tuple cimport * +from cpython.list cimport * +from cpython.object cimport * +from cpython.sequence cimport * +from cpython.mapping cimport * +from cpython.iterator cimport * +from cpython.type cimport * +from cpython.number cimport * +from cpython.int cimport * +from cpython.bool cimport * +from cpython.long cimport * +from cpython.float cimport * +from cpython.complex cimport * +from cpython.string cimport * +from cpython.unicode cimport * +from cpython.dict cimport * +from cpython.instance cimport * +from cpython.function cimport * +from cpython.method cimport * +from cpython.weakref cimport * +from cpython.getargs cimport * +from cpython.pythread cimport * +from cpython.pystate cimport * + +# Python <= 2.x +from cpython.cobject cimport * +from cpython.oldbuffer cimport * + +# Python >= 2.4 +from cpython.set cimport * + +# Python >= 2.6 +from cpython.buffer cimport * +from cpython.bytes cimport * + +# Python >= 3.0 +from cpython.pycapsule cimport * + +# Python >= 3.7 +from cpython.contextvars cimport * + +################################################################# +# END OF DEPRECATED SECTION +################################################################# diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/array.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/array.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c64e972775b1aa3cdb3db7607d3d20aca799515f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/array.pxd @@ -0,0 +1,174 @@ +""" + array.pxd + + Cython 
interface to Python's array.array module. + + * 1D contiguous data view + * tools for fast array creation, maximum C-speed and handiness + * suitable as allround light weight auto-array within Cython code too + + Usage: + + >>> cimport array + + Usage through Cython buffer interface (Py2.3+): + + >>> def f(arg1, unsigned i, double dx) + ... array.array[double] a = arg1 + ... a[i] += dx + + Fast C-level new_array(_zeros), resize_array, copy_array, Py_SIZE(obj), + zero_array + + cdef array.array[double] k = array.copy(d) + cdef array.array[double] n = array.array(d, Py_SIZE(d) * 2 ) + cdef array.array[double] m = array.zeros_like(FLOAT_TEMPLATE) + array.resize(f, 200000) + + Zero overhead with naked data pointer views by union: + _f, _d, _i, _c, _u, ... + => Original C array speed + Python dynamic memory management + + cdef array.array a = inarray + if + a._d[2] += 0.66 # use as double array without extra casting + + float *subview = vector._f + 10 # starting from 10th element + unsigned char *subview_buffer = vector._B + 4 + + Suitable as lightweight arrays intra Cython without speed penalty. + Replacement for C stack/malloc arrays; no trouble with refcounting, + mem.leaks; seamless Python compatibility, buffer() optional + + + last changes: 2009-05-15 rk + : 2009-12-06 bp + : 2012-05-02 andreasvc + : (see revision control) +""" + +cdef extern from *: + """ + #if CYTHON_COMPILING_IN_PYPY + #ifdef _MSC_VER + #pragma message ("This module uses CPython specific internals of 'array.array', which are not available in PyPy.") + #else + #warning This module uses CPython specific internals of 'array.array', which are not available in PyPy. + #endif + #endif + """ + +from libc.string cimport memset, memcpy + +from cpython.object cimport Py_SIZE +from cpython.ref cimport PyTypeObject, Py_TYPE +from cpython.exc cimport PyErr_BadArgument +from cpython.mem cimport PyObject_Malloc, PyObject_Free + +cdef extern from *: # Hard-coded utility code hack. 
+ ctypedef class array.array [object arrayobject] + ctypedef object GETF(array a, Py_ssize_t ix) + ctypedef object SETF(array a, Py_ssize_t ix, object o) + ctypedef struct arraydescr: # [object arraydescr]: + char typecode + int itemsize + GETF getitem # PyObject * (*getitem)(struct arrayobject *, Py_ssize_t); + SETF setitem # int (*setitem)(struct arrayobject *, Py_ssize_t, PyObject *); + + ctypedef union __data_union: + # views of ob_item: + float* as_floats # direct float pointer access to buffer + double* as_doubles # double ... + int* as_ints + unsigned int *as_uints + unsigned char *as_uchars + signed char *as_schars + char *as_chars + unsigned long *as_ulongs + long *as_longs + unsigned long long *as_ulonglongs + long long *as_longlongs + short *as_shorts + unsigned short *as_ushorts + Py_UNICODE *as_pyunicodes + void *as_voidptr + + ctypedef class array.array [object arrayobject]: + cdef __cythonbufferdefaults__ = {'ndim' : 1, 'mode':'c'} + + cdef: + Py_ssize_t ob_size + arraydescr* ob_descr # struct arraydescr *ob_descr; + __data_union data + + def __getbuffer__(self, Py_buffer* info, int flags): + # This implementation of getbuffer is geared towards Cython + # requirements, and does not yet fulfill the PEP. + # In particular strided access is always provided regardless + # of flags + item_count = Py_SIZE(self) + + info.suboffsets = NULL + info.buf = self.data.as_chars + info.readonly = 0 + info.ndim = 1 + info.itemsize = self.ob_descr.itemsize # e.g. 
sizeof(float) + info.len = info.itemsize * item_count + + info.shape = PyObject_Malloc(sizeof(Py_ssize_t) + 2) + if not info.shape: + raise MemoryError() + info.shape[0] = item_count # constant regardless of resizing + info.strides = &info.itemsize + + info.format = (info.shape + 1) + info.format[0] = self.ob_descr.typecode + info.format[1] = 0 + info.obj = self + + def __releasebuffer__(self, Py_buffer* info): + PyObject_Free(info.shape) + + array newarrayobject(PyTypeObject* type, Py_ssize_t size, arraydescr *descr) + + # fast resize/realloc + # not suitable for small increments; reallocation 'to the point' + int resize(array self, Py_ssize_t n) except -1 + # efficient for small increments (not in Py2.3-) + int resize_smart(array self, Py_ssize_t n) except -1 + + +cdef inline array clone(array template, Py_ssize_t length, bint zero): + """ fast creation of a new array, given a template array. + type will be same as template. + if zero is true, new array will be initialized with zeroes.""" + cdef array op = newarrayobject(Py_TYPE(template), length, template.ob_descr) + if zero and op is not None: + memset(op.data.as_chars, 0, length * op.ob_descr.itemsize) + return op + +cdef inline array copy(array self): + """ make a copy of an array. """ + cdef array op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr) + memcpy(op.data.as_chars, self.data.as_chars, Py_SIZE(op) * op.ob_descr.itemsize) + return op + +cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1: + """ efficient appending of new stuff of same type + (e.g. of same array type) + n: number of elements (not number of bytes!) """ + cdef Py_ssize_t itemsize = self.ob_descr.itemsize + cdef Py_ssize_t origsize = Py_SIZE(self) + resize_smart(self, origsize + n) + memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize) + return 0 + +cdef inline int extend(array self, array other) except -1: + """ extend array with data from another array; types must match. 
""" + if self.ob_descr.typecode != other.ob_descr.typecode: + PyErr_BadArgument() + return extend_buffer(self, other.data.as_chars, Py_SIZE(other)) + +cdef inline void zero(array self) noexcept: + """ set all elements of array to zero. """ + memset(self.data.as_chars, 0, Py_SIZE(self) * self.ob_descr.itemsize) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/cellobject.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/cellobject.pxd new file mode 100644 index 0000000000000000000000000000000000000000..5e3dd3d63c4440693ee72baa02800807c7e49c3f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/cellobject.pxd @@ -0,0 +1,35 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + + ############################################################################ + # Cell Objects + ############################################################################ + + bint PyCell_Check(object ob) + # Return true if ob is a cell object; ob must not be NULL. + + object PyCell_New(PyObject* ob) + # Return value: New reference. + # Create and return a new cell object containing the value ob. The + # parameter may be NULL. + + object PyCell_Get(object cell) + # Return value: New reference. + # Return the contents of the cell object cell. + + object PyCell_GET(object cell) + # Return value: Borrowed reference. + # Return the contents of the cell object cell, but without checking that + # cell is non-NULL and is a cell object. + + int PyCell_Set(object cell, PyObject* value) except? -1 + # Set the contents of the cell object cell to value. This releases the + # reference to any current content of the cell. value may be NULL. cell + # must be non-NULL; if it is not a cell object, -1 will be returned. On + # success, 0 will be returned. 
+ + void PyCell_SET(object cell, PyObject* value) + # Sets the value of the cell object cell to value. No reference counts are + # adjusted, and no checks are made for safety; cell must be non-NULL and + # must be a cell object. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/complex.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/complex.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c0147547ce3db8ad6ba6b33e6310f30275e09abe --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/complex.pxd @@ -0,0 +1,55 @@ + +cdef extern from "Python.h": + + ctypedef struct Py_complex: + double imag + double real + + ############################################################################ + # 7.2.5.2 Complex Numbers as Python Objects + ############################################################################ + + # PyComplexObject + # This subtype of PyObject represents a Python complex number object. + + ctypedef class __builtin__.complex [object PyComplexObject]: + cdef Py_complex cval + + @property + cdef inline double real(self) noexcept: + return self.cval.real + + @property + cdef inline double imag(self) noexcept: + return self.cval.imag + + # PyTypeObject PyComplex_Type + # This instance of PyTypeObject represents the Python complex + # number type. It is the same object as complex and + # types.ComplexType. + + bint PyComplex_Check(object p) + # Return true if its argument is a PyComplexObject or a subtype of + # PyComplexObject. + + bint PyComplex_CheckExact(object p) + # Return true if its argument is a PyComplexObject, but not a subtype of PyComplexObject. + + object PyComplex_FromCComplex(Py_complex v) + # Return value: New reference. + # Create a new Python complex number object from a C Py_complex value. 
+ + object PyComplex_FromDoubles(double real, double imag) + # Return value: New reference. + # Return a new PyComplexObject object from real and imag. + + double PyComplex_RealAsDouble(object op) except? -1 + # Return the real part of op as a C double. + + double PyComplex_ImagAsDouble(object op) except? -1 + # Return the imaginary part of op as a C double. + + Py_complex PyComplex_AsCComplex(object op) + # Return the Py_complex value of the complex number op. + # + # Returns (-1+0i) in case of an error diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/exc.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/exc.pxd new file mode 100644 index 0000000000000000000000000000000000000000..756342ad3af47c7d97301f91505383b2c3d432a7 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/exc.pxd @@ -0,0 +1,263 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + + ##################################################################### + # 3. Exception Handling + ##################################################################### + + # The functions described in this chapter will let you handle and + # raise Python exceptions. It is important to understand some of + # the basics of Python exception handling. It works somewhat like + # the Unix errno variable: there is a global indicator (per + # thread) of the last error that occurred. Most functions don't + # clear this on success, but will set it to indicate the cause of + # the error on failure. Most functions also return an error + # indicator, usually NULL if they are supposed to return a + # pointer, or -1 if they return an integer (exception: the + # PyArg_*() functions return 1 for success and 0 for failure). 
+ + # When a function must fail because some function it called + # failed, it generally doesn't set the error indicator; the + # function it called already set it. It is responsible for either + # handling the error and clearing the exception or returning after + # cleaning up any resources it holds (such as object references or + # memory allocations); it should not continue normally if it is + # not prepared to handle the error. If returning due to an error, + # it is important to indicate to the caller that an error has been + # set. If the error is not handled or carefully propagated, + # additional calls into the Python/C API may not behave as + # intended and may fail in mysterious ways. + + # The error indicator consists of three Python objects + # corresponding to the Python variables sys.exc_type, + # sys.exc_value and sys.exc_traceback. API functions exist to + # interact with the error indicator in various ways. There is a + # separate error indicator for each thread. + + void PyErr_Print() + # Print a standard traceback to sys.stderr and clear the error + # indicator. Call this function only when the error indicator is + # set. (Otherwise it will cause a fatal error!) + + PyObject* PyErr_Occurred() + # Return value: Borrowed reference. + # Test whether the error indicator is set. If set, return the + # exception type (the first argument to the last call to one of + # the PyErr_Set*() functions or to PyErr_Restore()). If not set, + # return NULL. You do not own a reference to the return value, so + # you do not need to Py_DECREF() it. Note: Do not compare the + # return value to a specific exception; use + # PyErr_ExceptionMatches() instead, shown below. (The comparison + # could easily fail since the exception may be an instance instead + # of a class, in the case of a class exception, or it may be a + # subclass of the expected exception.) 
+ + bint PyErr_ExceptionMatches(object exc) + # Equivalent to "PyErr_GivenExceptionMatches(PyErr_Occurred(), + # exc)". This should only be called when an exception is actually + # set; a memory access violation will occur if no exception has + # been raised. + + bint PyErr_GivenExceptionMatches(object given, object exc) + # Return true if the given exception matches the exception in + # exc. If exc is a class object, this also returns true when given + # is an instance of a subclass. If exc is a tuple, all exceptions + # in the tuple (and recursively in subtuples) are searched for a + # match. If given is NULL, a memory access violation will occur. + + void PyErr_NormalizeException(PyObject** exc, PyObject** val, PyObject** tb) + # Under certain circumstances, the values returned by + # PyErr_Fetch() below can be ``unnormalized'', meaning that *exc + # is a class object but *val is not an instance of the same + # class. This function can be used to instantiate the class in + # that case. If the values are already normalized, nothing + # happens. The delayed normalization is implemented to improve + # performance. + + void PyErr_Clear() + # Clear the error indicator. If the error indicator is not set, there is no effect. + + void PyErr_Fetch(PyObject** ptype, PyObject** pvalue, PyObject** ptraceback) + # Retrieve the error indicator into three variables whose + # addresses are passed. If the error indicator is not set, set all + # three variables to NULL. If it is set, it will be cleared and + # you own a reference to each object retrieved. The value and + # traceback object may be NULL even when the type object is + # not. Note: This function is normally only used by code that + # needs to handle exceptions or by code that needs to save and + # restore the error indicator temporarily. 
+ + PyObject* PyErr_GetHandledException() + void PyErr_SetHandledException(PyObject* exc) + PyObject* PyErr_GetRaisedException() + void PyErr_SetRaisedException(PyObject* exc) + + void PyErr_Restore(PyObject* type, PyObject* value, PyObject* traceback) + # Set the error indicator from the three objects. If the error + # indicator is already set, it is cleared first. If the objects + # are NULL, the error indicator is cleared. Do not pass a NULL + # type and non-NULL value or traceback. The exception type should + # be a class. Do not pass an invalid exception type or + # value. (Violating these rules will cause subtle problems later.) + # This call takes away a reference to each object: you must own a + # reference to each object before the call and after the call you + # no longer own these references. (If you don't understand this, + # don't use this function. I warned you.) Note: This function is + # normally only used by code that needs to save and restore the + # error indicator temporarily; use PyErr_Fetch() to save the + # current exception state. + + void PyErr_SetString(object type, char *message) + # This is the most common way to set the error indicator. The + # first argument specifies the exception type; it is normally one + # of the standard exceptions, e.g. PyExc_RuntimeError. You need + # not increment its reference count. The second argument is an + # error message; it is converted to a string object. + + void PyErr_SetObject(object type, object value) + # This function is similar to PyErr_SetString() but lets you + # specify an arbitrary Python object for the ``value'' of the + # exception. + + PyObject* PyErr_Format(object exception, char *format, ...) except NULL + # Return value: Always NULL. + # This function sets the error indicator and returns + # NULL. exception should be a Python exception (class, not an + # instance). format should be a string, containing format codes, + # similar to printf(). 
The width.precision before a format code is + # parsed, but the width part is ignored. + + void PyErr_SetNone(object type) + # This is a shorthand for "PyErr_SetObject(type, Py_None)". + + int PyErr_BadArgument() except 0 + + # This is a shorthand for "PyErr_SetString(PyExc_TypeError, + # message)", where message indicates that a built-in operation was + # invoked with an illegal argument. It is mostly for internal use. + + PyObject* PyErr_NoMemory() except NULL + # Return value: Always NULL. + # This is a shorthand for "PyErr_SetNone(PyExc_MemoryError)"; it + # returns NULL so an object allocation function can write "return + # PyErr_NoMemory();" when it runs out of memory. + + PyObject* PyErr_SetFromErrno(object type) except NULL + # Return value: Always NULL. + # This is a convenience function to raise an exception when a C + # library function has returned an error and set the C variable + # errno. It constructs a tuple object whose first item is the + # integer errno value and whose second item is the corresponding + # error message (gotten from strerror()), and then calls + # "PyErr_SetObject(type, object)". On Unix, when the errno value + # is EINTR, indicating an interrupted system call, this calls + # PyErr_CheckSignals(), and if that set the error indicator, + # leaves it set to that. The function always returns NULL, so a + # wrapper function around a system call can write "return + # PyErr_SetFromErrno(type);" when the system call returns an + # error. + + PyObject* PyErr_SetFromErrnoWithFilenameObject(object type, object filenameObject) except NULL + # Similar to PyErr_SetFromErrno(), with the additional behavior + # that if filenameObject is not NULL, it is passed to the + # constructor of type as a third parameter. + # In the case of OSError exception, this is used to define + # the filename attribute of the exception instance. + + PyObject* PyErr_SetFromErrnoWithFilename(object type, char *filename) except NULL + # Return value: Always NULL. 
Similar to PyErr_SetFromErrno(), + # with the additional behavior that if filename is not NULL, it is + # passed to the constructor of type as a third parameter. In the + # case of exceptions such as IOError and OSError, this is used to + # define the filename attribute of the exception instance. + + PyObject* PyErr_SetFromWindowsErr(int ierr) except NULL + # Return value: Always NULL. This is a convenience function to + # raise WindowsError. If called with ierr of 0, the error code + # returned by a call to GetLastError() is used instead. It calls + # the Win32 function FormatMessage() to retrieve the Windows + # description of error code given by ierr or GetLastError(), then + # it constructs a tuple object whose first item is the ierr value + # and whose second item is the corresponding error message (gotten + # from FormatMessage()), and then calls + # "PyErr_SetObject(PyExc_WindowsError, object)". This function + # always returns NULL. Availability: Windows. + + PyObject* PyErr_SetExcFromWindowsErr(object type, int ierr) except NULL + # Return value: Always NULL. Similar to + # PyErr_SetFromWindowsErr(), with an additional parameter + # specifying the exception type to be raised. Availability: + # Windows. New in version 2.3. + + PyObject* PyErr_SetFromWindowsErrWithFilename(int ierr, char *filename) except NULL + # Return value: Always NULL. Similar to + # PyErr_SetFromWindowsErr(), with the additional behavior that if + # filename is not NULL, it is passed to the constructor of + # WindowsError as a third parameter. Availability: Windows. + + PyObject* PyErr_SetExcFromWindowsErrWithFilename(object type, int ierr, char *filename) except NULL + # Return value: Always NULL. + # Similar to PyErr_SetFromWindowsErrWithFilename(), with an + # additional parameter specifying the exception type to be + # raised. Availability: Windows. 
+ + void PyErr_BadInternalCall() + # This is a shorthand for "PyErr_SetString(PyExc_TypeError, + # message)", where message indicates that an internal operation + # (e.g. a Python/C API function) was invoked with an illegal + # argument. It is mostly for internal use. + + int PyErr_WarnEx(object category, char *message, int stacklevel) except -1 + # Issue a warning message. The category argument is a warning + # category (see below) or NULL; the message argument is a message + # string. stacklevel is a positive number giving a number of stack + # frames; the warning will be issued from the currently executing + # line of code in that stack frame. A stacklevel of 1 is the + # function calling PyErr_WarnEx(), 2 is the function above that, + # and so forth. + + int PyErr_WarnExplicit(object category, char *message, char *filename, int lineno, char *module, object registry) except -1 + # Issue a warning message with explicit control over all warning + # attributes. This is a straightforward wrapper around the Python + # function warnings.warn_explicit(), see there for more + # information. The module and registry arguments may be set to + # NULL to get the default effect described there. + + int PyErr_CheckSignals() except -1 + # This function interacts with Python's signal handling. It checks + # whether a signal has been sent to the processes and if so, + # invokes the corresponding signal handler. If the signal module + # is supported, this can invoke a signal handler written in + # Python. In all cases, the default effect for SIGINT is to raise + # the KeyboardInterrupt exception. If an exception is raised the + # error indicator is set and the function returns 1; otherwise the + # function returns 0. The error indicator may or may not be + # cleared if it was previously set. 
+ + void PyErr_SetInterrupt() nogil + # This function simulates the effect of a SIGINT signal arriving + # -- the next time PyErr_CheckSignals() is called, + # KeyboardInterrupt will be raised. It may be called without + # holding the interpreter lock. + + int PyErr_SetInterruptEx(int signum) + + object PyErr_NewException(char *name, object base, object dict) + # Return value: New reference. + # This utility function creates and returns a new exception + # object. The name argument must be the name of the new exception, + # a C string of the form module.class. The base and dict arguments + # are normally NULL. This creates a class object derived from + # Exception (accessible in C as PyExc_Exception). + + void PyErr_WriteUnraisable(object obj) + # This utility function prints a warning message to sys.stderr + # when an exception has been set but it is impossible for the + # interpreter to actually raise the exception. It is used, for + # example, when an exception occurs in an __del__() method. + # + # The function is called with a single argument obj that + # identifies the context in which the unraisable exception + # occurred. The repr of obj will be printed in the warning + # message. 
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/float.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/float.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7c567a80fcc4268aa39a2c78beb56a80ef046f2c --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/float.pxd @@ -0,0 +1,46 @@ +cdef extern from "Python.h": + """ + #if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyFloat_FromString(obj) PyFloat_FromString(obj) + #else + #define __Pyx_PyFloat_FromString(obj) PyFloat_FromString(obj, NULL) + #endif + """ + + ############################################################################ + # 7.2.3 + ############################################################################ + # PyFloatObject + # + # This subtype of PyObject represents a Python floating point object. + + # PyTypeObject PyFloat_Type + # + # This instance of PyTypeObject represents the Python floating + # point type. This is the same object as float and + # types.FloatType. + + bint PyFloat_Check(object p) + # Return true if its argument is a PyFloatObject or a subtype of + # PyFloatObject. + + bint PyFloat_CheckExact(object p) + # Return true if its argument is a PyFloatObject, but not a + # subtype of PyFloatObject. + + object PyFloat_FromString "__Pyx_PyFloat_FromString" (object str) + # Return value: New reference. + # Create a PyFloatObject object based on the string value in str, + # or NULL on failure. The pend argument is ignored. It remains + # only for backward compatibility. + + object PyFloat_FromDouble(double v) + # Return value: New reference. + # Create a PyFloatObject object from v, or NULL on failure. + + double PyFloat_AsDouble(object pyfloat) except? -1 + # Return a C double representation of the contents of pyfloat. 
+ + double PyFloat_AS_DOUBLE(object pyfloat) + # Return a C double representation of the contents of pyfloat, but + # without error checking. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/function.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/function.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0002a3f6cbc426dc05dbdfce454da44483fbfa3e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/function.pxd @@ -0,0 +1,65 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + + ############################################################################ + # 7.5.3 Function Objects + ############################################################################ + # There are a few functions specific to Python functions. + + # PyFunctionObject + # + # The C structure used for functions. + + # PyTypeObject PyFunction_Type + # + # This is an instance of PyTypeObject and represents the Python + # function type. It is exposed to Python programmers as + # types.FunctionType. + + bint PyFunction_Check(object o) + # Return true if o is a function object (has type + # PyFunction_Type). The parameter must not be NULL. + + object PyFunction_New(object code, object globals) + # Return value: New reference. + # Return a new function object associated with the code object + # code. globals must be a dictionary with the global variables + # accessible to the function. + # The function's docstring, name and __module__ are retrieved from + # the code object, the argument defaults and closure are set to + # NULL. + + PyObject* PyFunction_GetCode(object op) except? NULL + # Return value: Borrowed reference. + # Return the code object associated with the function object op. + + PyObject* PyFunction_GetGlobals(object op) except? NULL + # Return value: Borrowed reference. 
+ # Return the globals dictionary associated with the function object op. + + PyObject* PyFunction_GetModule(object op) except? NULL + # Return value: Borrowed reference. + # Return the __module__ attribute of the function object op. This + # is normally a string containing the module name, but can be set + # to any other object by Python code. + + PyObject* PyFunction_GetDefaults(object op) except? NULL + # Return value: Borrowed reference. + # Return the argument default values of the function object + # op. This can be a tuple of arguments or NULL. + + int PyFunction_SetDefaults(object op, object defaults) except -1 + # Set the argument default values for the function object + # op. defaults must be Py_None or a tuple. + # Raises SystemError and returns -1 on failure. + + PyObject* PyFunction_GetClosure(object op) except? NULL + # Return value: Borrowed reference. + # Return the closure associated with the function object op. This + # can be NULL or a tuple of cell objects. + + int PyFunction_SetClosure(object op, object closure) except -1 + # Set the closure associated with the function object op. closure + # must be Py_None or a tuple of cell objects. + # Raises SystemError and returns -1 on failure. 
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/int.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/int.pxd new file mode 100644 index 0000000000000000000000000000000000000000..50babff61514ad2fef33f4c1bad6e7393a113a38 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/int.pxd @@ -0,0 +1,89 @@ +cdef extern from "Python.h": + ctypedef unsigned long long PY_LONG_LONG + + ############################################################################ + # Integer Objects + ############################################################################ + # PyTypeObject PyInt_Type + # This instance of PyTypeObject represents the Python plain + # integer type. This is the same object as int and types.IntType. + + bint PyInt_Check(object o) + # Return true if o is of type PyInt_Type or a subtype of + # PyInt_Type. + + bint PyInt_CheckExact(object o) + # Return true if o is of type PyInt_Type, but not a subtype of + # PyInt_Type. + + object PyInt_FromString(char *str, char **pend, int base) + # Return value: New reference. + # Return a new PyIntObject or PyLongObject based on the string + # value in str, which is interpreted according to the radix in + # base. If pend is non-NULL, *pend will point to the first + # character in str which follows the representation of the + # number. If base is 0, the radix will be determined based on the + # leading characters of str: if str starts with '0x' or '0X', + # radix 16 will be used; if str starts with '0', radix 8 will be + # used; otherwise radix 10 will be used. If base is not 0, it must + # be between 2 and 36, inclusive. Leading spaces are ignored. If + # there are no digits, ValueError will be raised. If the string + # represents a number too large to be contained within the + # machine's long int type and overflow warnings are being + # suppressed, a PyLongObject will be returned. 
If overflow + # warnings are not being suppressed, NULL will be returned in this + # case. + + object PyInt_FromLong(long ival) + # Return value: New reference. + # Create a new integer object with a value of ival. + # The current implementation keeps an array of integer objects for + # all integers between -5 and 256, when you create an int in that + # range you actually just get back a reference to the existing + # object. So it should be possible to change the value of 1. I + # suspect the behaviour of Python in this case is undefined. :-) + + object PyInt_FromSsize_t(Py_ssize_t ival) + # Return value: New reference. + # Create a new integer object with a value of ival. If the value + # is larger than LONG_MAX or smaller than LONG_MIN, a long integer + # object is returned. + + object PyInt_FromSize_t(size_t ival) + # Return value: New reference. + # Create a new integer object with a value of ival. If the value + # exceeds LONG_MAX, a long integer object is returned. + + long PyInt_AsLong(object io) except? -1 + # Will first attempt to cast the object to a PyIntObject, if it is + # not already one, and then return its value. If there is an + # error, -1 is returned, and the caller should check + # PyErr_Occurred() to find out whether there was an error, or + # whether the value just happened to be -1. + + long PyInt_AS_LONG(object io) + # Return the value of the object io. No error checking is performed. + + unsigned long PyInt_AsUnsignedLongMask(object io) except? -1 + # Will first attempt to cast the object to a PyIntObject or + # PyLongObject, if it is not already one, and then return its + # value as unsigned long. This function does not check for + # overflow. + + PY_LONG_LONG PyInt_AsUnsignedLongLongMask(object io) except? -1 + # Will first attempt to cast the object to a PyIntObject or + # PyLongObject, if it is not already one, and then return its + # value as unsigned long long, without checking for overflow. 
+ + Py_ssize_t PyInt_AsSsize_t(object io) except? -1 + # Will first attempt to cast the object to a PyIntObject or + # PyLongObject, if it is not already one, and then return its + # value as Py_ssize_t. + + long PyInt_GetMax() + # Return the system's idea of the largest integer it can handle + # (LONG_MAX, as defined in the system header files). + + int PyInt_ClearFreeList() + # Clear the integer free list. Return the number of items that could not be freed. + # New in version 2.6. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/iterobject.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/iterobject.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a70aeccb097f3b8ff2b93d31e7752cbf3008dd88 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/iterobject.pxd @@ -0,0 +1,24 @@ +cdef extern from "Python.h": + + ########################################################################### + # Iterator Objects + ########################################################################### + + bint PySeqIter_Check(object op) + # Return true if the type of op is PySeqIter_Type. + + object PySeqIter_New(object seq) + # Return value: New reference. + # Return an iterator that works with a general sequence object, seq. The + # iteration ends when the sequence raises IndexError for the subscripting + # operation. + + bint PyCallIter_Check(object op) + # Return true if the type of op is PyCallIter_Type. + + object PyCallIter_New(object callable, object sentinel) + # Return value: New reference. + # Return a new iterator. The first parameter, callable, can be any Python + # callable object that can be called with no parameters; each call to it + # should return the next item in the iteration. When callable returns a + # value equal to sentinel, the iteration will be terminated. 
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/list.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/list.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1d0503c2c1afa11afea3138fac4b4b20d5178624 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/list.pxd @@ -0,0 +1,92 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + + ############################################################################ + # Lists + ############################################################################ + list PyList_New(Py_ssize_t len) + # Return a new list of length len on success, or NULL on failure. + # + # Note: If length is greater than zero, the returned list object's + # items are set to NULL. Thus you cannot use abstract API + # functions such as PySequence_SetItem() or expose the object to + # Python code before setting all items to a real object with + # PyList_SetItem(). + + bint PyList_Check(object p) + # Return true if p is a list object or an instance of a subtype of + # the list type. + + bint PyList_CheckExact(object p) + # Return true if p is a list object, but not an instance of a + # subtype of the list type. + + Py_ssize_t PyList_Size(object list) except -1 + # Return the length of the list object in list; this is equivalent + # to "len(list)" on a list object. + + Py_ssize_t PyList_GET_SIZE(object list) + # Macro form of PyList_Size() without error checking. + + PyObject* PyList_GetItem(object list, Py_ssize_t index) except NULL + # Return value: Borrowed reference. + # Return the object at position pos in the list pointed to by + # p. The position must be positive, indexing from the end of the + # list is not supported. If pos is out of bounds, return NULL and + # set an IndexError exception. 
+ + PyObject* PyList_GET_ITEM(object list, Py_ssize_t i) + # Return value: Borrowed reference. + # Macro form of PyList_GetItem() without error checking. + + int PyList_SetItem(object list, Py_ssize_t index, object item) except -1 + # Set the item at index index in list to item. Return 0 on success + # or -1 on failure. + # + # WARNING: This function _steals_ a reference to item and discards a + # reference to an item already in the list at the affected position. + + void PyList_SET_ITEM(object list, Py_ssize_t i, object o) + # Macro form of PyList_SetItem() without error checking. This is + # normally only used to fill in new lists where there is no + # previous content. + # + # WARNING: This function _steals_ a reference to item, and, unlike + # PyList_SetItem(), does not discard a reference to any item that + # it being replaced; any reference in list at position i will be *leaked*. + + int PyList_Insert(object list, Py_ssize_t index, object item) except -1 + # Insert the item item into list list in front of index + # index. Return 0 if successful; return -1 and set an exception if + # unsuccessful. Analogous to list.insert(index, item). + + int PyList_Append(object list, object item) except -1 + # Append the object item at the end of list list. Return 0 if + # successful; return -1 and set an exception if + # unsuccessful. Analogous to list.append(item). + + list PyList_GetSlice(object list, Py_ssize_t low, Py_ssize_t high) + # Return value: New reference. + # Return a list of the objects in list containing the objects + # between low and high. Return NULL and set an exception if + # unsuccessful. Analogous to list[low:high]. + + int PyList_SetSlice(object list, Py_ssize_t low, Py_ssize_t high, object itemlist) except -1 + # Set the slice of list between low and high to the contents of + # itemlist. Analogous to list[low:high] = itemlist. The itemlist + # may be NULL, indicating the assignment of an empty list (slice + # deletion). 
Return 0 on success, -1 on failure. + + int PyList_Sort(object list) except -1 + # Sort the items of list in place. Return 0 on success, -1 on + # failure. This is equivalent to "list.sort()". + + int PyList_Reverse(object list) except -1 + # Reverse the items of list in place. Return 0 on success, -1 on + # failure. This is the equivalent of "list.reverse()". + + tuple PyList_AsTuple(object list) + # Return value: New reference. + # Return a new tuple object containing the contents of list; + # equivalent to "tuple(list)". diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/long.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/long.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f65cd0073ac43b4b7f560f76c85defbb4e4e9809 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/long.pxd @@ -0,0 +1,149 @@ + +cdef extern from "Python.h": + ctypedef long long PY_LONG_LONG + ctypedef unsigned long long uPY_LONG_LONG "unsigned PY_LONG_LONG" + + ############################################################################ + # 7.2.3 Long Integer Objects + ############################################################################ + + # PyLongObject + # + # This subtype of PyObject represents a Python long integer object. + + # PyTypeObject PyLong_Type + # + # This instance of PyTypeObject represents the Python long integer + # type. This is the same object as long and types.LongType. + + bint PyLong_Check(object p) + # Return true if its argument is a PyLongObject or a subtype of PyLongObject. + + bint PyLong_CheckExact(object p) + # Return true if its argument is a PyLongObject, but not a subtype of PyLongObject. + + object PyLong_FromLong(long v) + # Return value: New reference. + # Return a new PyLongObject object from v, or NULL on failure. 
+ + object PyLong_FromUnsignedLong(unsigned long v) + # Return value: New reference. + # Return a new PyLongObject object from a C unsigned long, or NULL on failure. + + object PyLong_FromSsize_t(Py_ssize_t v) + # Return value: New reference. + # Return a new PyLongObject object from a C Py_ssize_t, or NULL on failure.) + + object PyLong_FromSize_t(size_t v) + # Return value: New reference. + # Return a new PyLongObject object from a C size_t, or NULL on failure. + + object PyLong_FromLongLong(PY_LONG_LONG v) + # Return value: New reference. + # Return a new PyLongObject object from a C long long, or NULL on failure. + + object PyLong_FromUnsignedLongLong(uPY_LONG_LONG v) + # Return value: New reference. + # Return a new PyLongObject object from a C unsigned long long, or NULL on failure. + + object PyLong_FromDouble(double v) + # Return value: New reference. + # Return a new PyLongObject object from the integer part of v, or NULL on failure. + + object PyLong_FromString(char *str, char **pend, int base) + # Return value: New reference. + # Return a new PyLongObject based on the string value in str, + # which is interpreted according to the radix in base. If pend is + # non-NULL, *pend will point to the first character in str which + # follows the representation of the number. If base is 0, the + # radix will be determined based on the leading characters of str: + # if str starts with '0x' or '0X', radix 16 will be used; if str + # starts with '0', radix 8 will be used; otherwise radix 10 will + # be used. If base is not 0, it must be between 2 and 36, + # inclusive. Leading spaces are ignored. If there are no digits, + # ValueError will be raised. + + object PyLong_FromUnicode(Py_UNICODE *u, Py_ssize_t length, int base) + # Return value: New reference. + # Convert a sequence of Unicode digits to a Python long integer + # value. 
The first parameter, u, points to the first character of + # the Unicode string, length gives the number of characters, and + # base is the radix for the conversion. The radix must be in the + # range [2, 36]; if it is out of range, ValueError will be + # raised. + + # object PyLong_FromUnicodeObject(object u, int base) + # Convert a sequence of Unicode digits in the string u to a Python integer + # value. The Unicode string is first encoded to a byte string using + # PyUnicode_EncodeDecimal() and then converted using PyLong_FromString(). + # New in version 3.3. + + object PyLong_FromVoidPtr(void *p) + # Return value: New reference. + # Create a Python integer or long integer from the pointer p. The + # pointer value can be retrieved from the resulting value using + # PyLong_AsVoidPtr(). If the integer is larger than LONG_MAX, a + # positive long integer is returned. + + long PyLong_AsLong(object pylong) except? -1 + # Return a C long representation of the contents of pylong. If + # pylong is greater than LONG_MAX, an OverflowError is raised. + + long PyLong_AsLongAndOverflow(object pylong, int *overflow) except? -1 + # Return a C long representation of the contents of pylong. If pylong is + # greater than LONG_MAX or less than LONG_MIN, set *overflow to 1 or -1, + # respectively, and return -1; otherwise, set *overflow to 0. If any other + # exception occurs (for example a TypeError or MemoryError), then -1 will + # be returned and *overflow will be 0. + # New in version 2.7. + + PY_LONG_LONG PyLong_AsLongLongAndOverflow(object pylong, int *overflow) except? -1 + # Return a C long long representation of the contents of pylong. If pylong + # is greater than PY_LLONG_MAX or less than PY_LLONG_MIN, set *overflow to + # 1 or -1, respectively, and return -1; otherwise, set *overflow to 0. If + # any other exception occurs (for example a TypeError or MemoryError), then + # -1 will be returned and *overflow will be 0. + # New in version 2.7. 
+ + Py_ssize_t PyLong_AsSsize_t(object pylong) except? -1 + # Return a C Py_ssize_t representation of the contents of pylong. If pylong + # is greater than PY_SSIZE_T_MAX, an OverflowError is raised and -1 will be + # returned. + + unsigned long PyLong_AsUnsignedLong(object pylong) except? -1 + # Return a C unsigned long representation of the contents of + # pylong. If pylong is greater than ULONG_MAX, an OverflowError is + # raised. + + PY_LONG_LONG PyLong_AsLongLong(object pylong) except? -1 + # Return a C long long from a Python long integer. If pylong + # cannot be represented as a long long, an OverflowError will be + # raised. + + uPY_LONG_LONG PyLong_AsUnsignedLongLong(object pylong) except? -1 + #unsigned PY_LONG_LONG PyLong_AsUnsignedLongLong(object pylong) + # Return a C unsigned long long from a Python long integer. If + # pylong cannot be represented as an unsigned long long, an + # OverflowError will be raised if the value is positive, or a + # TypeError will be raised if the value is negative. + + unsigned long PyLong_AsUnsignedLongMask(object io) except? -1 + # Return a C unsigned long from a Python long integer, without + # checking for overflow. + + uPY_LONG_LONG PyLong_AsUnsignedLongLongMask(object io) except? -1 + #unsigned PY_LONG_LONG PyLong_AsUnsignedLongLongMask(object io) + # Return a C unsigned long long from a Python long integer, + # without checking for overflow. + + double PyLong_AsDouble(object pylong) except? -1.0 + # Return a C double representation of the contents of pylong. If + # pylong cannot be approximately represented as a double, an + # OverflowError exception is raised and -1.0 will be returned. + + void* PyLong_AsVoidPtr(object pylong) except? NULL + # Convert a Python integer or long integer pylong to a C void + # pointer. If pylong cannot be converted, an OverflowError will be + # raised. This is only assured to produce a usable void pointer + # for values created with PyLong_FromVoidPtr(). 
For values outside + # 0..LONG_MAX, both signed and unsigned integers are accepted. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/mem.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/mem.pxd new file mode 100644 index 0000000000000000000000000000000000000000..236d111f6ff8d9b436c48eab131bcedc7a35848e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/mem.pxd @@ -0,0 +1,120 @@ +cdef extern from "Python.h": + + ##################################################################### + # 9.2 Memory Interface + ##################################################################### + # You are definitely *supposed* to use these: "In most situations, + # however, it is recommended to allocate memory from the Python + # heap specifically because the latter is under control of the + # Python memory manager. For example, this is required when the + # interpreter is extended with new object types written in + # C. Another reason for using the Python heap is the desire to + # inform the Python memory manager about the memory needs of the + # extension module. Even when the requested memory is used + # exclusively for internal, highly-specific purposes, delegating + # all memory requests to the Python memory manager causes the + # interpreter to have a more accurate image of its memory + # footprint as a whole. Consequently, under certain circumstances, + # the Python memory manager may or may not trigger appropriate + # actions, like garbage collection, memory compaction or other + # preventive procedures. Note that by using the C library + # allocator as shown in the previous example, the allocated memory + # for the I/O buffer escapes completely the Python memory + # manager." 
+ + # The following function sets, modeled after the ANSI C standard, + # but specifying behavior when requesting zero bytes, are + # available for allocating and releasing memory from the Python + # heap: + + void* PyMem_RawMalloc(size_t n) nogil + void* PyMem_Malloc(size_t n) + # Allocates n bytes and returns a pointer of type void* to the + # allocated memory, or NULL if the request fails. Requesting zero + # bytes returns a distinct non-NULL pointer if possible, as if + # PyMem_Malloc(1) had been called instead. The memory will not + # have been initialized in any way. + + void* PyMem_RawCalloc(size_t nelem, size_t elsize) nogil + void* PyMem_Calloc(size_t nelem, size_t elsize) + # Allocates nelem elements each whose size in bytes is elsize and + # returns a pointer of type void* to the allocated memory, or NULL if + # the request fails. The memory is initialized to zeros. Requesting + # zero elements or elements of size zero bytes returns a distinct + # non-NULL pointer if possible, as if PyMem_Calloc(1, 1) had been + # called instead. + + void* PyMem_RawRealloc(void *p, size_t n) nogil + void* PyMem_Realloc(void *p, size_t n) + # Resizes the memory block pointed to by p to n bytes. The + # contents will be unchanged to the minimum of the old and the new + # sizes. If p is NULL, the call is equivalent to PyMem_Malloc(n); + # else if n is equal to zero, the memory block is resized but is + # not freed, and the returned pointer is non-NULL. Unless p is + # NULL, it must have been returned by a previous call to + # PyMem_Malloc(), PyMem_Realloc(), or PyMem_Calloc(). + + void PyMem_RawFree(void *p) nogil + void PyMem_Free(void *p) + # Frees the memory block pointed to by p, which must have been + # returned by a previous call to PyMem_Malloc(), PyMem_Realloc(), or + # PyMem_Calloc(). Otherwise, or if PyMem_Free(p) has been called + # before, undefined behavior occurs. If p is NULL, no operation is + # performed. 
+ + # The following type-oriented macros are provided for + # convenience. Note that TYPE refers to any C type. + + # TYPE* PyMem_New(TYPE, size_t n) + # Same as PyMem_Malloc(), but allocates (n * sizeof(TYPE)) bytes + # of memory. Returns a pointer cast to TYPE*. The memory will not + # have been initialized in any way. + + # TYPE* PyMem_Resize(void *p, TYPE, size_t n) + # Same as PyMem_Realloc(), but the memory block is resized to (n * + # sizeof(TYPE)) bytes. Returns a pointer cast to TYPE*. + + void PyMem_Del(void *p) + # Same as PyMem_Free(). + + # In addition, the following macro sets are provided for calling + # the Python memory allocator directly, without involving the C + # API functions listed above. However, note that their use does + # not preserve binary compatibility across Python versions and is + # therefore deprecated in extension modules. + + # PyMem_MALLOC(), PyMem_REALLOC(), PyMem_FREE(). + # PyMem_NEW(), PyMem_RESIZE(), PyMem_DEL(). + + + ##################################################################### + # Raw object memory interface + ##################################################################### + + # Functions to call the same malloc/realloc/free as used by Python's + # object allocator. If WITH_PYMALLOC is enabled, these may differ from + # the platform malloc/realloc/free. The Python object allocator is + # designed for fast, cache-conscious allocation of many "small" objects, + # and with low hidden memory overhead. + # + # PyObject_Malloc(0) returns a unique non-NULL pointer if possible. + # + # PyObject_Realloc(NULL, n) acts like PyObject_Malloc(n). + # PyObject_Realloc(p != NULL, 0) does not return NULL, or free the memory + # at p. + # + # Returned pointers must be checked for NULL explicitly; no action is + # performed on failure other than to return NULL (no warning it printed, no + # exception is set, etc). + # + # For allocating objects, use PyObject_{New, NewVar} instead whenever + # possible. 
The PyObject_{Malloc, Realloc, Free} family is exposed + # so that you can exploit Python's small-block allocator for non-object + # uses. If you must use these routines to allocate object memory, make sure + # the object gets initialized via PyObject_{Init, InitVar} after obtaining + # the raw memory. + + void* PyObject_Malloc(size_t size) + void* PyObject_Calloc(size_t nelem, size_t elsize) + void* PyObject_Realloc(void *ptr, size_t new_size) + void PyObject_Free(void *ptr) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/memoryview.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/memoryview.pxd new file mode 100644 index 0000000000000000000000000000000000000000..83a84e6f91193e5b9a35f9f400e91f05f2c84e86 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/memoryview.pxd @@ -0,0 +1,50 @@ +cdef extern from "Python.h": + + ########################################################################### + # MemoryView Objects + ########################################################################### + # A memoryview object exposes the C level buffer interface as a Python + # object which can then be passed around like any other object + + object PyMemoryView_FromObject(object obj) + # Return value: New reference. + # Create a memoryview object from an object that provides the buffer + # interface. If obj supports writable buffer exports, the memoryview object + # will be read/write, otherwise it may be either read-only or read/write at + # the discretion of the exporter. + + object PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags) + # Return value: New reference. + # Create a memoryview object using mem as the underlying buffer. flags can + # be one of PyBUF_READ or PyBUF_WRITE. + # New in version 3.3. + + object PyMemoryView_FromBuffer(Py_buffer *view) + # Return value: New reference. 
+ # Create a memoryview object wrapping the given buffer structure view. For + # simple byte buffers, PyMemoryView_FromMemory() is the preferred function. + + object PyMemoryView_GetContiguous(object obj, + int buffertype, + char order) + # Return value: New reference. + # Create a memoryview object to a contiguous chunk of memory (in either ‘C’ + # or ‘F’ortran order) from an object that defines the buffer interface. If + # memory is contiguous, the memoryview object points to the original + # memory. Otherwise, a copy is made and the memoryview points to a new + # bytes object. + + bint PyMemoryView_Check(object obj) + # Return true if the object obj is a memoryview object. It is not currently + # allowed to create subclasses of memoryview. + + Py_buffer *PyMemoryView_GET_BUFFER(object mview) + # Return a pointer to the memoryview’s private copy of the exporter’s + # buffer. mview must be a memoryview instance; this macro doesn’t check its + # type, you must do it yourself or you will risk crashes. + + Py_buffer *PyMemoryView_GET_BASE(object mview) + # Return either a pointer to the exporting object that the memoryview is + # based on or NULL if the memoryview has been created by one of the + # functions PyMemoryView_FromMemory() or PyMemoryView_FromBuffer(). mview + # must be a memoryview instance. 
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/object.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/object.pxd new file mode 100644 index 0000000000000000000000000000000000000000..41874159ce738ded59b7bf07c8a4779100538138 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/object.pxd @@ -0,0 +1,440 @@ +from libc.stdio cimport FILE +cimport cpython.type + +cdef extern from "Python.h": + + ctypedef struct PyObject # forward declaration + + ctypedef object (*newfunc)(cpython.type.type, PyObject*, PyObject*) # (type, args|NULL, kwargs|NULL) + + ctypedef object (*unaryfunc)(object) + ctypedef object (*binaryfunc)(object, object) + ctypedef object (*ternaryfunc)(object, object, object) + ctypedef int (*inquiry)(object) except -1 + ctypedef Py_ssize_t (*lenfunc)(object) except -1 + ctypedef object (*ssizeargfunc)(object, Py_ssize_t) + ctypedef object (*ssizessizeargfunc)(object, Py_ssize_t, Py_ssize_t) + ctypedef int (*ssizeobjargproc)(object, Py_ssize_t, object) except -1 + ctypedef int (*ssizessizeobjargproc)(object, Py_ssize_t, Py_ssize_t, object) except -1 + ctypedef int (*objobjargproc)(object, object, object) except -1 + ctypedef int (*objobjproc)(object, object) except -1 + + ctypedef Py_hash_t (*hashfunc)(object) except -1 + ctypedef object (*reprfunc)(object) + + ctypedef int (*cmpfunc)(object, object) except -2 + ctypedef object (*richcmpfunc)(object, object, int) + + # The following functions use 'PyObject*' as first argument instead of 'object' to prevent + # accidental reference counting when calling them during a garbage collection run. 
+ ctypedef void (*destructor)(PyObject*) + ctypedef int (*visitproc)(PyObject*, void *) except -1 + ctypedef int (*traverseproc)(PyObject*, visitproc, void*) except -1 + ctypedef void (*freefunc)(void*) + + ctypedef object (*descrgetfunc)(object, object, object) + ctypedef int (*descrsetfunc)(object, object, object) except -1 + + ctypedef object (*PyCFunction)(object, object) + + ctypedef struct PyMethodDef: + const char* ml_name + PyCFunction ml_meth + int ml_flags + const char* ml_doc + + ctypedef struct PyTypeObject: + const char* tp_name + const char* tp_doc + Py_ssize_t tp_basicsize + Py_ssize_t tp_itemsize + Py_ssize_t tp_dictoffset + unsigned long tp_flags + + newfunc tp_new + destructor tp_dealloc + destructor tp_del + destructor tp_finalize + traverseproc tp_traverse + inquiry tp_clear + freefunc tp_free + + ternaryfunc tp_call + hashfunc tp_hash + reprfunc tp_str + reprfunc tp_repr + + cmpfunc tp_compare + richcmpfunc tp_richcompare + + PyMethodDef* tp_methods + + PyTypeObject* tp_base + PyObject* tp_dict + + descrgetfunc tp_descr_get + descrsetfunc tp_descr_set + + unsigned int tp_version_tag + + ctypedef struct PyObject: + Py_ssize_t ob_refcnt + PyTypeObject *ob_type + + cdef PyTypeObject *Py_TYPE(object) + + void* PyObject_Malloc(size_t) + void* PyObject_Realloc(void *, size_t) + void PyObject_Free(void *) + + ##################################################################### + # 6.1 Object Protocol + ##################################################################### + int PyObject_Print(object o, FILE *fp, int flags) except -1 + # Print an object o, on file fp. Returns -1 on error. The flags + # argument is used to enable certain printing options. The only + # option currently supported is Py_PRINT_RAW; if given, the str() + # of the object is written instead of the repr(). + + bint PyObject_HasAttrString(object o, const char *attr_name) + # Returns 1 if o has the attribute attr_name, and 0 + # otherwise. 
This is equivalent to the Python expression + # "hasattr(o, attr_name)". This function always succeeds. + + object PyObject_GetAttrString(object o, const char *attr_name) + # Return value: New reference. Retrieve an attribute named + # attr_name from object o. Returns the attribute value on success, + # or NULL on failure. This is the equivalent of the Python + # expression "o.attr_name". + + bint PyObject_HasAttr(object o, object attr_name) + # Returns 1 if o has the attribute attr_name, and 0 + # otherwise. This is equivalent to the Python expression + # "hasattr(o, attr_name)". This function always succeeds. + + object PyObject_GetAttr(object o, object attr_name) + # Return value: New reference. Retrieve an attribute named + # attr_name from object o. Returns the attribute value on success, + # or NULL on failure. This is the equivalent of the Python + # expression "o.attr_name". + + object PyObject_GenericGetAttr(object o, object attr_name) + + int PyObject_SetAttrString(object o, const char *attr_name, object v) except -1 + # Set the value of the attribute named attr_name, for object o, to + # the value v. Returns -1 on failure. This is the equivalent of + # the Python statement "o.attr_name = v". + + int PyObject_SetAttr(object o, object attr_name, object v) except -1 + # Set the value of the attribute named attr_name, for object o, to + # the value v. Returns -1 on failure. This is the equivalent of + # the Python statement "o.attr_name = v". + + int PyObject_GenericSetAttr(object o, object attr_name, object v) except -1 + + int PyObject_DelAttrString(object o, const char *attr_name) except -1 + # Delete attribute named attr_name, for object o. Returns -1 on + # failure. This is the equivalent of the Python statement: "del + # o.attr_name". + + int PyObject_DelAttr(object o, object attr_name) except -1 + # Delete attribute named attr_name, for object o. Returns -1 on + # failure. This is the equivalent of the Python statement "del + # o.attr_name". 
+ + object PyObject_GenericGetDict(object o, void *context) + # Return value: New reference. + # A generic implementation for the getter of a __dict__ descriptor. It + # creates the dictionary if necessary. + # New in version 3.3. + + int PyObject_GenericSetDict(object o, object value, void *context) except -1 + # A generic implementation for the setter of a __dict__ descriptor. This + # implementation does not allow the dictionary to be deleted. + # New in version 3.3. + + int Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE + + object PyObject_RichCompare(object o1, object o2, int opid) + # Return value: New reference. + # Compare the values of o1 and o2 using the operation specified by + # opid, which must be one of Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, or + # Py_GE, corresponding to <, <=, ==, !=, >, or >= + # respectively. This is the equivalent of the Python expression + # "o1 op o2", where op is the operator corresponding to + # opid. Returns the value of the comparison on success, or NULL on + # failure. + + bint PyObject_RichCompareBool(object o1, object o2, int opid) except -1 + # Compare the values of o1 and o2 using the operation specified by + # opid, which must be one of Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, or + # Py_GE, corresponding to <, <=, ==, !=, >, or >= + # respectively. Returns -1 on error, 0 if the result is false, 1 + # otherwise. This is the equivalent of the Python expression "o1 + # op o2", where op is the operator corresponding to opid. + + int PyObject_Cmp(object o1, object o2, int *result) except -1 + # Compare the values of o1 and o2 using a routine provided by o1, + # if one exists, otherwise with a routine provided by o2. The + # result of the comparison is returned in result. Returns -1 on + # failure. This is the equivalent of the Python statement "result + # = cmp(o1, o2)". 
+ + int PyObject_Compare(object o1, object o2) except * + # Compare the values of o1 and o2 using a routine provided by o1, + # if one exists, otherwise with a routine provided by o2. Returns + # the result of the comparison on success. On error, the value + # returned is undefined; use PyErr_Occurred() to detect an + # error. This is equivalent to the Python expression "cmp(o1, + # o2)". + + object PyObject_Repr(object o) + # Return value: New reference. + # Compute a string representation of object o. Returns the string + # representation on success, NULL on failure. This is the + # equivalent of the Python expression "repr(o)". Called by the + # repr() built-in function and by reverse quotes. + + object PyObject_Str(object o) + # Return value: New reference. + # Compute a string representation of object o. Returns the string + # representation on success, NULL on failure. This is the + # equivalent of the Python expression "str(o)". Called by the + # str() built-in function and by the print statement. + + object PyObject_Bytes(object o) + # Return value: New reference. + # Compute a bytes representation of object o. Return NULL on + # failure and a bytes object on success. This is equivalent to + # the Python expression bytes(o), when o is not an integer. + # Unlike bytes(o), a TypeError is raised when o is an integer + # instead of a zero-initialized bytes object. + + object PyObject_Unicode(object o) + # Return value: New reference. + # Compute a Unicode string representation of object o. Returns the + # Unicode string representation on success, NULL on failure. This + # is the equivalent of the Python expression "unicode(o)". Called + # by the unicode() built-in function. + + bint PyObject_IsInstance(object inst, object cls) except -1 + # Returns 1 if inst is an instance of the class cls or a subclass + # of cls, or 0 if not. On error, returns -1 and sets an + # exception. 
If cls is a type object rather than a class object, + # PyObject_IsInstance() returns 1 if inst is of type cls. If cls + # is a tuple, the check will be done against every entry in + # cls. The result will be 1 when at least one of the checks + # returns 1, otherwise it will be 0. If inst is not a class + # instance and cls is neither a type object, nor a class object, + # nor a tuple, inst must have a __class__ attribute -- the class + # relationship of the value of that attribute with cls will be + # used to determine the result of this function. + + # Subclass determination is done in a fairly straightforward way, + # but includes a wrinkle that implementors of extensions to the + # class system may want to be aware of. If A and B are class + # objects, B is a subclass of A if it inherits from A either + # directly or indirectly. If either is not a class object, a more + # general mechanism is used to determine the class relationship of + # the two objects. When testing if B is a subclass of A, if A is + # B, PyObject_IsSubclass() returns true. If A and B are different + # objects, B's __bases__ attribute is searched in a depth-first + # fashion for A -- the presence of the __bases__ attribute is + # considered sufficient for this determination. + + bint PyObject_IsSubclass(object derived, object cls) except -1 + # Returns 1 if the class derived is identical to or derived from + # the class cls, otherwise returns 0. In case of an error, returns + # -1. If cls is a tuple, the check will be done against every + # entry in cls. The result will be 1 when at least one of the + # checks returns 1, otherwise it will be 0. If either derived or + # cls is not an actual class object (or tuple), this function uses + # the generic algorithm described above. New in version + # 2.1. Changed in version 2.3: Older versions of Python did not + # support a tuple as the second argument. + + bint PyCallable_Check(object o) + # Determine if the object o is callable. 
Return 1 if the object is + # callable and 0 otherwise. This function always succeeds. + + object PyObject_Call(object callable_object, object args, object kw) + # Return value: New reference. + # Call a callable Python object callable_object, with arguments + # given by the tuple args, and named arguments given by the + # dictionary kw. If no named arguments are needed, kw may be + # NULL. args must not be NULL, use an empty tuple if no arguments + # are needed. Returns the result of the call on success, or NULL + # on failure. This is the equivalent of the Python expression + # "apply(callable_object, args, kw)" or "callable_object(*args, + # **kw)". + + object PyObject_CallObject(object callable_object, object args) + # Return value: New reference. + # Call a callable Python object callable_object, with arguments + # given by the tuple args. If no arguments are needed, then args + # may be NULL. Returns the result of the call on success, or NULL + # on failure. This is the equivalent of the Python expression + # "apply(callable_object, args)" or "callable_object(*args)". + + object PyObject_CallFunction(object callable, char *format, ...) + # Return value: New reference. + # Call a callable Python object callable, with a variable number + # of C arguments. The C arguments are described using a + # Py_BuildValue() style format string. The format may be NULL, + # indicating that no arguments are provided. Returns the result of + # the call on success, or NULL on failure. This is the equivalent + # of the Python expression "apply(callable, args)" or + # "callable(*args)". Note that if you only pass object args, + # PyObject_CallFunctionObjArgs is a faster alternative. + + object PyObject_CallMethod(object o, char *method, char *format, ...) + # Return value: New reference. + # Call the method named method of object o with a variable number + # of C arguments. The C arguments are described by a + # Py_BuildValue() format string that should produce a tuple. 
The + # format may be NULL, indicating that no arguments are + # provided. Returns the result of the call on success, or NULL on + # failure. This is the equivalent of the Python expression + # "o.method(args)". Note that if you only pass object args, + # PyObject_CallMethodObjArgs is a faster alternative. + + #object PyObject_CallFunctionObjArgs(object callable, ..., NULL) + object PyObject_CallFunctionObjArgs(object callable, ...) + # Return value: New reference. + # Call a callable Python object callable, with a variable number + # of PyObject* arguments. The arguments are provided as a variable + # number of parameters followed by NULL. Returns the result of the + # call on success, or NULL on failure. + + #PyObject* PyObject_CallMethodObjArgs(object o, object name, ..., NULL) + object PyObject_CallMethodObjArgs(object o, object name, ...) + # Return value: New reference. + # Calls a method of the object o, where the name of the method is + # given as a Python string object in name. It is called with a + # variable number of PyObject* arguments. The arguments are + # provided as a variable number of parameters followed by + # NULL. Returns the result of the call on success, or NULL on + # failure. + + long PyObject_Hash(object o) except? -1 + # Compute and return the hash value of an object o. On failure, + # return -1. This is the equivalent of the Python expression + # "hash(o)". + + bint PyObject_IsTrue(object o) except -1 + # Returns 1 if the object o is considered to be true, and 0 + # otherwise. This is equivalent to the Python expression "not not + # o". On failure, return -1. + + bint PyObject_Not(object o) except -1 + # Returns 0 if the object o is considered to be true, and 1 + # otherwise. This is equivalent to the Python expression "not + # o". On failure, return -1. + + object PyObject_Type(object o) + # Return value: New reference. + # When o is non-NULL, returns a type object corresponding to the + # object type of object o. 
On failure, raises SystemError and + # returns NULL. This is equivalent to the Python expression + # type(o). This function increments the reference count of the + # return value. There's really no reason to use this function + # instead of the common expression o->ob_type, which returns a + # pointer of type PyTypeObject*, except when the incremented + # reference count is needed. + + bint PyObject_TypeCheck(object o, PyTypeObject *type) + # Return true if the object o is of type type or a subtype of + # type. Both parameters must be non-NULL. + + Py_ssize_t PyObject_Length(object o) except -1 + Py_ssize_t PyObject_Size(object o) except -1 + # Return the length of object o. If the object o provides either + # the sequence and mapping protocols, the sequence length is + # returned. On error, -1 is returned. This is the equivalent to + # the Python expression "len(o)". + + Py_ssize_t PyObject_LengthHint(object o, Py_ssize_t default) except -1 + # Return an estimated length for the object o. First try to return its + # actual length, then an estimate using __length_hint__(), and finally + # return the default value. On error, return -1. This is the equivalent to + # the Python expression "operator.length_hint(o, default)". + # New in version 3.4. + + object PyObject_GetItem(object o, object key) + # Return value: New reference. + # Return element of o corresponding to the object key or NULL on + # failure. This is the equivalent of the Python expression + # "o[key]". + + int PyObject_SetItem(object o, object key, object v) except -1 + # Map the object key to the value v. Returns -1 on failure. This + # is the equivalent of the Python statement "o[key] = v". + + int PyObject_DelItem(object o, object key) except -1 + # Delete the mapping for key from o. Returns -1 on failure. This + # is the equivalent of the Python statement "del o[key]". + + int PyObject_AsFileDescriptor(object o) except -1 + # Derives a file-descriptor from a Python object. 
If the object is + # an integer or long integer, its value is returned. If not, the + # object's fileno() method is called if it exists; the method must + # return an integer or long integer, which is returned as the file + # descriptor value. Returns -1 on failure. + + object PyObject_Dir(object o) + # Return value: New reference. + # This is equivalent to the Python expression "dir(o)", returning + # a (possibly empty) list of strings appropriate for the object + # argument, or NULL if there was an error. If the argument is + # NULL, this is like the Python "dir()", returning the names of + # the current locals; in this case, if no execution frame is + # active then NULL is returned but PyErr_Occurred() will return + # false. + + object PyObject_GetIter(object o) + # Return value: New reference. + # This is equivalent to the Python expression "iter(o)". It + # returns a new iterator for the object argument, or the object + # itself if the object is already an iterator. Raises TypeError + # and returns NULL if the object cannot be iterated. + + Py_ssize_t Py_SIZE(object o) + + object PyObject_Format(object obj, object format_spec) + # Takes an arbitrary object and returns the result of calling + # obj.__format__(format_spec). 
+ # Added in Py2.6 + + # Type flags (tp_flags of PyTypeObject) + long Py_TPFLAGS_HAVE_GETCHARBUFFER + long Py_TPFLAGS_HAVE_SEQUENCE_IN + long Py_TPFLAGS_HAVE_INPLACEOPS + long Py_TPFLAGS_CHECKTYPES + long Py_TPFLAGS_HAVE_RICHCOMPARE + long Py_TPFLAGS_HAVE_WEAKREFS + long Py_TPFLAGS_HAVE_ITER + long Py_TPFLAGS_HAVE_CLASS + long Py_TPFLAGS_HEAPTYPE + long Py_TPFLAGS_BASETYPE + long Py_TPFLAGS_READY + long Py_TPFLAGS_READYING + long Py_TPFLAGS_HAVE_GC + long Py_TPFLAGS_HAVE_STACKLESS_EXTENSION + long Py_TPFLAGS_HAVE_INDEX + long Py_TPFLAGS_HAVE_VERSION_TAG + long Py_TPFLAGS_VALID_VERSION_TAG + long Py_TPFLAGS_IS_ABSTRACT + long Py_TPFLAGS_HAVE_NEWBUFFER + long Py_TPFLAGS_INT_SUBCLASS + long Py_TPFLAGS_LONG_SUBCLASS + long Py_TPFLAGS_LIST_SUBCLASS + long Py_TPFLAGS_TUPLE_SUBCLASS + long Py_TPFLAGS_STRING_SUBCLASS + long Py_TPFLAGS_UNICODE_SUBCLASS + long Py_TPFLAGS_DICT_SUBCLASS + long Py_TPFLAGS_BASE_EXC_SUBCLASS + long Py_TPFLAGS_TYPE_SUBCLASS + long Py_TPFLAGS_DEFAULT_EXTERNAL + long Py_TPFLAGS_DEFAULT_CORE + long Py_TPFLAGS_DEFAULT + long Py_TPFLAGS_HAVE_FINALIZE diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/oldbuffer.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/oldbuffer.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0222428ed48e1cb84cde849f83b36e3079089245 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/oldbuffer.pxd @@ -0,0 +1,63 @@ +# Legacy Python 2 buffer interface. +# +# These functions are no longer available in Python 3, use the new +# buffer interface instead. + +cdef extern from "Python.h": + cdef enum _: + Py_END_OF_BUFFER + # This constant may be passed as the size parameter to + # PyBuffer_FromObject() or PyBuffer_FromReadWriteObject(). 
It + # indicates that the new PyBufferObject should refer to base object + # from the specified offset to the end of its exported + # buffer. Using this enables the caller to avoid querying the base + # object for its length. + + bint PyBuffer_Check(object p) + # Return true if the argument has type PyBuffer_Type. + + object PyBuffer_FromObject(object base, Py_ssize_t offset, Py_ssize_t size) + # Return value: New reference. + # + # Return a new read-only buffer object. This raises TypeError if + # base doesn't support the read-only buffer protocol or doesn't + # provide exactly one buffer segment, or it raises ValueError if + # offset is less than zero. The buffer will hold a reference to the + # base object, and the buffer's contents will refer to the base + # object's buffer interface, starting as position offset and + # extending for size bytes. If size is Py_END_OF_BUFFER, then the + # new buffer's contents extend to the length of the base object's + # exported buffer data. + + object PyBuffer_FromReadWriteObject(object base, Py_ssize_t offset, Py_ssize_t size) + # Return value: New reference. + # + # Return a new writable buffer object. Parameters and exceptions + # are similar to those for PyBuffer_FromObject(). If the base + # object does not export the writeable buffer protocol, then + # TypeError is raised. + + object PyBuffer_FromMemory(void *ptr, Py_ssize_t size) + # Return value: New reference. + # + # Return a new read-only buffer object that reads from a specified + # location in memory, with a specified size. The caller is + # responsible for ensuring that the memory buffer, passed in as + # ptr, is not deallocated while the returned buffer object + # exists. Raises ValueError if size is less than zero. Note that + # Py_END_OF_BUFFER may not be passed for the size parameter; + # ValueError will be raised in that case. + + object PyBuffer_FromReadWriteMemory(void *ptr, Py_ssize_t size) + # Return value: New reference. 
+ # + # Similar to PyBuffer_FromMemory(), but the returned buffer is + # writable. + + object PyBuffer_New(Py_ssize_t size) + # Return value: New reference. + # + # Return a new writable buffer object that maintains its own memory + # buffer of size bytes. ValueError is returned if size is not zero + # or positive. Note that the memory buffer (as returned by + # PyObject_AsWriteBuffer()) is not specifically aligned. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pycapsule.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pycapsule.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1c21b370b3c2939361ff071798a6bcd4fdf82473 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pycapsule.pxd @@ -0,0 +1,143 @@ + +# available since Python 2.7! + + +cdef extern from "Python.h": + + ctypedef struct PyCapsule_Type + # This subtype of PyObject represents an opaque value, useful for + # C extension modules who need to pass an opaque value (as a void* + # pointer) through Python code to other C code. It is often used + # to make a C function pointer defined in one module available to + # other modules, so the regular import mechanism can be used to + # access C APIs defined in dynamically loaded modules. + + + ctypedef void (*PyCapsule_Destructor)(object o) noexcept + # The type of a destructor callback for a capsule. + # + # See PyCapsule_New() for the semantics of PyCapsule_Destructor + # callbacks. + + + bint PyCapsule_CheckExact(object o) + # Return true if its argument is a PyCapsule. + + + object PyCapsule_New(void *pointer, const char *name, + PyCapsule_Destructor destructor) + # Return value: New reference. + # + # Create a PyCapsule encapsulating the pointer. The pointer + # argument may not be NULL. + # + # On failure, set an exception and return NULL. 
+ # + # The name string may either be NULL or a pointer to a valid C + # string. If non-NULL, this string must outlive the + # capsule. (Though it is permitted to free it inside the + # destructor.) + # + # If the destructor argument is not NULL, it will be called with + # the capsule as its argument when it is destroyed. + # + # If this capsule will be stored as an attribute of a module, the + # name should be specified as modulename.attributename. This will + # enable other modules to import the capsule using + # PyCapsule_Import(). + + + void* PyCapsule_GetPointer(object capsule, const char *name) except? NULL + # Retrieve the pointer stored in the capsule. On failure, set an + # exception and return NULL. + # + # The name parameter must compare exactly to the name stored in + # the capsule. If the name stored in the capsule is NULL, the name + # passed in must also be NULL. Python uses the C function strcmp() + # to compare capsule names. + + + PyCapsule_Destructor PyCapsule_GetDestructor(object capsule) except? NULL + # Return the current destructor stored in the capsule. On failure, + # set an exception and return NULL. + # + # It is legal for a capsule to have a NULL destructor. This makes + # a NULL return code somewhat ambiguous; use PyCapsule_IsValid() + # or PyErr_Occurred() to disambiguate. + + + const char* PyCapsule_GetName(object capsule) except? NULL + # Return the current name stored in the capsule. On failure, set + # an exception and return NULL. + # + # It is legal for a capsule to have a NULL name. This makes a NULL + # return code somewhat ambiguous; use PyCapsule_IsValid() or + # PyErr_Occurred() to disambiguate. + + + void* PyCapsule_GetContext(object capsule) except? NULL + # Return the current context stored in the capsule. On failure, + # set an exception and return NULL. + # + # It is legal for a capsule to have a NULL context. 
This makes a + # NULL return code somewhat ambiguous; use PyCapsule_IsValid() or + # PyErr_Occurred() to disambiguate. + + + bint PyCapsule_IsValid(object capsule, const char *name) + # Determines whether or not capsule is a valid capsule. A valid + # capsule is non-NULL, passes PyCapsule_CheckExact(), has a + # non-NULL pointer stored in it, and its internal name matches the + # name parameter. (See PyCapsule_GetPointer() for information on + # how capsule names are compared.) + # + # In other words, if PyCapsule_IsValid() returns a true value, + # calls to any of the accessors (any function starting with + # PyCapsule_Get()) are guaranteed to succeed. + # + # Return a nonzero value if the object is valid and matches the + # name passed in. Return 0 otherwise. This function will not fail. + + + int PyCapsule_SetPointer(object capsule, void *pointer) except -1 + # Set the void pointer inside capsule to pointer. The pointer may + # not be NULL. + # + # Return 0 on success. Return nonzero and set an exception on + # failure. + + + int PyCapsule_SetDestructor(object capsule, PyCapsule_Destructor destructor) except -1 + # Set the destructor inside capsule to destructor. + # + # Return 0 on success. Return nonzero and set an exception on + # failure. + + + int PyCapsule_SetName(object capsule, const char *name) except -1 + # Set the name inside capsule to name. If non-NULL, the name must + # outlive the capsule. If the previous name stored in the capsule + # was not NULL, no attempt is made to free it. + # + # Return 0 on success. Return nonzero and set an exception on + # failure. + + + int PyCapsule_SetContext(object capsule, void *context) except -1 + # Set the context pointer inside capsule to context. Return 0 on + # success. Return nonzero and set an exception on failure. + + + void* PyCapsule_Import(const char *name, int no_block) except? NULL + # Import a pointer to a C object from a capsule attribute in a + # module. 
The name parameter should specify the full name to the + # attribute, as in module.attribute. The name stored in the + # capsule must match this string exactly. If no_block is true, + # import the module without blocking (using + # PyImport_ImportModuleNoBlock()). If no_block is false, import + # the module conventionally (using PyImport_ImportModule()). + # + # Return the capsule’s internal pointer on success. On failure, + # set an exception and return NULL. However, if PyCapsule_Import() + # failed to import the module, and no_block was true, no exception + # is set. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pyport.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pyport.pxd new file mode 100644 index 0000000000000000000000000000000000000000..fec59c9c8c4944c7c8ddec39d0e047bb0a0461bc --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pyport.pxd @@ -0,0 +1,8 @@ +cdef extern from "Python.h": + ctypedef int int32_t + ctypedef int int64_t + ctypedef unsigned int uint32_t + ctypedef unsigned int uint64_t + + const Py_ssize_t PY_SSIZE_T_MIN + const Py_ssize_t PY_SSIZE_T_MAX diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pystate.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pystate.pxd new file mode 100644 index 0000000000000000000000000000000000000000..ee8856b20abe758e6a454eb76f97867ecfb9a111 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/pystate.pxd @@ -0,0 +1,95 @@ +# Thread and interpreter state structures and their interfaces + +from .object cimport PyObject + +cdef extern from "Python.h": + + # We make these an opaque types. If the user wants specific attributes, + # they can be declared manually. 
+ + ctypedef long PY_INT64_T # FIXME: Py2.7+, not defined here but used here + + ctypedef struct PyInterpreterState: + pass + + ctypedef struct PyThreadState: + pass + + ctypedef struct PyFrameObject: + pass + + # This is not actually a struct, but make sure it can never be coerced to + # an int or used in arithmetic expressions + ctypedef struct PyGILState_STATE: + pass + + # The type of the trace function registered using PyEval_SetProfile() and + # PyEval_SetTrace(). + # Py_tracefunc return -1 when raising an exception, or 0 for success. + ctypedef int (*Py_tracefunc)(PyObject *, PyFrameObject *, int, PyObject *) + + # The following values are used for 'what' for tracefunc functions + enum: + PyTrace_CALL + PyTrace_EXCEPTION + PyTrace_LINE + PyTrace_RETURN + PyTrace_C_CALL + PyTrace_C_EXCEPTION + PyTrace_C_RETURN + + + PyInterpreterState * PyInterpreterState_New() + void PyInterpreterState_Clear(PyInterpreterState *) + void PyInterpreterState_Delete(PyInterpreterState *) + PY_INT64_T PyInterpreterState_GetID(PyInterpreterState *) + + PyThreadState * PyThreadState_New(PyInterpreterState *) + void PyThreadState_Clear(PyThreadState *) + void PyThreadState_Delete(PyThreadState *) + + PyThreadState * PyThreadState_Get() + PyThreadState * PyThreadState_Swap(PyThreadState *) # NOTE: DO NOT USE IN CYTHON CODE ! + PyObject * PyThreadState_GetDict() + int PyThreadState_SetAsyncExc(long, PyObject *) + + # Ensure that the current thread is ready to call the Python + # C API, regardless of the current state of Python, or of its + # thread lock. This may be called as many times as desired + # by a thread so long as each call is matched with a call to + # PyGILState_Release(). In general, other thread-state APIs may + # be used between _Ensure() and _Release() calls, so long as the + # thread-state is restored to its previous state before the Release(). + # For example, normal use of the Py_BEGIN_ALLOW_THREADS/ + # Py_END_ALLOW_THREADS macros are acceptable. 
+ + # The return value is an opaque "handle" to the thread state when + # PyGILState_Ensure() was called, and must be passed to + # PyGILState_Release() to ensure Python is left in the same state. Even + # though recursive calls are allowed, these handles can *not* be shared - + # each unique call to PyGILState_Ensure must save the handle for its + # call to PyGILState_Release. + + # When the function returns, the current thread will hold the GIL. + + # Failure is a fatal error. + PyGILState_STATE PyGILState_Ensure() + + # Release any resources previously acquired. After this call, Python's + # state will be the same as it was prior to the corresponding + # PyGILState_Ensure() call (but generally this state will be unknown to + # the caller, hence the use of the GILState API.) + + # Every call to PyGILState_Ensure must be matched by a call to + # PyGILState_Release on the same thread. + void PyGILState_Release(PyGILState_STATE) + + # Return 1 if the current thread holds the GIL and 0 otherwise. + int PyGILState_Check() + + # Routines for advanced debuggers, requested by David Beazley. + # Don't use unless you know what you are doing! 
+ PyInterpreterState * PyInterpreterState_Head() + PyInterpreterState * PyInterpreterState_Next(PyInterpreterState *) + PyThreadState * PyInterpreterState_ThreadHead(PyInterpreterState *) + PyThreadState * PyThreadState_Next(PyThreadState *) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/sequence.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/sequence.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e50e4c495ea598e1e2b8ece8a8447337354ada93 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/sequence.pxd @@ -0,0 +1,134 @@ +from .object cimport PyObject + +cdef extern from "Python.h": + + ############################################################################ + # 6.3 Sequence Protocol + ############################################################################ + + bint PySequence_Check(object o) + # Return 1 if the object provides sequence protocol, and 0 + # otherwise. This function always succeeds. + + Py_ssize_t PySequence_Size(object o) except -1 + # Returns the number of objects in sequence o on success, and -1 + # on failure. For objects that do not provide sequence protocol, + # this is equivalent to the Python expression "len(o)". + + Py_ssize_t PySequence_Length(object o) except -1 + # Alternate name for PySequence_Size(). + + object PySequence_Concat(object o1, object o2) + # Return value: New reference. + # Return the concatenation of o1 and o2 on success, and NULL on + # failure. This is the equivalent of the Python expression "o1 + + # o2". + + object PySequence_Repeat(object o, Py_ssize_t count) + # Return value: New reference. + # Return the result of repeating sequence object o count times, or + # NULL on failure. This is the equivalent of the Python expression + # "o * count". + + object PySequence_InPlaceConcat(object o1, object o2) + # Return value: New reference. 
+ # Return the concatenation of o1 and o2 on success, and NULL on + # failure. The operation is done in-place when o1 supports + # it. This is the equivalent of the Python expression "o1 += o2". + + object PySequence_InPlaceRepeat(object o, Py_ssize_t count) + # Return value: New reference. + # Return the result of repeating sequence object o count times, or + # NULL on failure. The operation is done in-place when o supports + # it. This is the equivalent of the Python expression "o *= + # count". + + object PySequence_GetItem(object o, Py_ssize_t i) + # Return value: New reference. + # Return the ith element of o, or NULL on failure. This is the + # equivalent of the Python expression "o[i]". + + object PySequence_GetSlice(object o, Py_ssize_t i1, Py_ssize_t i2) + # Return value: New reference. + # Return the slice of sequence object o between i1 and i2, or NULL + # on failure. This is the equivalent of the Python expression + # "o[i1:i2]". + + int PySequence_SetItem(object o, Py_ssize_t i, object v) except -1 + # Assign object v to the ith element of o. Returns -1 on + # failure. This is the equivalent of the Python statement "o[i] = + # v". This function does not steal a reference to v. + + int PySequence_DelItem(object o, Py_ssize_t i) except -1 + # Delete the ith element of object o. Returns -1 on failure. This + # is the equivalent of the Python statement "del o[i]". + + int PySequence_SetSlice(object o, Py_ssize_t i1, Py_ssize_t i2, object v) except -1 + # Assign the sequence object v to the slice in sequence object o + # from i1 to i2. This is the equivalent of the Python statement + # "o[i1:i2] = v". + + int PySequence_DelSlice(object o, Py_ssize_t i1, Py_ssize_t i2) except -1 + # Delete the slice in sequence object o from i1 to i2. Returns -1 + # on failure. This is the equivalent of the Python statement "del + # o[i1:i2]". 
+ + int PySequence_Count(object o, object value) except -1 + # Return the number of occurrences of value in o, that is, return + # the number of keys for which o[key] == value. On failure, return + # -1. This is equivalent to the Python expression + # "o.count(value)". + + int PySequence_Contains(object o, object value) except -1 + # Determine if o contains value. If an item in o is equal to + # value, return 1, otherwise return 0. On error, return -1. This + # is equivalent to the Python expression "value in o". + + Py_ssize_t PySequence_Index(object o, object value) except -1 + # Return the first index i for which o[i] == value. On error, + # return -1. This is equivalent to the Python expression + # "o.index(value)". + + object PySequence_List(object o) + # Return value: New reference. + # Return a list object with the same contents as the arbitrary + # sequence o. The returned list is guaranteed to be new. + + object PySequence_Tuple(object o) + # Return value: New reference. + # Return a tuple object with the same contents as the arbitrary + # sequence o or NULL on failure. If o is a tuple, a new reference + # will be returned, otherwise a tuple will be constructed with the + # appropriate contents. This is equivalent to the Python + # expression "tuple(o)". + + object PySequence_Fast(object o, char *m) + # Return value: New reference. + # Returns the sequence o as a tuple, unless it is already a tuple + # or list, in which case o is returned. Use + # PySequence_Fast_GET_ITEM() to access the members of the + # result. Returns NULL on failure. If the object is not a + # sequence, raises TypeError with m as the message text. + + PyObject* PySequence_Fast_GET_ITEM(object o, Py_ssize_t i) + # Return value: Borrowed reference. + # Return the ith element of o, assuming that o was returned by + # PySequence_Fast(), o is not NULL, and that i is within bounds. + + PyObject** PySequence_Fast_ITEMS(object o) + # Return the underlying array of PyObject pointers. 
Assumes that o + # was returned by PySequence_Fast() and o is not NULL. + + object PySequence_ITEM(object o, Py_ssize_t i) + # Return value: New reference. + # Return the ith element of o or NULL on failure. Macro form of + # PySequence_GetItem() but without checking that + # PySequence_Check(o) is true and without adjustment for negative + # indices. + + Py_ssize_t PySequence_Fast_GET_SIZE(object o) + # Returns the length of o, assuming that o was returned by + # PySequence_Fast() and that o is not NULL. The size can also be + # gotten by calling PySequence_Size() on o, but + # PySequence_Fast_GET_SIZE() is faster because it can assume o is + # a list or tuple. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/type.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/type.pxd new file mode 100644 index 0000000000000000000000000000000000000000..928a748cd3e245818bde517f494e2283aac97354 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/type.pxd @@ -0,0 +1,53 @@ + +cdef extern from "Python.h": + # The C structure of the objects used to describe built-in types. + + ############################################################################ + # 7.1.1 Type Objects + ############################################################################ + + ctypedef class __builtin__.type [object PyTypeObject]: + pass + + # PyObject* PyType_Type + # This is the type object for type objects; it is the same object + # as type and types.TypeType in the Python layer. + + bint PyType_Check(object o) + # Return true if the object o is a type object, including + # instances of types derived from the standard type object. Return + # false in all other cases. + + bint PyType_CheckExact(object o) + # Return true if the object o is a type object, but not a subtype + # of the standard type object. Return false in all other + # cases. 
+ + void PyType_Modified(type type) + # Invalidate the internal lookup cache for the type and all of its + # subtypes. This function must be called after any manual modification + # of the attributes or base classes of the type. + + bint PyType_HasFeature(object o, int feature) + # Return true if the type object o sets the feature feature. Type + # features are denoted by single bit flags. + + bint PyType_IS_GC(object o) + # Return true if the type object includes support for the cycle + # detector; this tests the type flag Py_TPFLAGS_HAVE_GC. + + bint PyType_IsSubtype(type a, type b) + # Return true if a is a subtype of b. + + object PyType_GenericAlloc(object type, Py_ssize_t nitems) + # Return value: New reference. + + object PyType_GenericNew(type type, object args, object kwds) + # Return value: New reference. + + bint PyType_Ready(type type) except -1 + # Finalize a type object. This should be called on all type + # objects to finish their initialization. This function is + # responsible for adding inherited slots from a type's base + # class. Return 0 on success, or return -1 and sets an exception + # on error. diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/version.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/version.pxd new file mode 100644 index 0000000000000000000000000000000000000000..ce31b249cf0a86ee14da4e05203b089f7d98f9c4 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/cpython/version.pxd @@ -0,0 +1,32 @@ +# Python version constants +# +# It's better to evaluate these at runtime (i.e. C compile time) using +# +# if PY_MAJOR_VERSION >= 3: +# do_stuff_in_Py3_0_and_later() +# if PY_VERSION_HEX >= 0x02070000: +# do_stuff_in_Py2_7_and_later() +# +# than using the IF/DEF statements, which are evaluated at Cython +# compile time. This will keep your C code portable. 
+ + +cdef extern from *: + # the complete version, e.g. 0x010502B2 == 1.5.2b2 + int PY_VERSION_HEX + + # the individual sections as plain numbers + int PY_MAJOR_VERSION + int PY_MINOR_VERSION + int PY_MICRO_VERSION + int PY_RELEASE_LEVEL + int PY_RELEASE_SERIAL + + # Note: PY_RELEASE_LEVEL is one of + # 0xA (alpha) + # 0xB (beta) + # 0xC (release candidate) + # 0xF (final) + + char PY_VERSION[] + char PY_PATCHLEVEL_REVISION[] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/__init__.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..111ea25c2f27d2feeec82db94dee6954bce27976 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/__init__.pxd @@ -0,0 +1,4 @@ +cdef extern from *: + ctypedef bint bool + ctypedef void* nullptr_t + nullptr_t nullptr diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/algorithm.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/algorithm.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b800c569ccf98788ae4e7add08b4a7191a4978d7 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/algorithm.pxd @@ -0,0 +1,320 @@ +from libcpp cimport bool +from libcpp.utility cimport pair +from libc.stddef import ptrdiff_t + + +cdef extern from "" namespace "std" nogil: + # Non-modifying sequence operations + bool all_of[Iter, Pred](Iter first, Iter last, Pred pred) except + + bool all_of[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred pred) except + + bool any_of[Iter, Pred](Iter first, Iter last, Pred pred) except + + bool any_of[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred pred) except + + bool 
none_of[Iter, Pred](Iter first, Iter last, Pred pred) except + + bool none_of[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred pred) except + + + void for_each[Iter, UnaryFunction](Iter first, Iter last, UnaryFunction f) except + # actually returns f + void for_each[ExecutionPolicy, Iter, UnaryFunction](ExecutionPolicy&& policy, Iter first, Iter last, UnaryFunction f) except + # actually returns f + + ptrdiff_t count[Iter, T](Iter first, Iter last, const T& value) except + + ptrdiff_t count[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& value) except + + ptrdiff_t count_if[Iter, Pred](Iter first, Iter last, Pred pred) except + + ptrdiff_t count_if[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred pred) except + + + pair[Iter1, Iter2] mismatch[Iter1, Iter2]( + Iter1 first1, Iter1 last1, Iter2 first2) except + # other overloads are tricky + pair[Iter1, Iter2] mismatch[ExecutionPolicy, Iter1, Iter2]( + ExecutionPolicy&& policy, Iter1 first1, Iter1 last1, Iter2 first2) except + + + Iter find[Iter, T](Iter first, Iter last, const T& value) except + + Iter find[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& value) except + + + Iter find_if[Iter, Pred](Iter first, Iter last, Pred pred) except + + Iter find_if[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred pred) except + + Iter find_if_not[Iter, Pred](Iter first, Iter last, Pred pred) except + + Iter find_if_not[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred pred) except + + + Iter1 find_end[Iter1, Iter2](Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2) except + + Iter1 find_end[ExecutionPolicy, Iter1, Iter2](ExecutionPolicy&& policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2) except + + Iter1 find_end[Iter1, Iter2, BinaryPred]( + Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, BinaryPred 
pred) except + + Iter1 find_end[ExecutionPolicy, Iter1, Iter2, BinaryPred]( + ExecutionPolicy&& policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, BinaryPred pred) except + + + + Iter1 find_first_of[Iter1, Iter2](Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2) except + + Iter1 find_first_of[Iter1, Iter2, BinaryPred]( + Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, BinaryPred pred) except + + Iter1 find_first_of[ExecutionPolicy, Iter1, Iter2](ExecutionPolicy&& policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2) except + + Iter1 find_first_of[ExecutionPolicy, Iter1, Iter2, BinaryPred]( + ExecutionPolicy&& policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, BinaryPred pred) except + + + Iter adjacent_find[Iter](Iter first, Iter last) except + + Iter adjacent_find[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + Iter adjacent_find[Iter, BinaryPred](Iter first, Iter last, BinaryPred pred) except + + Iter adjacent_find[ExecutionPolicy, Iter, BinaryPred](ExecutionPolicy&& policy, Iter first, Iter last, BinaryPred pred) except + + + Iter1 search[Iter1, Iter2](Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2) except + + Iter1 search[ExecutionPolicy, Iter1, Iter2](ExecutionPolicy&& policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2) except + + Iter1 search[Iter1, Iter2, BinaryPred]( + Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, BinaryPred pred) except + + Iter1 search[ExecutionPolicy, Iter1, Iter2, BinaryPred]( + ExecutionPolicy&& policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, BinaryPred pred) except + + Iter search_n[Iter, Size, T](Iter first1, Iter last1, Size count, const T& value) except + + Iter search_n[ExecutionPolicy, Iter, Size, T](ExecutionPolicy&& policy, Iter first1, Iter last1, Size count, const T& value) except + + Iter search_n[Iter, Size, T, BinaryPred]( + Iter first1, Iter last1, Size count, const T& value, BinaryPred pred) except + + Iter 
search_n[ExecutionPolicy, Iter, Size, T, BinaryPred]( + ExecutionPolicy&& policy, Iter first1, Iter last1, Size count, const T& value, BinaryPred pred) except + + + # Modifying sequence operations + OutputIt copy[InputIt, OutputIt](InputIt first, InputIt last, OutputIt d_first) except + + OutputIt copy[ExecutionPolicy, InputIt, OutputIt](ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first) except + + OutputIt copy_if[InputIt, OutputIt, Pred](InputIt first, InputIt last, OutputIt d_first, Pred pred) except + + OutputIt copy_if[ExecutionPolicy, InputIt, OutputIt, Pred](ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, Pred pred) except + + OutputIt copy_n[InputIt, Size, OutputIt](InputIt first, Size count, OutputIt result) except + + OutputIt copy_n[ExecutionPolicy, InputIt, Size, OutputIt](ExecutionPolicy&& policy, InputIt first, Size count, OutputIt result) except + + Iter2 copy_backward[Iter1, Iter2](Iter1 first, Iter1 last, Iter2 d_last) except + + Iter2 copy_backward[ExecutionPolicy, Iter1, Iter2](ExecutionPolicy&& policy, Iter1 first, Iter1 last, Iter2 d_last) except + + + OutputIt move[InputIt, OutputIt](InputIt first, InputIt last, OutputIt d_first) except + + OutputIt move[ExecutionPolicy, InputIt, OutputIt](ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first) except + + Iter2 move_backward[Iter1, Iter2](Iter1 first, Iter1 last, Iter2 d_last) except + + Iter2 move_backward[ExecutionPolicy, Iter1, Iter2](ExecutionPolicy&& policy, Iter1 first, Iter1 last, Iter2 d_last) except + + + void fill[Iter, T](Iter first, Iter last, const T& value) except + + void fill[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& value) except + + Iter fill_n[Iter, Size, T](Iter first, Size count, const T& value) except + + Iter fill_n[ExecutionPolicy, Iter, Size, T](ExecutionPolicy&& policy, Iter first, Size count, const T& value) except + + + OutputIt transform[InputIt, OutputIt, 
UnaryOp]( + InputIt first1, InputIt last1, OutputIt d_first, UnaryOp unary_op) except + + + # This overload is ambiguous with the next one. We just let C++ disambiguate from the arguments + # OutputIt transform[ExecutionPolicy, InputIt, OutputIt, UnaryOp]( + # ExecutionPolicy&& policy, InputIt first1, InputIt last1, OutputIt d_first, UnaryOp unary_op) except + + + OutputIt transform[InputIt1, InputIt2, OutputIt, BinaryOp]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, OutputIt d_first, BinaryOp binary_op) except + + + OutputIt transform[ExecutionPolicy, InputIt1, InputIt2, OutputIt, BinaryOp]( + ExecutionPolicy&& policy, InputIt1 first1, InputIt1 last1, InputIt2 first2, OutputIt d_first, BinaryOp binary_op) except + + + void generate[Iter, Generator](Iter first, Iter last, Generator g) except + + void generate[ExecutionPolicy, Iter, Generator](ExecutionPolicy&& policy, Iter first, Iter last, Generator g) except + + void generate_n[Iter, Size, Generator](Iter first, Size count, Generator g) except + + void generate_n[ExecutionPolicy, Iter, Size, Generator](ExecutionPolicy&& policy, Iter first, Size count, Generator g) except + + + Iter remove[Iter, T](Iter first, Iter last, const T& value) except + + Iter remove[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& value) except + + Iter remove_if[Iter, UnaryPred](Iter first, Iter last, UnaryPred pred) except + + Iter remove_if[ExecutionPolicy, Iter, UnaryPred](ExecutionPolicy&& policy, Iter first, Iter last, UnaryPred pred) except + + OutputIt remove_copy[InputIt, OutputIt, T](InputIt first, InputIt last, OutputIt d_first, const T& value) except + + OutputIt remove_copy[ExecutionPolicy, InputIt, OutputIt, T](ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, const T& value) except + + OutputIt remove_copy_if[InputIt, OutputIt, UnaryPred]( + InputIt first, InputIt last, OutputIt d_first, UnaryPred pred) except + + OutputIt remove_copy_if[ExecutionPolicy, 
InputIt, OutputIt, UnaryPred]( + ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, UnaryPred pred) except + + + void replace[Iter, T](Iter first, Iter last, const T& old_value, const T& new_value) except + + void replace[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& old_value, const T& new_value) except + + void replace_if[Iter, UnaryPred, T](Iter first, Iter last, UnaryPred pred, const T& new_value) except + + OutputIt replace_copy[InputIt, OutputIt, T]( + InputIt first, InputIt last, OutputIt d_first, const T& old_value, const T& new_value) except + + void replace_if[ExecutionPolicy, Iter, UnaryPred, T](ExecutionPolicy&& policy, Iter first, Iter last, UnaryPred pred, const T& new_value) except + + + OutputIt replace_copy[ExecutionPolicy, InputIt, OutputIt, T]( + ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, const T& old_value, const T& new_value) except + + OutputIt replace_copy_if[InputIt, OutputIt, UnaryPred, T]( + InputIt first, InputIt last, OutputIt d_first, UnaryPred pred, const T& new_value) except + + OutputIt replace_copy_if[ExecutionPolicy, InputIt, OutputIt, UnaryPred, T]( + ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, UnaryPred pred, const T& new_value) except + + + void swap[T](T& a, T& b) except + # array overload also works + Iter2 swap_ranges[Iter1, Iter2](Iter1 first1, Iter1 last1, Iter2 first2) except + + void iter_swap[Iter](Iter a, Iter b) except + + + void reverse[Iter](Iter first, Iter last) except + + void reverse[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + OutputIt reverse_copy[InputIt, OutputIt](InputIt first, InputIt last, OutputIt d_first) except + + OutputIt reverse_copy[ExecutionPolicy, InputIt, OutputIt](ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first) except + + + Iter rotate[Iter](Iter first, Iter n_first, Iter last) except + + Iter rotate[ExecutionPolicy, 
Iter](ExecutionPolicy&& policy, Iter first, Iter n_first, Iter last) except + + OutputIt rotate_copy[InputIt, OutputIt](InputIt first, InputIt n_first, InputIt last, OutputIt d_first) except + + OutputIt rotate_copy[ExecutionPolicy, InputIt, OutputIt](ExecutionPolicy&& policy, InputIt first, InputIt n_first, InputIt last, OutputIt d_first) except + + + Iter unique[Iter](Iter first, Iter last) except + + Iter unique[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + Iter unique[Iter, BinaryPred](Iter first, Iter last, BinaryPred p) except + + Iter unique[ExecutionPolicy, Iter, BinaryPred](ExecutionPolicy&& policy, Iter first, Iter last, BinaryPred p) except + + OutputIt unique_copy[InputIt, OutputIt](InputIt first, InputIt last, OutputIt d_first) except + + OutputIt unique_copy[ExecutionPolicy, InputIt, OutputIt](ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first) except + + OutputIt unique_copy[InputIt, OutputIt, BinaryPred]( + InputIt first, InputIt last, OutputIt d_first, BinaryPred pred) except + + OutputIt unique_copy[ExecutionPolicy, InputIt, OutputIt, BinaryPred]( + ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, BinaryPred pred) except + + + SampleIt sample[PopulationIt, SampleIt, Distance, URBG](PopulationIt first, PopulationIt last, SampleIt out, Distance n, URBG&& g) except + + + # Partitioning operations + bool is_partitioned[Iter, Pred](Iter first, Iter last, Pred p) except + + bool is_partitioned[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred p) except + + Iter partition[Iter, Pred](Iter first, Iter last, Pred p) except + + Iter partition[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred p) except + + pair[OutputIt1, OutputIt2] partition_copy[InputIt, OutputIt1, OutputIt2, Pred]( + InputIt first, InputIt last, OutputIt1 d_first_true, OutputIt2 d_first_false, Pred p) except + + pair[OutputIt1, OutputIt2] 
partition_copy[ExecutionPolicy, InputIt, OutputIt1, OutputIt2, Pred]( + ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt1 d_first_true, OutputIt2 d_first_false, Pred p) except + + + Iter stable_partition[Iter, Pred](Iter first, Iter last, Pred p) except + + Iter stable_partition[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred p) except + + Iter partition_point[Iter, Pred](Iter first, Iter last, Pred p) except + + Iter partition_point[ExecutionPolicy, Iter, Pred](ExecutionPolicy&& policy, Iter first, Iter last, Pred p) except + + + # Sorting operations + bool is_sorted[Iter](Iter first, Iter last) except + + bool is_sorted[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + bool is_sorted[Iter, Compare](Iter first, Iter last, Compare comp) except + + bool is_sorted[ExecutionPolicy, Iter, Compare](ExecutionPolicy&& policy, Iter first, Iter last, Compare comp) except + + + Iter is_sorted_until[Iter](Iter first, Iter last) except + + Iter is_sorted_until[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + Iter is_sorted_until[Iter, Compare](Iter first, Iter last, Compare comp) except + + Iter is_sorted_until[ExecutionPolicy, Iter, Compare](ExecutionPolicy&& policy, Iter first, Iter last, Compare comp) except + + + void sort[Iter](Iter first, Iter last) except + + void sort[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + void sort[Iter, Compare](Iter first, Iter last, Compare comp) except + + void sort[ExecutionPolicy, Iter, Compare](ExecutionPolicy&& policy, Iter first, Iter last, Compare comp) except + + + void partial_sort[Iter](Iter first, Iter middle, Iter last) except + + void partial_sort[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter middle, Iter last) except + + void partial_sort[Iter, Compare](Iter first, Iter middle, Iter last, Compare comp) except + + void partial_sort[ExecutionPolicy, Iter, 
Compare](ExecutionPolicy&& policy, Iter first, Iter middle, Iter last, Compare comp) except + + + OutputIt partial_sort_copy[InputIt, OutputIt]( + InputIt first, InputIt last, OutputIt d_first, OutputIt d_last) except + + OutputIt partial_sort_copy[ExecutionPolicy, InputIt, OutputIt]( + ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, OutputIt d_last) except + + OutputIt partial_sort_copy[InputIt, OutputIt, Compare]( + InputIt first, InputIt last, OutputIt d_first, OutputIt d_last, Compare comp) except + + OutputIt partial_sort_copy[ExecutionPolicy, InputIt, OutputIt, Compare]( + ExecutionPolicy&& policy, InputIt first, InputIt last, OutputIt d_first, OutputIt d_last, Compare comp) except + + + void stable_sort[Iter](Iter first, Iter last) except + + void stable_sort[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + void stable_sort[Iter, Compare](Iter first, Iter last, Compare comp) except + + void stable_sort[ExecutionPolicy, Iter, Compare](ExecutionPolicy&& policy, Iter first, Iter last, Compare comp) except + + + void nth_element[Iter](Iter first, Iter nth, Iter last) except + + void nth_element[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter nth, Iter last) except + + void nth_element[Iter, Compare](Iter first, Iter nth, Iter last, Compare comp) except + + void nth_element[ExecutionPolicy, Iter, Compare](ExecutionPolicy&& policy, Iter first, Iter nth, Iter last, Compare comp) except + + + # Binary search operations (on sorted ranges) + Iter lower_bound[Iter, T](Iter first, Iter last, const T& value) except + + Iter lower_bound[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& value) except + + Iter lower_bound[Iter, T, Compare](Iter first, Iter last, const T& value, Compare comp) except + + Iter lower_bound[ExecutionPolicy, Iter, T, Compare](ExecutionPolicy&& policy, Iter first, Iter last, const T& value, Compare comp) except + + + Iter 
upper_bound[Iter, T](Iter first, Iter last, const T& value) except + + Iter upper_bound[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& value) except + + Iter upper_bound[Iter, T, Compare](Iter first, Iter last, const T& value, Compare comp) except + + Iter upper_bound[ExecutionPolicy, Iter, T, Compare](ExecutionPolicy&& policy, Iter first, Iter last, const T& value, Compare comp) except + + + bool binary_search[Iter, T](Iter first, Iter last, const T& value) except + + bool binary_search[ExecutionPolicy, Iter, T](ExecutionPolicy&& policy, Iter first, Iter last, const T& value) except + + bool binary_search[Iter, T, Compare](Iter first, Iter last, const T& value, Compare comp) except + + bool binary_search[ExecutionPolicy, Iter, T, Compare](ExecutionPolicy&& policy, Iter first, Iter last, const T& value, Compare comp) except + + + # Other operations on sorted ranges + OutputIt merge[InputIt1, InputIt2, OutputIt]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out) except + + OutputIt merge[InputIt1, InputIt2, OutputIt, Compare]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out, Compare comp) except + + + void inplace_merge[BidirIt](BidirIt first, BidirIt middle, BidirIt last) except + + void inplace_merge[BidirIt, Compare](BidirIt first, BidirIt middle, BidirIt last, Compare comp) except + + + # Set operations (on sorted ranges) + bool includes[InputIt1, InputIt2]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2) except + + + bool includes[InputIt1, InputIt2, Compare]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, Compare comp) except + + + OutputIt set_difference[InputIt1, InputIt2, OutputIt]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out) except + + + OutputIt set_difference[InputIt1, InputIt2, OutputIt, Compare]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, + OutputIt 
out, Compare comp) except + + + OutputIt set_intersection[InputIt1, InputIt2, OutputIt]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out) except + + + OutputIt set_intersection[InputIt1, InputIt2, OutputIt, Compare]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out, Compare comp) except + + + OutputIt set_symmetric_difference[InputIt1, InputIt2, OutputIt]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out) except + + + OutputIt set_symmetric_difference[InputIt1, InputIt2, OutputIt, Compare]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out, Compare comp) except + + + OutputIt set_union[InputIt1, InputIt2, OutputIt]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out) except + + + OutputIt set_union[InputIt1, InputIt2, OutputIt, Compare]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, OutputIt out, Compare comp) except + + + # Heap operations + void make_heap[Iter](Iter first, Iter last) except + + void make_heap[Iter, Compare](Iter first, Iter last, Compare comp) except + + + void push_heap[Iter](Iter first, Iter last) except + + void push_heap[Iter, Compare](Iter first, Iter last, Compare comp) except + + + void pop_heap[Iter](Iter first, Iter last) except + + void pop_heap[Iter, Compare](Iter first, Iter last, Compare comp) except + + + void sort_heap[Iter](Iter first, Iter last) except + + void sort_heap[Iter, Compare](Iter first, Iter last, Compare comp) except + + + # Minimum/maximum operations + Iter min_element[Iter](Iter first, Iter last) except + + Iter min_element[Iter, Compare](Iter first, Iter last, Compare comp) except + + Iter min_element[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + Iter max_element[Iter](Iter first, Iter last) except + + Iter max_element[Iter, Compare](Iter first, Iter last, Compare comp) except + + Iter 
max_element[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + pair[T, T] minmax[T](const T& a, const T& b) except + + pair[T, T] minmax[T, Compare](const T& a, const T& b, Compare comp) except + + pair[Iter, Iter] minmax_element[Iter](Iter first, Iter last) except + + pair[Iter, Iter] minmax_element[Iter, Compare](Iter first, Iter last, Compare comp) except + + pair[Iter, Iter] minmax_element[ExecutionPolicy, Iter](ExecutionPolicy&& policy, Iter first, Iter last) except + + const T& clamp[T](const T& v, const T& lo, const T& hi) except + + const T& clamp[T, Compare](const T& v, const T& lo, const T& hi, Compare comp) except + + + # Comparison operations + bool equal[InputIt1, InputIt2](InputIt1 first1, InputIt1 last1, InputIt2 first2) except + + bool equal[InputIt1, InputIt2, BinPred](InputIt1 first1, InputIt1 last1, InputIt2 first2, BinPred pred) except + + # ambiguous with previous overload + #bool equal[InputIt1, InputIt2](InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2) except + + bool equal[InputIt1, InputIt2, BinPred](InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, BinPred pred) except + + + bool lexicographical_compare[InputIt1, InputIt2](InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2) except + + # ambiguous with next overload + #bool lexicographical_compare[InputIt1, InputIt2, ExecutionPolicy](ExecutionPolicy&& policy, InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2) except + + bool lexicographical_compare[InputIt1, InputIt2, Compare](InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2, Compare comp) except + + + # Permutation operations + bool is_permutation[ForwardIt1, ForwardIt2](ForwardIt1 first1, ForwardIt1 last1, ForwardIt2 first2) except + + bool is_permutation[ForwardIt1, ForwardIt2, BinaryPred](ForwardIt1 first1, ForwardIt1 last1, ForwardIt2 first2, BinaryPred p) except + + # ambiguous with previous overload + #bool 
is_permutation[ForwardIt1, ForwardIt2](ForwardIt1 first1, ForwardIt1 last1, ForwardIt2 first2, ForwardIt2 last2) except + + bool is_permutation[ForwardIt1, ForwardIt2, BinaryPred](ForwardIt1 first1, ForwardIt1 last1, ForwardIt2 first2, ForwardIt2 last2, BinaryPred p) except + + bool next_permutation[BidirIt](BidirIt first, BidirIt last) except + + bool next_permutation[BidirIt, Compare](BidirIt first, BidirIt last, Compare comp) except + + bool prev_permutation[BidirIt](BidirIt first, BidirIt last) except + + bool prev_permutation[BidirIt, Compare](BidirIt first, BidirIt last, Compare comp) except + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/any.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/any.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7c0d000cdbb963e1c6f89b334f78334c9ab4f9ed --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/any.pxd @@ -0,0 +1,16 @@ +from libcpp cimport bool +from libcpp.typeinfo cimport type_info + +cdef extern from "" namespace "std" nogil: + cdef cppclass any: + any() + any(any&) except + + void reset() + bool has_value() + type_info& type() + T& emplace[T](...) except + + void swap(any&) + any& operator=(any&) except + + any& operator=[U](U&) except + + + cdef T any_cast[T](any&) except + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/complex.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/complex.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9d8a700ea00f900def1a2b47f8c79d516d5d202b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/complex.pxd @@ -0,0 +1,106 @@ +# Note: add integer versions of the functions? 
+ +cdef extern from "" namespace "std" nogil: + cdef cppclass complex[T]: + complex() except + + complex(T, T) except + + complex(complex[T]&) except + + # How to make the converting constructor, i.e. convert complex[double] + # to complex[float]? + + complex[T] operator+(complex[T]&, complex[T]&) + complex[T] operator+(complex[T]&, T&) + complex[T] operator+(T&, complex[T]&) + + complex[T] operator-(complex[T]&, complex[T]&) + complex[T] operator-(complex[T]&, T&) + complex[T] operator-(T&, complex[T]&) + + complex[T] operator*(complex[T]&, complex[T]&) + complex[T] operator*(complex[T]&, T&) + complex[T] operator*(T&, complex[T]&) + + complex[T] operator/(complex[T]&, complex[T]&) + complex[T] operator/(complex[T]&, T&) + complex[T] operator/(T&, complex[T]&) + + complex[T] operator+() + complex[T] operator-() + + bint operator==(complex[T]&, complex[T]&) + bint operator==(complex[T]&, T&) + bint operator==(T&, complex[T]&) + bint operator!=(complex[T]&, complex[T]&) + bint operator!=(complex[T]&, T&) + bint operator!=(T&, complex[T]&) + + # Access real part + T real() + void real(T) + + # Access imaginary part + T imag() + void imag(T) + + # Return real part + T real[T](complex[T]&) + long double real(long double) + double real(double) + float real(float) + + # Return imaginary part + T imag[T](complex[T]&) + long double imag(long double) + double imag(double) + float imag(float) + + T abs[T](complex[T]&) + + T arg[T](complex[T]&) + long double arg(long double) + double arg(double) + float arg(float) + + T norm[T](complex[T]) + long double norm(long double) + double norm(double) + float norm(float) + + complex[T] conj[T](complex[T]&) + complex[long double] conj(long double) + complex[double] conj(double) + complex[float] conj(float) + + complex[T] proj[T](complex[T]) + complex[long double] proj(long double) + complex[double] proj(double) + complex[float] proj(float) + + complex[T] polar[T](T&, T&) + complex[T] polar[T](T&) + + complex[T] exp[T](complex[T]&) + 
complex[T] log[T](complex[T]&) + complex[T] log10[T](complex[T]&) + + complex[T] pow[T](complex[T]&, complex[T]&) + complex[T] pow[T](complex[T]&, T&) + complex[T] pow[T](T&, complex[T]&) + # There are some promotion versions too + + complex[T] sqrt[T](complex[T]&) + + complex[T] sin[T](complex[T]&) + complex[T] cos[T](complex[T]&) + complex[T] tan[T](complex[T]&) + complex[T] asin[T](complex[T]&) + complex[T] acos[T](complex[T]&) + complex[T] atan[T](complex[T]&) + + complex[T] sinh[T](complex[T]&) + complex[T] cosh[T](complex[T]&) + complex[T] tanh[T](complex[T]&) + + complex[T] asinh[T](complex[T]&) + complex[T] acosh[T](complex[T]&) + complex[T] atanh[T](complex[T]&) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/execution.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/execution.pxd new file mode 100644 index 0000000000000000000000000000000000000000..eb92e3404f6ba337a1003139b9f24b55ffa2ee66 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/execution.pxd @@ -0,0 +1,15 @@ + +cdef extern from "" namespace "std::execution" nogil: + cdef cppclass sequenced_policy: + pass + cdef cppclass parallel_policy: + pass + cdef cppclass parallel_unsequenced_policy: + pass + cdef cppclass unsequenced_policy: + pass + + const sequenced_policy seq "std::execution::seq" + const parallel_policy par "std::execution::par" + const parallel_unsequenced_policy par_unseq "std::execution::par_unseq" + const unsequenced_policy unseq "std::execution::unseq" diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/functional.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/functional.pxd new file mode 100644 index 0000000000000000000000000000000000000000..596ea90da0198c44bb5fd4807ff62bebf7480371 --- /dev/null +++ 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/functional.pxd @@ -0,0 +1,26 @@ +from libcpp cimport bool + +cdef extern from "" namespace "std" nogil: + cdef cppclass function[T]: + function() except + + function(T*) except + + function(function&) except + + function(void*) except + + + function operator=(T*) + function operator=(function&) + function operator=(void*) + function operator=[U](U) + + bool operator bool() + + # Comparisons + cdef cppclass greater[T=*]: + # https://github.com/cython/cython/issues/3193 + greater() except + + bool operator()(const T& lhs, const T& rhs) except + + + cdef cppclass reference_wrapper[T]: + reference_wrapper() + reference_wrapper(T) + T& get() const diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/optional.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/optional.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9b0b07a6d2bae85a7dde810b4981b6536479cd31 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/optional.pxd @@ -0,0 +1,34 @@ +from libcpp cimport bool + +cdef extern from "" namespace "std" nogil: + cdef cppclass nullopt_t: + nullopt_t() + + cdef nullopt_t nullopt + + cdef cppclass optional[T]: + ctypedef T value_type + optional() + optional(nullopt_t) + optional(optional&) except + + optional(T&) except + + bool has_value() + T& value() except + + T& value_or[U](U& default_value) + void swap(optional&) + void reset() + T& emplace(...) 
+ T& operator*() + #T* operator->() # Not Supported + optional& operator=(optional&) + optional& operator=[U](U&) + bool operator bool() + bool operator!() + bool operator==[U](optional&, U&) + bool operator!=[U](optional&, U&) + bool operator<[U](optional&, U&) + bool operator>[U](optional&, U&) + bool operator<=[U](optional&, U&) + bool operator>=[U](optional&, U&) + + optional[T] make_optional[T](...) except + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/queue.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/queue.pxd new file mode 100644 index 0000000000000000000000000000000000000000..578cbd91599d805d1508157be3a50984d8e0a8e5 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/queue.pxd @@ -0,0 +1,25 @@ +cdef extern from "" namespace "std" nogil: + cdef cppclass queue[T]: + queue() except + + queue(queue&) except + + #queue(Container&) + T& back() + bint empty() + T& front() + void pop() + void push(T&) + size_t size() + # C++11 methods + void swap(queue&) + + cdef cppclass priority_queue[T]: + priority_queue() except + + priority_queue(priority_queue&) except + + #priority_queue(Container&) + bint empty() + void pop() + void push(T&) + size_t size() + T& top() + # C++11 methods + void swap(priority_queue&) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/stack.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/stack.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f92240f66699cb384ce36771d0da9a90c04cd1a3 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/stack.pxd @@ -0,0 +1,11 @@ +cdef extern from "" namespace "std" nogil: + cdef cppclass stack[T]: + ctypedef T value_type + stack() except + + stack(stack&) except + + #stack(Container&) + bint 
empty() + void pop() + void push(T&) except + + size_t size() + T& top() diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/string.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/string.pxd new file mode 100644 index 0000000000000000000000000000000000000000..566c748f5f62ac6c0f49940abdaef8bf85ec5118 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/string.pxd @@ -0,0 +1,333 @@ + +# deprecated cimport for backwards compatibility: +from libc.string cimport const_char + +cdef extern from "" namespace "std::string" nogil: + const size_t npos + +cdef extern from "" namespace "std" nogil: + cdef cppclass string: + ctypedef char value_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + value_type& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + iterator operator+(size_type) + iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + bint operator<(iterator) + bint operator<(const_iterator) + bint operator>(iterator) + bint operator>(const_iterator) + bint operator<=(iterator) + bint operator<=(const_iterator) + bint operator>=(iterator) + bint operator>=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + const_iterator(const_iterator&) except + + operator=(iterator&) except + + const value_type& operator*() + 
const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + const_iterator operator+(size_type) + const_iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + bint operator<(iterator) + bint operator<(const_iterator) + bint operator>(iterator) + bint operator>(const_iterator) + bint operator<=(iterator) + bint operator<=(const_iterator) + bint operator>=(iterator) + bint operator>=(const_iterator) + + cppclass const_reverse_iterator + cppclass reverse_iterator: + reverse_iterator() except + + reverse_iterator(reverse_iterator&) except + + value_type& operator*() + reverse_iterator operator++() + reverse_iterator operator--() + reverse_iterator operator++(int) + reverse_iterator operator--(int) + reverse_iterator operator+(size_type) + reverse_iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + bint operator<(reverse_iterator) + bint operator<(const_reverse_iterator) + bint operator>(reverse_iterator) + bint operator>(const_reverse_iterator) + bint operator<=(reverse_iterator) + bint operator<=(const_reverse_iterator) + bint operator>=(reverse_iterator) + bint operator>=(const_reverse_iterator) + cppclass const_reverse_iterator: + const_reverse_iterator() except + + const_reverse_iterator(reverse_iterator&) except + + operator=(reverse_iterator&) except + + const value_type& operator*() + const_reverse_iterator operator++() + const_reverse_iterator operator--() + const_reverse_iterator operator++(int) + const_reverse_iterator operator--(int) + const_reverse_iterator operator+(size_type) + 
const_reverse_iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + bint operator<(reverse_iterator) + bint operator<(const_reverse_iterator) + bint operator>(reverse_iterator) + bint operator>(const_reverse_iterator) + bint operator<=(reverse_iterator) + bint operator<=(const_reverse_iterator) + bint operator>=(reverse_iterator) + bint operator>=(const_reverse_iterator) + + string() except + + string(const string& s) except + + string(const string& s, size_t pos) except + + string(const string& s, size_t pos, size_t len) except + + string(const char* s) except + + string(const char* s, size_t n) except + + string(size_t n, char c) except + + string(iterator first, iterator last) except + + + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator crbegin() + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const_reverse_iterator crend() + + const char* c_str() + const char* data() + size_t size() + size_t max_size() + size_t length() + void resize(size_t) except + + void resize(size_t, char) except + + void shrink_to_fit() except + + void swap(string& other) + size_t capacity() + void reserve(size_t) except + + void clear() + bint empty() + + iterator erase(iterator first, iterator last) + iterator erase(iterator p) + iterator erase(const_iterator first, const_iterator last) + iterator erase(const_iterator p) + string& erase(size_t pos, size_t len) except + + string& erase(size_t pos) except + + string& erase() except + + + char& at(size_t pos) except + + char& operator[](size_t pos) + char& front() + char& back() + int 
compare(const string& s) + int compare(size_t pos, size_t len, const string& s) except + + int compare(size_t pos, size_t len, const string& s, size_t subpos, size_t sublen) except + + int compare(const char* s) except + + int compare(size_t pos, size_t len, const char* s) except + + int compare(size_t pos, size_t len, const char* s , size_t n) except + + + string& append(const string& s) except + + string& append(const string& s, size_t subpos, size_t sublen) except + + string& append(const char* s) except + + string& append(const char* s, size_t n) except + + string& append(size_t n, char c) except + + + void push_back(char c) except + + void pop_back() + + string& assign(const string& s) except + + string& assign(const string& s, size_t subpos, size_t sublen) except + + string& assign(const char* s, size_t n) except + + string& assign(const char* s) except + + string& assign(size_t n, char c) except + + + string& insert(size_t pos, const string& s, size_t subpos, size_t sublen) except + + string& insert(size_t pos, const string& s) except + + string& insert(size_t pos, const char* s, size_t n) except + + string& insert(size_t pos, const char* s) except + + string& insert(size_t pos, size_t n, char c) except + + void insert(iterator p, size_t n, char c) except + + iterator insert(iterator p, char c) except + + + string& replace(size_t pos, size_t len, const string& str) except + + string& replace(iterator i1, iterator i2, const string& str) except + + string& replace(size_t pos, size_t len, const string& str, size_t subpos, size_t sublen) except + + string& replace(size_t pos, size_t len, const char* s) except + + string& replace(iterator i1, iterator i2, const char* s) except + + string& replace(size_t pos, size_t len, const char* s, size_t n) except + + string& replace(iterator i1, iterator i2, const char* s, size_t n) except + + string& replace(size_t pos, size_t len, size_t n, char c) except + + string& replace(iterator i1, iterator i2, size_t n, char c) 
except + + + size_t copy(char* s, size_t len, size_t pos) except + + size_t copy(char* s, size_t len) except + + + size_t find(const string& s, size_t pos) + size_t find(const string& s) + size_t find(const char* s, size_t pos, size_t n) + size_t find(const char* s, size_t pos) + size_t find(const char* s) + size_t find(char c, size_t pos) + size_t find(char c) + + size_t rfind(const string&, size_t pos) + size_t rfind(const string&) + size_t rfind(const char* s, size_t pos, size_t n) + size_t rfind(const char* s, size_t pos) + size_t rfind(const char* s) + size_t rfind(char c, size_t pos) + size_t rfind(char c) + + size_t find_first_of(const string&, size_t pos) + size_t find_first_of(const string&) + size_t find_first_of(const char* s, size_t pos, size_t n) + size_t find_first_of(const char* s, size_t pos) + size_t find_first_of(const char* s) + size_t find_first_of(char c, size_t pos) + size_t find_first_of(char c) + + size_t find_first_not_of(const string& s, size_t pos) + size_t find_first_not_of(const string& s) + size_t find_first_not_of(const char* s, size_t pos, size_t n) + size_t find_first_not_of(const char* s, size_t pos) + size_t find_first_not_of(const char*) + size_t find_first_not_of(char c, size_t pos) + size_t find_first_not_of(char c) + + size_t find_last_of(const string& s, size_t pos) + size_t find_last_of(const string& s) + size_t find_last_of(const char* s, size_t pos, size_t n) + size_t find_last_of(const char* s, size_t pos) + size_t find_last_of(const char* s) + size_t find_last_of(char c, size_t pos) + size_t find_last_of(char c) + + size_t find_last_not_of(const string& s, size_t pos) + size_t find_last_not_of(const string& s) + size_t find_last_not_of(const char* s, size_t pos, size_t n) + size_t find_last_not_of(const char* s, size_t pos) + size_t find_last_not_of(const char* s) + size_t find_last_not_of(char c, size_t pos) + size_t find_last_not_of(char c) + + string substr(size_t pos, size_t len) except + + string substr(size_t pos) 
except + + string substr() + + # C++20 + bint starts_with(char c) except + + bint starts_with(const char* s) + bint ends_with(char c) except + + bint ends_with(const char* s) + # C++23 + bint contains(char c) except + + bint contains(const char* s) + + #string& operator= (const string&) + #string& operator= (const char*) + #string& operator= (char) + + string operator+ (const string&) except + + string operator+ (const char*) except + + + bint operator==(const string&) + bint operator==(const char*) + + bint operator!= (const string&) + bint operator!= (const char*) + + bint operator< (const string&) + bint operator< (const char*) + + bint operator> (const string&) + bint operator> (const char*) + + bint operator<= (const string&) + bint operator<= (const char*) + + bint operator>= (const string&) + bint operator>= (const char*) + + + string to_string(int val) except + + string to_string(long val) except + + string to_string(long long val) except + + string to_string(unsigned val) except + + string to_string(size_t val) except + + string to_string(ssize_t val) except + + string to_string(unsigned long val) except + + string to_string(unsigned long long val) except + + string to_string(float val) except + + string to_string(double val) except + + string to_string(long double val) except + + + int stoi(const string& s, size_t* idx, int base) except + + int stoi(const string& s, size_t* idx) except + + int stoi(const string& s) except + + long stol(const string& s, size_t* idx, int base) except + + long stol(const string& s, size_t* idx) except + + long stol(const string& s) except + + long long stoll(const string& s, size_t* idx, int base) except + + long long stoll(const string& s, size_t* idx) except + + long long stoll(const string& s) except + + + unsigned long stoul(const string& s, size_t* idx, int base) except + + unsigned long stoul(const string& s, size_t* idx) except + + unsigned long stoul(const string& s) except + + unsigned long long stoull(const string& 
s, size_t* idx, int base) except + + unsigned long long stoull(const string& s, size_t* idx) except + + unsigned long long stoull(const string& s) except + + + float stof(const string& s, size_t* idx) except + + float stof(const string& s) except + + double stod(const string& s, size_t* idx) except + + double stod(const string& s) except + + long double stold(const string& s, size_t* idx) except + + long double stold(const string& s) except + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/unordered_set.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/unordered_set.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6aae890d93015b836783888e5f58d7aaf109038f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/unordered_set.pxd @@ -0,0 +1,152 @@ +from .utility cimport pair + +cdef extern from "" namespace "std" nogil: + cdef cppclass unordered_set[T,HASH=*,PRED=*,ALLOCATOR=*]: + ctypedef T value_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + value_type& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + operator=(iterator&) except + + const value_type& operator*() + const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + bint 
operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + + unordered_set() except + + unordered_set(unordered_set&) except + + #unordered_set& operator=(unordered_set&) + bint operator==(unordered_set&, unordered_set&) + bint operator!=(unordered_set&, unordered_set&) + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + void clear() + size_t count(const T&) + bint empty() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + pair[iterator, iterator] equal_range(const T&) + pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) + iterator erase(iterator) + iterator const_erase "erase"(const_iterator) + iterator erase(const_iterator, const_iterator) + size_t erase(const T&) + iterator find(const T&) + const_iterator const_find "find"(const T&) + pair[iterator, bint] insert(const T&) except + + iterator insert(const_iterator, const T&) except + + void insert[InputIt](InputIt, InputIt) except + + size_t max_size() + size_t size() + void swap(unordered_set&) + #value_compare value_comp() + void max_load_factor(float) + float max_load_factor() + float load_factor() + void rehash(size_t) + void reserve(size_t) + size_t bucket_count() + size_t max_bucket_count() + size_t bucket_size(size_t) + size_t bucket(const T&) + # C++20 + bint contains(const T&) + + cdef cppclass unordered_multiset[T,HASH=*,PRED=*,ALLOCATOR=*]: + ctypedef T value_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + value_type& operator*() + iterator operator++() + iterator operator++(int) + bint operator==(iterator) + bint operator==(const_iterator) 
+ bint operator!=(iterator) + bint operator!=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + operator=(iterator&) except + + const value_type& operator*() + const_iterator operator++() + const_iterator operator++(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + + unordered_multiset() except + + unordered_multiset(unordered_multiset&) except + + #unordered_multiset& operator=(unordered_multiset&) + bint operator==(unordered_multiset&, unordered_multiset&) + bint operator!=(unordered_multiset&, unordered_multiset&) + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + void clear() + size_t count(const T&) + bint empty() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + pair[iterator, iterator] equal_range(const T&) + pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) + iterator erase(iterator) + iterator const_erase "erase"(const_iterator) + iterator erase(const_iterator, const_iterator) + size_t erase(const T&) + iterator find(const T&) + const_iterator const_find "find"(const T&) + iterator insert(const T&) except + + iterator insert(const_iterator, const T&) except + + void insert[InputIt](InputIt, InputIt) except + + size_t max_size() + size_t size() + void swap(unordered_multiset&) + #value_compare value_comp() + void max_load_factor(float) + float max_load_factor() + float load_factor() + void rehash(size_t) + void reserve(size_t) + size_t bucket_count() + size_t max_bucket_count() + size_t bucket_size(size_t) + size_t bucket(const T&) + # C++20 + bint contains(const T&) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/numpy/__init__.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/numpy/__init__.pxd new file mode 100644 index 
0000000000000000000000000000000000000000..228aee4cff89203e4190bcdcb8331f3a2118537d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/numpy/__init__.pxd @@ -0,0 +1,1057 @@ +# NumPy static imports for Cython +# +# NOTE: Do not make incompatible local changes to this file without contacting the NumPy project. +# This file is maintained by the NumPy project at +# https://github.com/numpy/numpy/tree/master/numpy +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from Cython and not from NumPy itself. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "Cython/Includes/numpy/" */ + """ + + +cdef extern from "Python.h": + ctypedef Py_ssize_t Py_intptr_t + +cdef extern from "numpy/arrayobject.h": + ctypedef Py_intptr_t npy_intp + ctypedef size_t npy_uintp + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + + ctypedef enum 
NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_UPDATEIFCOPY + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. 
+ NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_UPDATEIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS + + npy_intp NPY_MAX_ELSIZE + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef char flags + cdef int type_num + cdef int itemsize "elsize" + cdef int alignment + cdef object fields + cdef tuple names + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. 
+ cdef PyArray_ArrayDescr* subarray + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. 
+ For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + double real + double imag + + ctypedef struct npy_cdouble: + double real + double imag + + ctypedef struct npy_clongdouble: + long double real + long double imag + + ctypedef struct npy_complex64: + float real + float imag + + ctypedef struct npy_complex128: + double real + double imag + + ctypedef struct npy_complex160: + long double real + long double imag + + ctypedef struct npy_complex192: + long double real + long double imag + + ctypedef struct npy_complex256: + long double real + long double imag + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. 
+ # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. 
+ int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISPYTHON(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISPYTHON(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISPYTHON(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + 
bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(object, int val) + npy_intp PyArray_REFCOUNT(object) + object PyArray_ContiguousFromAny(op, 
int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + void PyArray_XDECREF_ERR(ndarray) + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint 
PyArray_MultiIter_NOTDONE(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_SetNumericOps (object) + object PyArray_GetNumericOps () + int PyArray_INCREF (ndarray) + int PyArray_XDECREF (ndarray) + void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CastTo (ndarray, ndarray) + int PyArray_CastAnyTo (ndarray, ndarray) + int PyArray_CanCastSafely (int, int) + npy_bool PyArray_CanCastTo (dtype, dtype) + int PyArray_ObjectType (object, int) + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + object PyArray_ScalarFromObject (object) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + object PyArray_FromDims (int, int *, int) + #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object 
PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_MoveInto (ndarray, ndarray) + int PyArray_CopyInto (ndarray, ndarray) + int PyArray_CopyAnyInto (ndarray, ndarray) + int PyArray_CopyObject (ndarray, object) + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) + int PyArray_Dump (object, object, int) + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) 
+ + int PyArray_PyIntAsInt (object) + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) + void PyArray_FillObjectArray (ndarray, object) + int PyArray_FillWithScalar (ndarray, object) + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + object PyArray_NewFlagsObject (object) + npy_bool PyArray_CanCastScalar (type, type) + #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) + int PyArray_RemoveSmallest (broadcast) + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) + void PyArray_Item_XDECREF (char *, dtype) + object PyArray_FieldNames (object) + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object 
PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + #int PyArray_As1D (object*, char **, int *, int) + #int PyArray_As2D (object*, char ***, int *, int *, int) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_CopyAndTranspose (object) + object PyArray_Correlate (object, object, int) + int PyArray_TypestrConvert (int, int) + #int PyArray_DescrConverter (object, dtype*) + #int PyArray_DescrConverter2 (object, dtype*) + int PyArray_IntpConverter (object, PyArray_Dims *) + #int PyArray_BufferConverter (object, chunk) + int PyArray_AxisConverter (object, int *) + int PyArray_BoolConverter (object, npy_bool *) + int 
PyArray_ByteorderConverter (object, char *) + int PyArray_OrderConverter (object, NPY_ORDER *) + unsigned char PyArray_EquivTypes (dtype, dtype) + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_TypeNumFromName (char *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) + #int PyArray_OutputConverter (object, ndarray*) + object PyArray_BroadcastToShape (object, npy_intp *, int) + void _PyArray_SigintHandler (int) + void* _PyArray_GetSigintBuf () + #int PyArray_DescrAlignConverter (object, dtype*) + #int PyArray_DescrAlignConverter2 (object, dtype*) + int PyArray_SearchsideConverter (object, void *) + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_CompareString (char *, char *, size_t) + int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +# The int types are mapped a bit surprising -- +# numpy.int corresponds to 'l' and numpy.long to 'q' +ctypedef npy_long int_t +ctypedef npy_longlong longlong_t + +ctypedef npy_ulong uint_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef npy_cfloat cfloat_t +ctypedef npy_cdouble cdouble_t +ctypedef npy_clongdouble clongdouble_t + +ctypedef npy_cdouble complex_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + 
int64_t num + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + UFUNC_ERR_IGNORE + UFUNC_ERR_WARN + UFUNC_ERR_RAISE + UFUNC_ERR_CALL + UFUNC_ERR_PRINT + UFUNC_ERR_LOG + UFUNC_MASK_DIVIDEBYZERO + UFUNC_MASK_OVERFLOW + UFUNC_MASK_UNDERFLOW + UFUNC_MASK_INVALID + UFUNC_SHIFT_DIVIDEBYZERO + UFUNC_SHIFT_OVERFLOW + UFUNC_SHIFT_UNDERFLOW + UFUNC_SHIFT_INVALID + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + 
UFUNC_ERR_DEFAULT + UFUNC_ERR_DEFAULT2 + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) + int PyUFunc_GenericFunction \ + (ufunc, PyObject *, PyObject *, PyArrayObject **) + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + int PyUFunc_GetPyValues \ + (char *, int *, int *, PyObject **) + int PyUFunc_checkfperr \ + (int, PyObject *, int *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_handlefperr \ + (int, PyObject *, int, int *) + int 
PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base): + Py_INCREF(base) # important to do this before stealing the reference below! + PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy.core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. 
+ """ + return (obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object. + """ + return (obj).obmeta.base diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/numpy/math.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/numpy/math.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c16df1c51a5543ea963d44f30d2027414b6cdf40 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/numpy/math.pxd @@ -0,0 +1,133 @@ +# NumPy math library +# +# This exports the functionality of the NumPy core math library, aka npymath, +# which provides implementations of C99 math functions and macros for system +# with a C89 library (such as MSVC). npymath is available with NumPy >=1.3, +# although some functions will require later versions. The spacing function is +# not in C99, but comes from Fortran. +# +# On the Cython side, the npymath functions are available without the "npy_" +# prefix that they have in C, to make this is a drop-in replacement for +# libc.math. The same is true for the constants, where possible. +# +# See the NumPy documentation for linking instructions. +# +# Complex number support and NumPy 2.0 half-precision functions are currently +# not exported. +# +# Author: Lars Buitinck + +cdef extern from "numpy/npy_math.h" nogil: + # Floating-point classification + long double NAN "NPY_NAN" + long double INFINITY "NPY_INFINITY" + long double PZERO "NPY_PZERO" # positive zero + long double NZERO "NPY_NZERO" # negative zero + + # These four are actually macros and work on any floating-point type. 
+ int isinf "npy_isinf"(long double) # -1 / 0 / 1 + bint isfinite "npy_isfinite"(long double) + bint isnan "npy_isnan"(long double) + bint signbit "npy_signbit"(long double) + + # Math constants + long double E "NPY_E" + long double LOG2E "NPY_LOG2E" # ln(e) / ln(2) + long double LOG10E "NPY_LOG10E" # ln(e) / ln(10) + long double LOGE2 "NPY_LOGE2" # ln(2) + long double LOGE10 "NPY_LOGE10" # ln(10) + long double PI "NPY_PI" + long double PI_2 "NPY_PI_2" # pi / 2 + long double PI_4 "NPY_PI_4" # pi / 4 + long double NPY_1_PI # 1 / pi; NPY_ because of ident syntax + long double NPY_2_PI # 2 / pi + long double EULER "NPY_EULER" # Euler constant (gamma, 0.57721) + + # Low-level floating point manipulation (NumPy >=1.4) + float copysignf "npy_copysignf"(float, float) + float nextafterf "npy_nextafterf"(float x, float y) + float spacingf "npy_spacingf"(float x) + double copysign "npy_copysign"(double, double) + double nextafter "npy_nextafter"(double x, double y) + double spacing "npy_spacing"(double x) + long double copysignl "npy_copysignl"(long double, long double) + long double nextafterl "npy_nextafterl"(long double x, long double y) + long double spacingl "npy_spacingl"(long double x) + + # Float C99 functions + float sinf "npy_sinf"(float x) + float cosf "npy_cosf"(float x) + float tanf "npy_tanf"(float x) + float sinhf "npy_sinhf"(float x) + float coshf "npy_coshf"(float x) + float tanhf "npy_tanhf"(float x) + float fabsf "npy_fabsf"(float x) + float floorf "npy_floorf"(float x) + float ceilf "npy_ceilf"(float x) + float rintf "npy_rintf"(float x) + float sqrtf "npy_sqrtf"(float x) + float log10f "npy_log10f"(float x) + float logf "npy_logf"(float x) + float expf "npy_expf"(float x) + float expm1f "npy_expm1f"(float x) + float asinf "npy_asinf"(float x) + float acosf "npy_acosf"(float x) + float atanf "npy_atanf"(float x) + float asinhf "npy_asinhf"(float x) + float acoshf "npy_acoshf"(float x) + float atanhf "npy_atanhf"(float x) + float log1pf "npy_log1pf"(float 
x) + float exp2f "npy_exp2f"(float x) + float log2f "npy_log2f"(float x) + float atan2f "npy_atan2f"(float x, float y) + float hypotf "npy_hypotf"(float x, float y) + float powf "npy_powf"(float x, float y) + float fmodf "npy_fmodf"(float x, float y) + float modff "npy_modff"(float x, float* y) + + # Long double C99 functions + long double sinl "npy_sinl"(long double x) + long double cosl "npy_cosl"(long double x) + long double tanl "npy_tanl"(long double x) + long double sinhl "npy_sinhl"(long double x) + long double coshl "npy_coshl"(long double x) + long double tanhl "npy_tanhl"(long double x) + long double fabsl "npy_fabsl"(long double x) + long double floorl "npy_floorl"(long double x) + long double ceill "npy_ceill"(long double x) + long double rintl "npy_rintl"(long double x) + long double sqrtl "npy_sqrtl"(long double x) + long double log10l "npy_log10l"(long double x) + long double logl "npy_logl"(long double x) + long double expl "npy_expl"(long double x) + long double expm1l "npy_expm1l"(long double x) + long double asinl "npy_asinl"(long double x) + long double acosl "npy_acosl"(long double x) + long double atanl "npy_atanl"(long double x) + long double asinhl "npy_asinhl"(long double x) + long double acoshl "npy_acoshl"(long double x) + long double atanhl "npy_atanhl"(long double x) + long double log1pl "npy_log1pl"(long double x) + long double exp2l "npy_exp2l"(long double x) + long double log2l "npy_log2l"(long double x) + long double atan2l "npy_atan2l"(long double x, long double y) + long double hypotl "npy_hypotl"(long double x, long double y) + long double powl "npy_powl"(long double x, long double y) + long double fmodl "npy_fmodl"(long double x, long double y) + long double modfl "npy_modfl"(long double x, long double* y) + + # NumPy extensions + float deg2radf "npy_deg2radf"(float x) + float rad2degf "npy_rad2degf"(float x) + float logaddexpf "npy_logaddexpf"(float x, float y) + float logaddexp2f "npy_logaddexp2f"(float x, float y) + + double 
deg2rad "npy_deg2rad"(double x) + double rad2deg "npy_rad2deg"(double x) + double logaddexp "npy_logaddexp"(double x, double y) + double logaddexp2 "npy_logaddexp2"(double x, double y) + + long double deg2radl "npy_deg2radl"(long double x) + long double rad2degl "npy_rad2degl"(long double x) + long double logaddexpl "npy_logaddexpl"(long double x, long double y) + long double logaddexp2l "npy_logaddexp2l"(long double x, long double y) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/dlfcn.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/dlfcn.pxd new file mode 100644 index 0000000000000000000000000000000000000000..bf61997f341f92f124fde3487b2a0616da63decf --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/dlfcn.pxd @@ -0,0 +1,14 @@ +# POSIX dynamic linking/loading interface. +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/dlfcn.h.html + +cdef extern from "" nogil: + void *dlopen(const char *, int) + char *dlerror() + void *dlsym(void *, const char *) + int dlclose(void *) + + enum: + RTLD_LAZY + RTLD_NOW + RTLD_GLOBAL + RTLD_LOCAL diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/ioctl.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/ioctl.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dacbc307f3f19b92d84bd655ae3eaa92f9385f6f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/ioctl.pxd @@ -0,0 +1,4 @@ +cdef extern from "" nogil: + enum: FIONBIO + + int ioctl(int fd, int request, ...) 
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/resource.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/resource.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b9628c66bdf68488e285b24550d4079bcf22aa01 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/resource.pxd @@ -0,0 +1,57 @@ +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html +# https://man7.org/linux/man-pages/man2/getrusage.2.html + +from posix.time cimport timeval +from posix.types cimport id_t + +cdef extern from "" nogil: + + enum: PRIO_PROCESS + enum: PRIO_PGRP + enum: PRIO_USER + + enum: RLIM_INFINITY + enum: RLIM_SAVED_MAX + enum: RLIM_SAVED_CUR + + enum: RUSAGE_SELF + enum: RUSAGE_CHILDREN + + enum: RLIMIT_CORE + enum: RLIMIT_CPU + enum: RLIMIT_DATA + enum: RLIMIT_FSIZE + enum: RLIMIT_NOFILE + enum: RLIMIT_STACK + enum: RLIMIT_AS + + ctypedef unsigned long rlim_t + + cdef struct rlimit: + rlim_t rlim_cur + rlim_t rlim_max + + cdef struct rusage: + timeval ru_utime + timeval ru_stime + # Linux-specific + long ru_maxrss + long ru_ixrss + long ru_idrss + long ru_isrss + long ru_minflt + long ru_majflt + long ru_nswap + long ru_inblock + long ru_oublock + long ru_msgsnd + long ru_msgrcv + long ru_nsignals + long ru_nvcsw + long ru_nivcsw + + int getpriority(int, id_t) + int getrlimit(int, rlimit *) + int getrusage(int, rusage *) + int setpriority(int, id_t, int) + int setrlimit(int, const rlimit *) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/select.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/select.pxd new file mode 100644 index 0000000000000000000000000000000000000000..803c492d40fe95bde9a31ed7c68ac9f665d39651 --- /dev/null +++ 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/select.pxd @@ -0,0 +1,21 @@ +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_select.h.html + +from .types cimport sigset_t +from .time cimport timeval, timespec + +cdef extern from "" nogil: + ctypedef struct fd_set: + pass + + int FD_SETSIZE + void FD_SET(int, fd_set*) + void FD_CLR(int, fd_set*) + bint FD_ISSET(int, fd_set*) + void FD_ZERO(fd_set*) + + int select(int nfds, fd_set *readfds, fd_set *writefds, + fd_set *exceptfds, timeval *timeout) + + int pselect(int nfds, fd_set *readfds, fd_set *writefds, + fd_set *exceptfds, const timespec *timeout, + const sigset_t *sigmask) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stat.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stat.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9247423f84e1a7690b2677c906c009be0c1c574d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/stat.pxd @@ -0,0 +1,98 @@ +# https://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/stat.h.html +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_stat.h.html + +from posix.types cimport (blkcnt_t, blksize_t, dev_t, gid_t, ino_t, mode_t, + nlink_t, off_t, time_t, uid_t) +from posix.time cimport timespec + + +cdef extern from "" nogil: + cdef struct struct_stat "stat": + dev_t st_dev + ino_t st_ino + mode_t st_mode + nlink_t st_nlink + uid_t st_uid + gid_t st_gid + dev_t st_rdev + off_t st_size + blksize_t st_blksize + blkcnt_t st_blocks + # POSIX.1-2001 + time_t st_atime + time_t st_mtime + time_t st_ctime + # POSIX.1-2008 + timespec st_atim + timespec st_mtim + timespec st_ctim + + # st_birthtime exists on *BSD and OS X. + # Under Linux, defining it here does not hurt. 
Compilation under Linux + # will only (and rightfully) fail when attempting to use the field. + time_t st_birthtime + +# POSIX prescribes including both and for these +cdef extern from "" nogil: + int chmod(const char *, mode_t) + int fchmod(int, mode_t) + int fchmodat(int, const char *, mode_t, int flags) + + int stat(const char *, struct_stat *) + int lstat(const char *, struct_stat *) + int fstat(int, struct_stat *) + int fstatat(int, const char *, struct_stat *, int flags) + + int mkdir(const char *, mode_t) + int mkdirat(int, const char *, mode_t) + int mkfifo(const char *, mode_t) + int mkfifoat(int, const char *, mode_t) + int mknod(const char *, mode_t, dev_t) + int mknodat(int, const char *, mode_t, dev_t) + + int futimens(int, const timespec *) + int utimensat(int, const char *, const timespec *, int flags) + + # Macros for st_mode + mode_t S_ISREG(mode_t) + mode_t S_ISDIR(mode_t) + mode_t S_ISCHR(mode_t) + mode_t S_ISBLK(mode_t) + mode_t S_ISFIFO(mode_t) + mode_t S_ISLNK(mode_t) + mode_t S_ISSOCK(mode_t) + + mode_t S_IFMT + mode_t S_IFREG + mode_t S_IFDIR + mode_t S_IFCHR + mode_t S_IFBLK + mode_t S_IFIFO + mode_t S_IFLNK + mode_t S_IFSOCK + + # Permissions + mode_t S_ISUID + mode_t S_ISGID + mode_t S_ISVTX + + mode_t S_IRWXU + mode_t S_IRUSR + mode_t S_IWUSR + mode_t S_IXUSR + + mode_t S_IRWXG + mode_t S_IRGRP + mode_t S_IWGRP + mode_t S_IXGRP + + mode_t S_IRWXO + mode_t S_IROTH + mode_t S_IWOTH + mode_t S_IXOTH + + # test file types + bint S_TYPEISMQ(struct_stat *buf) + bint S_TYPEISSEM(struct_stat *buf) + bint S_TYPEISSHM(struct_stat *buf) + bint S_TYPEISTMO(struct_stat *buf) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/strings.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/strings.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6ee48491eb833da21e5aefe1a8efe521c132cbf4 --- /dev/null +++ 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/strings.pxd @@ -0,0 +1,9 @@ +cdef extern from "" nogil: + int bcmp(const void *, const void *, size_t) + void bcopy(const void *, void *, size_t) + void bzero(void *, size_t) + int ffs(int) + char *index(const char *, int) + char *rindex(const char *, int) + int strcasecmp(const char *, const char *) + int strncasecmp(const char *, const char *, size_t) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/uio.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/uio.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d9971bd4a06bce03952fa3f4bdbf1ee4fa06d811 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/uio.pxd @@ -0,0 +1,26 @@ +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_uio.h.html + +from posix.types cimport off_t + + +cdef extern from "" nogil: + + cdef struct iovec: + void *iov_base + size_t iov_len + + ssize_t readv (int fd, const iovec *iov, int iovcnt) + ssize_t writev(int fd, const iovec *iov, int iovcnt) + + # Linux-specific, https://man7.org/linux/man-pages/man2/readv.2.html + ssize_t preadv (int fd, const iovec *iov, int iovcnt, off_t offset) + ssize_t pwritev(int fd, const iovec *iov, int iovcnt, off_t offset) + + enum: RWF_DSYNC + enum: RWF_HIPRI + enum: RWF_SYNC + enum: RWF_NOWAIT + enum: RWF_APPEND + + ssize_t preadv2 (int fd, const iovec *iov, int iovcnt, off_t offset, int flags) + ssize_t pwritev2(int fd, const iovec *iov, int iovcnt, off_t offset, int flags) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/unistd.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/unistd.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1afeca385456877c2d6ef0f59660f57012e81201 --- /dev/null 
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/unistd.pxd @@ -0,0 +1,271 @@ +# http://www.opengroup.org/onlinepubs/009695399/basedefs/unistd.h.html + +from posix.types cimport gid_t, pid_t, off_t, uid_t + +cdef extern from "" nogil: + + #:NULL + + enum: R_OK + enum: W_OK + enum: X_OK + enum: F_OK + + enum: _CS_PATH + enum: _CS_POSIX_V6_ILP32_OFF32_CFLAGS + enum: _CS_POSIX_V6_ILP32_OFF32_LDFLAGS + enum: _CS_POSIX_V6_ILP32_OFF32_LIBS + enum: _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS + enum: _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS + enum: _CS_POSIX_V6_ILP32_OFFBIG_LIBS + enum: _CS_POSIX_V6_LP64_OFF64_CFLAGS + enum: _CS_POSIX_V6_LP64_OFF64_LDFLAGS + enum: _CS_POSIX_V6_LP64_OFF64_LIBS + enum: _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS + enum: _CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS + enum: _CS_POSIX_V6_LPBIG_OFFBIG_LIBS + enum: _CS_POSIX_V6_WIDTH_RESTRICTED_ENVS + + enum: SEEK_SET + enum: SEEK_CUR + enum: SEEK_END + + enum: F_LOCK + enum: F_TEST + enum: F_TLOCK + enum: F_ULOCK + + enum: _PC_2_SYMLINKS + enum: _PC_ALLOC_SIZE_MIN + enum: _PC_ASYNC_IO + enum: _PC_CHOWN_RESTRICTED + enum: _PC_FILESIZEBITS + enum: _PC_LINK_MAX + enum: _PC_MAX_CANON + enum: _PC_MAX_INPUT + enum: _PC_NAME_MAX + enum: _PC_NO_TRUNC + enum: _PC_PATH_MAX + enum: _PC_PIPE_BUF + enum: _PC_PRIO_IO + enum: _PC_REC_INCR_XFER_SIZE + enum: _PC_REC_MIN_XFER_SIZE + enum: _PC_REC_XFER_ALIGN + enum: _PC_SYMLINK_MAX + enum: _PC_SYNC_IO + enum: _PC_VDISABLE + + enum: _SC_2_C_BIND + enum: _SC_2_C_DEV + enum: _SC_2_CHAR_TERM + enum: _SC_2_FORT_DEV + enum: _SC_2_FORT_RUN + enum: _SC_2_LOCALEDEF + enum: _SC_2_PBS + enum: _SC_2_PBS_ACCOUNTING + enum: _SC_2_PBS_CHECKPOINT + enum: _SC_2_PBS_LOCATE + enum: _SC_2_PBS_MESSAGE + enum: _SC_2_PBS_TRACK + enum: _SC_2_SW_DEV + enum: _SC_2_UPE + enum: _SC_2_VERSION + enum: _SC_ADVISORY_INFO + enum: _SC_AIO_LISTIO_MAX + enum: _SC_AIO_MAX + enum: _SC_AIO_PRIO_DELTA_MAX + enum: _SC_ARG_MAX + enum: _SC_ASYNCHRONOUS_IO + enum: _SC_ATEXIT_MAX + enum: _SC_BARRIERS + 
enum: _SC_BC_BASE_MAX + enum: _SC_BC_DIM_MAX + enum: _SC_BC_SCALE_MAX + enum: _SC_BC_STRING_MAX + enum: _SC_CHILD_MAX + enum: _SC_CLK_TCK + enum: _SC_CLOCK_SELECTION + enum: _SC_COLL_WEIGHTS_MAX + enum: _SC_CPUTIME + enum: _SC_DELAYTIMER_MAX + enum: _SC_EXPR_NEST_MAX + enum: _SC_FSYNC + enum: _SC_GETGR_R_SIZE_MAX + enum: _SC_GETPW_R_SIZE_MAX + enum: _SC_HOST_NAME_MAX + enum: _SC_IOV_MAX + enum: _SC_IPV6 + enum: _SC_JOB_CONTROL + enum: _SC_LINE_MAX + enum: _SC_LOGIN_NAME_MAX + enum: _SC_MAPPED_FILES + enum: _SC_MEMLOCK + enum: _SC_MEMLOCK_RANGE + enum: _SC_MEMORY_PROTECTION + enum: _SC_MESSAGE_PASSING + enum: _SC_MONOTONIC_CLOCK + enum: _SC_MQ_OPEN_MAX + enum: _SC_MQ_PRIO_MAX + enum: _SC_NGROUPS_MAX + enum: _SC_OPEN_MAX + enum: _SC_PAGE_SIZE + enum: _SC_PAGESIZE + enum: _SC_PRIORITIZED_IO + enum: _SC_PRIORITY_SCHEDULING + enum: _SC_RAW_SOCKETS + enum: _SC_RE_DUP_MAX + enum: _SC_READER_WRITER_LOCKS + enum: _SC_REALTIME_SIGNALS + enum: _SC_REGEXP + enum: _SC_RTSIG_MAX + enum: _SC_SAVED_IDS + enum: _SC_SEM_NSEMS_MAX + enum: _SC_SEM_VALUE_MAX + enum: _SC_SEMAPHORES + enum: _SC_SHARED_MEMORY_OBJECTS + enum: _SC_SHELL + enum: _SC_SIGQUEUE_MAX + enum: _SC_SPAWN + enum: _SC_SPIN_LOCKS + enum: _SC_SPORADIC_SERVER + enum: _SC_SS_REPL_MAX + enum: _SC_STREAM_MAX + enum: _SC_SYMLOOP_MAX + enum: _SC_SYNCHRONIZED_IO + enum: _SC_THREAD_ATTR_STACKADDR + enum: _SC_THREAD_ATTR_STACKSIZE + enum: _SC_THREAD_CPUTIME + enum: _SC_THREAD_DESTRUCTOR_ITERATIONS + enum: _SC_THREAD_KEYS_MAX + enum: _SC_THREAD_PRIO_INHERIT + enum: _SC_THREAD_PRIO_PROTECT + enum: _SC_THREAD_PRIORITY_SCHEDULING + enum: _SC_THREAD_PROCESS_SHARED + enum: _SC_THREAD_SAFE_FUNCTIONS + enum: _SC_THREAD_SPORADIC_SERVER + enum: _SC_THREAD_STACK_MIN + enum: _SC_THREAD_THREADS_MAX + enum: _SC_THREADS + enum: _SC_TIMEOUTS + enum: _SC_TIMER_MAX + enum: _SC_TIMERS + enum: _SC_TRACE + enum: _SC_TRACE_EVENT_FILTER + enum: _SC_TRACE_EVENT_NAME_MAX + enum: _SC_TRACE_INHERIT + enum: _SC_TRACE_LOG + enum: _SC_TRACE_NAME_MAX + enum: 
_SC_TRACE_SYS_MAX + enum: _SC_TRACE_USER_EVENT_MAX + enum: _SC_TTY_NAME_MAX + enum: _SC_TYPED_MEMORY_OBJECTS + enum: _SC_TZNAME_MAX + enum: _SC_V6_ILP32_OFF32 + enum: _SC_V6_ILP32_OFFBIG + enum: _SC_V6_LP64_OFF64 + enum: _SC_V6_LPBIG_OFFBIG + enum: _SC_VERSION + enum: _SC_XBS5_ILP32_OFF32 + enum: _SC_XBS5_ILP32_OFFBIG + enum: _SC_XBS5_LP64_OFF64 + enum: _SC_XBS5_LPBIG_OFFBIG + enum: _SC_XOPEN_CRYPT + enum: _SC_XOPEN_ENH_I18N + enum: _SC_XOPEN_LEGACY + enum: _SC_XOPEN_REALTIME + enum: _SC_XOPEN_REALTIME_THREADS + enum: _SC_XOPEN_SHM + enum: _SC_XOPEN_STREAMS + enum: _SC_XOPEN_UNIX + enum: _SC_XOPEN_VERSION + + enum: STDIN_FILENO #0 + enum: STDOUT_FILENO #1 + enum: STDERR_FILENO #2 + + ctypedef unsigned useconds_t + + int access(const char *, int) + unsigned alarm(unsigned) + int chdir(const char *) + int chown(const char *, uid_t, gid_t) + int close(int) + size_t confstr(int, char *, size_t) + char *crypt(const char *, const char *) + char *ctermid(char *) + int dup(int) + int dup2(int, int) + void encrypt(char[64], int) + int execl(const char *, const char *, ...) + int execle(const char *, const char *, ...) + int execlp(const char *, const char *, ...) 
+ int execv(const char *, char *[]) + int execve(const char *, char *[], char *[]) + int execvp(const char *, char *[]) + void _exit(int) + int fchown(int, uid_t, gid_t) + int fchdir(int) + int fdatasync(int) + pid_t fork() + long fpathconf(int, int) + int fsync(int) + int ftruncate(int, off_t) + char *getcwd(char *, size_t) + gid_t getegid() + uid_t geteuid() + gid_t getgid() + int getgroups(int, gid_t []) + long gethostid() + int gethostname(char *, size_t) + char *getlogin() + int getlogin_r(char *, size_t) + int getopt(int, char * [], const char *) + pid_t getpgid(pid_t) + pid_t getpgrp() + pid_t getpid() + pid_t getppid() + pid_t getsid(pid_t) + uid_t getuid() + char *getwd(char *) + int isatty(int) + int lchown(const char *, uid_t, gid_t) + int link(const char *, const char *) + int lockf(int, int, off_t) + off_t lseek(int, off_t, int) + int nice(int) + long pathconf(char *, int) + int pause() + int pipe(int [2]) + ssize_t pread(int, void *, size_t, off_t) + ssize_t pwrite(int, const void *, size_t, off_t) + ssize_t read(int, void *, size_t) + ssize_t readlink(const char *, char *, size_t) + int rmdir(const char *) + int setegid(gid_t) + int seteuid(uid_t) + int setgid(gid_t) + int setpgid(pid_t, pid_t) + pid_t setpgrp() + int setregid(gid_t, gid_t) + int setreuid(uid_t, uid_t) + pid_t setsid() + int setuid(uid_t) + unsigned sleep(unsigned) + void swab(const void *, void *, ssize_t) + int symlink(const char *, const char *) + void sync() + long sysconf(int) + pid_t tcgetpgrp(int) + int tcsetpgrp(int, pid_t) + int truncate(const char *, off_t) + char *ttyname(int) + int ttyname_r(int, char *, size_t) + useconds_t ualarm(useconds_t, useconds_t) + int unlink(const char *) + int usleep(useconds_t) + pid_t vfork() + ssize_t write(int, const void *, size_t) + char *optarg + int optind + int opterr + int optopt diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/wait.pxd 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/wait.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f30be06df2cc70818f09c73108ef87b3de052dbb --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/posix/wait.pxd @@ -0,0 +1,38 @@ +# https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_wait.h.html + +from posix.types cimport pid_t, id_t +from posix.signal cimport siginfo_t +from posix.resource cimport rusage + +cdef extern from "" nogil: + enum: WNOHANG + enum: WUNTRACED + enum: WCONTINUED + enum: WEXITED + enum: WSTOPPED + enum: WNOWAIT + + int WEXITSTATUS(int status) + int WIFCONTINUED(int status) + int WIFEXITED(int status) + int WIFSIGNALED(int status) + int WIFSTOPPED(int status) + int WSTOPSIG(int status) + int WTERMSIG(int status) + + ctypedef int idtype_t + enum: P_ALL # idtype_t values + enum: P_PID + enum: P_PGID + + pid_t wait(int *stat_loc) + pid_t waitpid(pid_t pid, int *status, int options) + int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options) + +# wait3 was in POSIX until 2008 while wait4 was never standardized. +# Even so, these calls are in almost every Unix, always in sys/wait.h. +# Hence, posix.wait is the least surprising place to declare them for Cython. +# libc may require _XXX_SOURCE to be defined at C-compile time to provide them. 
+ + pid_t wait3(int *status, int options, rusage *rusage) + pid_t wait4(pid_t pid, int *status, int options, rusage *rusage) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/assortativity/neighbor_degree.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/assortativity/neighbor_degree.py new file mode 100644 index 0000000000000000000000000000000000000000..a8980da766f1e63e06990b35a3b403df5486cd50 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/assortativity/neighbor_degree.py @@ -0,0 +1,160 @@ +import networkx as nx + +__all__ = ["average_neighbor_degree"] + + +@nx._dispatch(edge_attrs="weight") +def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None): + r"""Returns the average degree of the neighborhood of each node. + + In an undirected graph, the neighborhood `N(i)` of node `i` contains the + nodes that are connected to `i` by an edge. + + For directed graphs, `N(i)` is defined according to the parameter `source`: + + - if source is 'in', then `N(i)` consists of predecessors of node `i`. + - if source is 'out', then `N(i)` consists of successors of node `i`. + - if source is 'in+out', then `N(i)` is both predecessors and successors. + + The average neighborhood degree of a node `i` is + + .. math:: + + k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j + + where `N(i)` are the neighbors of node `i` and `k_j` is + the degree of node `j` which belongs to `N(i)`. For weighted + graphs, an analogous measure can be defined [1]_, + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, `w_{ij}` + is the weight of the edge that links `i` and `j` and + `N(i)` are the neighbors of node `i`. + + + Parameters + ---------- + G : NetworkX graph + + source : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. 
+ Use "in"- or "out"-neighbors of source node. + + target : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-degree for target node. + + nodes : list or iterable, optional (default=G.nodes) + Compute neighbor degree only for specified nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d: dict + A dictionary keyed by node to the average degree of its neighbors. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[0, 1]["weight"] = 5 + >>> G.edges[2, 3]["weight"] = 3 + + >>> nx.average_neighbor_degree(G) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0} + >>> nx.average_neighbor_degree(G, weight="weight") + {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0} + + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.average_neighbor_degree(G, source="in", target="in") + {0: 0.0, 1: 0.0, 2: 1.0, 3: 1.0} + + >>> nx.average_neighbor_degree(G, source="out", target="out") + {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0} + + See Also + -------- + average_degree_connectivity + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). 
+ """ + if G.is_directed(): + if source == "in": + source_degree = G.in_degree + elif source == "out": + source_degree = G.out_degree + elif source == "in+out": + source_degree = G.degree + else: + raise nx.NetworkXError( + f"source argument {source} must be 'in', 'out' or 'in+out'" + ) + + if target == "in": + target_degree = G.in_degree + elif target == "out": + target_degree = G.out_degree + elif target == "in+out": + target_degree = G.degree + else: + raise nx.NetworkXError( + f"target argument {target} must be 'in', 'out' or 'in+out'" + ) + else: + if source != "out" or target != "out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = target_degree = G.degree + + # precompute target degrees -- should *not* be weighted degree + t_deg = dict(target_degree()) + + # Set up both predecessor and successor neighbor dicts leaving empty if not needed + G_P = G_S = {n: {} for n in G} + if G.is_directed(): + # "in" or "in+out" cases: G_P contains predecessors + if "in" in source: + G_P = G.pred + # "out" or "in+out" cases: G_S contains successors + if "out" in source: + G_S = G.succ + else: + # undirected leave G_P empty but G_S is the adjacency + G_S = G.adj + + # Main loop: Compute average degree of neighbors + avg = {} + for n, deg in source_degree(nodes, weight=weight): + # handle degree zero average + if deg == 0: + avg[n] = 0.0 + continue + + # we sum over both G_P and G_S, but one of the two is usually empty. 
+ if weight is None: + avg[n] = ( + sum(t_deg[nbr] for nbr in G_S[n]) + sum(t_deg[nbr] for nbr in G_P[n]) + ) / deg + else: + avg[n] = ( + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_S[n].items()) + + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_P[n].items()) + ) / deg + return avg diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6580abd3a5e39c3f44327ffd792f515a526e074f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6698486f620685151a06d1082c377d3476f88747 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..018b4091dc743894f7a78c7ae16b3a1ce4427f40 Binary files /dev/null and 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/equitable_coloring.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/equitable_coloring.py new file mode 100644 index 0000000000000000000000000000000000000000..af1fb5a7e7c20392a82673406b923e89b6e525f2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/equitable_coloring.py @@ -0,0 +1,505 @@ +""" +Equitable coloring of graphs with bounded degree. +""" + +from collections import defaultdict + +import networkx as nx + +__all__ = ["equitable_color"] + + +@nx._dispatch +def is_coloring(G, coloring): + """Determine if the coloring is a valid coloring for the graph G.""" + # Verify that the coloring is valid. + return all(coloring[s] != coloring[d] for s, d in G.edges) + + +@nx._dispatch +def is_equitable(G, coloring, num_colors=None): + """Determines if the coloring is valid and equitable for the graph G.""" + + if not is_coloring(G, coloring): + return False + + # Verify whether it is equitable. + color_set_size = defaultdict(int) + for color in coloring.values(): + color_set_size[color] += 1 + + if num_colors is not None: + for color in range(num_colors): + if color not in color_set_size: + # These colors do not have any vertices attached to them. 
+ color_set_size[color] = 0 + + # If there are more than 2 distinct values, the coloring cannot be equitable + all_set_sizes = set(color_set_size.values()) + if len(all_set_sizes) == 0 and num_colors is None: # Was an empty graph + return True + elif len(all_set_sizes) == 1: + return True + elif len(all_set_sizes) == 2: + a, b = list(all_set_sizes) + return abs(a - b) <= 1 + else: # len(all_set_sizes) > 2: + return False + + +def make_C_from_F(F): + C = defaultdict(list) + for node, color in F.items(): + C[color].append(node) + + return C + + +def make_N_from_L_C(L, C): + nodes = L.keys() + colors = C.keys() + return { + (node, color): sum(1 for v in L[node] if v in C[color]) + for node in nodes + for color in colors + } + + +def make_H_from_C_N(C, N): + return { + (c1, c2): sum(1 for node in C[c1] if N[(node, c2)] == 0) for c1 in C for c2 in C + } + + +def change_color(u, X, Y, N, H, F, C, L): + """Change the color of 'u' from X to Y and update N, H, F, C.""" + assert F[u] == X and X != Y + + # Change the class of 'u' from X to Y + F[u] = Y + + for k in C: + # 'u' witnesses an edge from k -> Y instead of from k -> X now. + if N[u, k] == 0: + H[(X, k)] -= 1 + H[(Y, k)] += 1 + + for v in L[u]: + # 'v' has lost a neighbor in X and gained one in Y + N[(v, X)] -= 1 + N[(v, Y)] += 1 + + if N[(v, X)] == 0: + # 'v' witnesses F[v] -> X + H[(F[v], X)] += 1 + + if N[(v, Y)] == 1: + # 'v' no longer witnesses F[v] -> Y + H[(F[v], Y)] -= 1 + + C[X].remove(u) + C[Y].append(u) + + +def move_witnesses(src_color, dst_color, N, H, F, C, T_cal, L): + """Move witness along a path from src_color to dst_color.""" + X = src_color + while X != dst_color: + Y = T_cal[X] + # Move _any_ witness from X to Y = T_cal[X] + w = next(x for x in C[X] if N[(x, Y)] == 0) + change_color(w, X, Y, N=N, H=H, F=F, C=C, L=L) + X = Y + + +@nx._dispatch +def pad_graph(G, num_colors): + """Add a disconnected complete clique K_p such that the number of nodes in + the graph becomes a multiple of `num_colors`. 
+ + Assumes that the graph's nodes are labelled using integers. + + Returns the number of nodes with each color. + """ + + n_ = len(G) + r = num_colors - 1 + + # Ensure that the number of nodes in G is a multiple of (r + 1) + s = n_ // (r + 1) + if n_ != s * (r + 1): + p = (r + 1) - n_ % (r + 1) + s += 1 + + # Complete graph K_p between (imaginary) nodes [n_, ... , n_ + p] + K = nx.relabel_nodes(nx.complete_graph(p), {idx: idx + n_ for idx in range(p)}) + G.add_edges_from(K.edges) + + return s + + +def procedure_P(V_minus, V_plus, N, H, F, C, L, excluded_colors=None): + """Procedure P as described in the paper.""" + + if excluded_colors is None: + excluded_colors = set() + + A_cal = set() + T_cal = {} + R_cal = [] + + # BFS to determine A_cal, i.e. colors reachable from V- + reachable = [V_minus] + marked = set(reachable) + idx = 0 + + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + A_cal.add(pop) + R_cal.append(pop) + + # TODO: Checking whether a color has been visited can be made faster by + # using a look-up table instead of testing for membership in a set by a + # logarithmic factor. + next_layer = [] + for k in C: + if ( + H[(k, pop)] > 0 + and k not in A_cal + and k not in excluded_colors + and k not in marked + ): + next_layer.append(k) + + for dst in next_layer: + # Record that `dst` can reach `pop` + T_cal[dst] = pop + + marked.update(next_layer) + reachable.extend(next_layer) + + # Variables for the algorithm + b = len(C) - len(A_cal) + + if V_plus in A_cal: + # Easy case: V+ is in A_cal + # Move one node from V+ to V- using T_cal to find the parents. + move_witnesses(V_plus, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L) + else: + # If there is a solo edge, we can resolve the situation by + # moving witnesses from B to A, making G[A] equitable and then + # recursively balancing G[B - w] with a different V_minus and + # but the same V_plus. 
+ + A_0 = set() + A_cal_0 = set() + num_terminal_sets_found = 0 + made_equitable = False + + for W_1 in R_cal[::-1]: + for v in C[W_1]: + X = None + + for U in C: + if N[(v, U)] == 0 and U in A_cal and U != W_1: + X = U + + # v does not witness an edge in H[A_cal] + if X is None: + continue + + for U in C: + # Note: Departing from the paper here. + if N[(v, U)] >= 1 and U not in A_cal: + X_prime = U + w = v + + try: + # Finding the solo neighbor of w in X_prime + y = next( + node + for node in L[w] + if F[node] == X_prime and N[(node, W_1)] == 1 + ) + except StopIteration: + pass + else: + W = W_1 + + # Move w from W to X, now X has one extra node. + change_color(w, W, X, N=N, H=H, F=F, C=C, L=L) + + # Move witness from X to V_minus, making the coloring + # equitable. + move_witnesses( + src_color=X, + dst_color=V_minus, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal, + L=L, + ) + + # Move y from X_prime to W, making W the correct size. + change_color(y, X_prime, W, N=N, H=H, F=F, C=C, L=L) + + # Then call the procedure on G[B - y] + procedure_P( + V_minus=X_prime, + V_plus=V_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors.union(A_cal), + ) + made_equitable = True + break + + if made_equitable: + break + else: + # No node in W_1 was found such that + # it had a solo-neighbor. + A_cal_0.add(W_1) + A_0.update(C[W_1]) + num_terminal_sets_found += 1 + + if num_terminal_sets_found == b: + # Otherwise, construct the maximal independent set and find + # a pair of z_1, z_2 as in Case II. 
+ + # BFS to determine B_cal': the set of colors reachable from V+ + B_cal_prime = set() + T_cal_prime = {} + + reachable = [V_plus] + marked = set(reachable) + idx = 0 + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + B_cal_prime.add(pop) + + # No need to check for excluded_colors here because + # they only exclude colors from A_cal + next_layer = [ + k + for k in C + if H[(pop, k)] > 0 and k not in B_cal_prime and k not in marked + ] + + for dst in next_layer: + T_cal_prime[pop] = dst + + marked.update(next_layer) + reachable.extend(next_layer) + + # Construct the independent set of G[B'] + I_set = set() + I_covered = set() + W_covering = {} + + B_prime = [node for k in B_cal_prime for node in C[k]] + + # Add the nodes in V_plus to I first. + for z in C[V_plus] + B_prime: + if z in I_covered or F[z] not in B_cal_prime: + continue + + I_set.add(z) + I_covered.add(z) + I_covered.update(list(L[z])) + + for w in L[z]: + if F[w] in A_cal_0 and N[(z, F[w])] == 1: + if w not in W_covering: + W_covering[w] = z + else: + # Found z1, z2 which have the same solo + # neighbor in some W + z_1 = W_covering[w] + # z_2 = z + + Z = F[z_1] + W = F[w] + + # shift nodes along W, V- + move_witnesses( + W, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L + ) + + # shift nodes along V+ to Z + move_witnesses( + V_plus, + Z, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal_prime, + L=L, + ) + + # change color of z_1 to W + change_color(z_1, Z, W, N=N, H=H, F=F, C=C, L=L) + + # change color of w to some color in B_cal + W_plus = next( + k for k in C if N[(w, k)] == 0 and k not in A_cal + ) + change_color(w, W, W_plus, N=N, H=H, F=F, C=C, L=L) + + # recurse with G[B \cup W*] + excluded_colors.update( + [k for k in C if k != W and k not in B_cal_prime] + ) + procedure_P( + V_minus=W, + V_plus=W_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors, + ) + + made_equitable = True + break + + if made_equitable: + break + else: + assert False, ( + "Must find a w 
which is the solo neighbor " + "of two vertices in B_cal_prime." + ) + + if made_equitable: + break + + +@nx._dispatch +def equitable_color(G, num_colors): + """Provides an equitable coloring for nodes of `G`. + + Attempts to color a graph using `num_colors` colors, where no neighbors of + a node can have same color as the node itself and the number of nodes with + each color differ by at most 1. `num_colors` must be greater than the + maximum degree of `G`. The algorithm is described in [1]_ and has + complexity O(num_colors * n**2). + + Parameters + ---------- + G : networkX graph + The nodes of this graph will be colored. + + num_colors : number of colors to use + This number must be at least one more than the maximum degree of nodes + in the graph. + + Returns + ------- + A dictionary with keys representing nodes and values representing + corresponding coloring. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> nx.coloring.equitable_color(G, num_colors=3) # doctest: +SKIP + {0: 2, 1: 1, 2: 2, 3: 0} + + Raises + ------ + NetworkXAlgorithmError + If `num_colors` is not at least the maximum degree of the graph `G` + + References + ---------- + .. [1] Kierstead, H. A., Kostochka, A. V., Mydlarz, M., & Szemerédi, E. + (2010). A fast algorithm for equitable coloring. Combinatorica, 30(2), + 217-224. + """ + + # Map nodes to integers for simplicity later. + nodes_to_int = {} + int_to_nodes = {} + + for idx, node in enumerate(G.nodes): + nodes_to_int[node] = idx + int_to_nodes[idx] = node + + G = nx.relabel_nodes(G, nodes_to_int, copy=True) + + # Basic graph statistics and sanity check. + if len(G.nodes) > 0: + r_ = max(G.degree(node) for node in G.nodes) + else: + r_ = 0 + + if r_ >= num_colors: + raise nx.NetworkXAlgorithmError( + f"Graph has maximum degree {r_}, needs " + f"{r_ + 1} (> {num_colors}) colors for guaranteed coloring." + ) + + # Ensure that the number of nodes in G is a multiple of (r + 1) + pad_graph(G, num_colors) + + # Starting the algorithm. 
+ # L = {node: list(G.neighbors(node)) for node in G.nodes} + L_ = {node: [] for node in G.nodes} + + # Arbitrary equitable allocation of colors to nodes. + F = {node: idx % num_colors for idx, node in enumerate(G.nodes)} + + C = make_C_from_F(F) + + # The neighborhood is empty initially. + N = make_N_from_L_C(L_, C) + + # Currently all nodes witness all edges. + H = make_H_from_C_N(C, N) + + # Start of algorithm. + edges_seen = set() + + for u in sorted(G.nodes): + for v in sorted(G.neighbors(u)): + # Do not double count edges if (v, u) has already been seen. + if (v, u) in edges_seen: + continue + + edges_seen.add((u, v)) + + L_[u].append(v) + L_[v].append(u) + + N[(u, F[v])] += 1 + N[(v, F[u])] += 1 + + if F[u] != F[v]: + # Were 'u' and 'v' witnesses for F[u] -> F[v] or F[v] -> F[u]? + if N[(u, F[v])] == 1: + H[F[u], F[v]] -= 1 # u cannot witness an edge between F[u], F[v] + + if N[(v, F[u])] == 1: + H[F[v], F[u]] -= 1 # v cannot witness an edge between F[v], F[u] + + if N[(u, F[u])] != 0: + # Find the first color where 'u' does not have any neighbors. 
+ Y = next(k for k in C if N[(u, k)] == 0) + X = F[u] + change_color(u, X, Y, N=N, H=H, F=F, C=C, L=L_) + + # Procedure P + procedure_P(V_minus=X, V_plus=Y, N=N, H=H, F=F, C=C, L=L_) + + return {int_to_nodes[x]: F[x] for x in int_to_nodes} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f568dfcf2d1d2d5b403aef14df7e8412c73014e Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a9abf777278d85f7091e30343c9209bcf333919 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/test_coloring.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/test_coloring.py new file mode 100644 
index 0000000000000000000000000000000000000000..a2a4e39589ea981445f6e9e222087714ef88e141 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/coloring/tests/test_coloring.py @@ -0,0 +1,865 @@ +"""Greedy coloring test suite. + +""" + +import itertools + +import pytest + +import networkx as nx + +is_coloring = nx.algorithms.coloring.equitable_coloring.is_coloring +is_equitable = nx.algorithms.coloring.equitable_coloring.is_equitable + + +ALL_STRATEGIES = [ + "largest_first", + "random_sequential", + "smallest_last", + "independent_set", + "connected_sequential_bfs", + "connected_sequential_dfs", + "connected_sequential", + "saturation_largest_first", + "DSATUR", +] + +# List of strategies where interchange=True results in an error +INTERCHANGE_INVALID = ["independent_set", "saturation_largest_first", "DSATUR"] + + +class TestColoring: + def test_basic_cases(self): + def check_basic_case(graph_func, n_nodes, strategy, interchange): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + assert verify_length(coloring, n_nodes) + assert verify_coloring(graph, coloring) + + for graph_func, n_nodes in BASIC_TEST_CASES.items(): + for interchange in [True, False]: + for strategy in ALL_STRATEGIES: + check_basic_case(graph_func, n_nodes, strategy, False) + if strategy not in INTERCHANGE_INVALID: + check_basic_case(graph_func, n_nodes, strategy, True) + + def test_special_cases(self): + def check_special_case(strategy, graph_func, interchange, colors): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + if not hasattr(colors, "__len__"): + colors = [colors] + assert any(verify_length(coloring, n_colors) for n_colors in colors) + assert verify_coloring(graph, coloring) + + for strategy, arglist in SPECIAL_TEST_CASES.items(): + for args in arglist: + check_special_case(strategy, args[0], args[1], 
args[2]) + + def test_interchange_invalid(self): + graph = one_node_graph() + for strategy in INTERCHANGE_INVALID: + pytest.raises( + nx.NetworkXPointlessConcept, + nx.coloring.greedy_color, + graph, + strategy=strategy, + interchange=True, + ) + + def test_bad_inputs(self): + graph = one_node_graph() + pytest.raises( + nx.NetworkXError, + nx.coloring.greedy_color, + graph, + strategy="invalid strategy", + ) + + def test_strategy_as_function(self): + graph = lf_shc() + colors_1 = nx.coloring.greedy_color(graph, "largest_first") + colors_2 = nx.coloring.greedy_color(graph, nx.coloring.strategy_largest_first) + assert colors_1 == colors_2 + + def test_seed_argument(self): + graph = lf_shc() + rs = nx.coloring.strategy_random_sequential + c1 = nx.coloring.greedy_color(graph, lambda g, c: rs(g, c, seed=1)) + for u, v in graph.edges: + assert c1[u] != c1[v] + + def test_is_coloring(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_coloring(G, coloring) + + coloring[0] = 1 + assert not is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_is_equitable(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_equitable(G, coloring) + + G.add_edges_from([(2, 3), (2, 4), (2, 5)]) + coloring[3] = 1 + coloring[4] = 1 + coloring[5] = 1 + assert is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_num_colors(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (0, 3)]) + pytest.raises(nx.NetworkXAlgorithmError, nx.coloring.equitable_color, G, 2) + + def test_equitable_color(self): + G = nx.fast_gnp_random_graph(n=10, p=0.2, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_empty(self): + G = nx.empty_graph() + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def 
test_equitable_color_large(self): + G = nx.fast_gnp_random_graph(100, 0.1, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring, num_colors=max_degree(G) + 1) + + def test_case_V_plus_not_in_A_cal(self): + # Hand crafted case to avoid the easy case. + L = { + 0: [2, 5], + 1: [3, 4], + 2: [0, 8], + 3: [1, 7], + 4: [1, 6], + 5: [0, 6], + 6: [4, 5], + 7: [3], + 8: [2], + } + + F = { + # Color 0 + 0: 0, + 1: 0, + # Color 1 + 2: 1, + 3: 1, + 4: 1, + 5: 1, + # Color 2 + 6: 2, + 7: 2, + 8: 2, + } + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_cast_no_solo(self): + L = { + 0: [8, 9], + 1: [10, 11], + 2: [8], + 3: [9], + 4: [10, 11], + 5: [8], + 6: [9], + 7: [10, 11], + 8: [0, 2, 5], + 9: [0, 3, 6], + 10: [1, 4, 7], + 11: [1, 4, 7], + } + + F = {0: 0, 1: 0, 2: 2, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 1, 9: 1, 10: 1, 11: 1} + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_hard_prob(self): + # Tests for two levels of recursion. 
+ num_colors, s = 5, 5 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 10), + (0, 11), + (0, 12), + (0, 23), + (10, 4), + (10, 9), + (10, 20), + (11, 4), + (11, 8), + (11, 16), + (12, 9), + (12, 22), + (12, 23), + (23, 7), + (1, 17), + (1, 18), + (1, 19), + (1, 24), + (17, 5), + (17, 13), + (17, 22), + (18, 5), + (19, 5), + (19, 6), + (19, 8), + (24, 7), + (24, 16), + (2, 4), + (2, 13), + (2, 14), + (2, 15), + (4, 6), + (13, 5), + (13, 21), + (14, 6), + (14, 15), + (15, 6), + (15, 21), + (3, 16), + (3, 20), + (3, 21), + (3, 22), + (16, 8), + (20, 8), + (21, 9), + (22, 7), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_hardest_prob(self): + # Tests for two levels of recursion. + num_colors, s = 10, 4 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 19), + (0, 24), + (0, 29), + (0, 30), + (0, 35), + (19, 3), + (19, 7), + (19, 9), + (19, 15), + (19, 21), + (19, 24), + (19, 30), + (19, 38), + (24, 5), + (24, 11), + (24, 13), + (24, 20), + (24, 30), + (24, 37), + (24, 38), + (29, 6), + (29, 10), + (29, 13), + (29, 15), + (29, 16), + (29, 17), + (29, 20), + (29, 26), + (30, 6), + (30, 10), + (30, 15), + (30, 22), + (30, 23), + (30, 39), + (35, 6), + (35, 9), + (35, 14), + (35, 18), + (35, 22), + (35, 23), + (35, 25), + (35, 27), + (1, 20), + (1, 26), + (1, 31), + (1, 34), + (1, 38), + (20, 4), + (20, 8), + (20, 14), + (20, 18), + (20, 28), + (20, 33), + (26, 7), + (26, 10), + (26, 14), + (26, 18), + (26, 21), + (26, 32), + (26, 39), + (31, 5), + (31, 8), + (31, 13), + (31, 16), + (31, 17), + (31, 21), + (31, 25), + (31, 27), + (34, 7), + (34, 8), + (34, 13), + (34, 18), + (34, 22), + (34, 23), + (34, 25), + (34, 27), + (38, 4), + (38, 9), + (38, 12), + (38, 14), + (38, 21), + (38, 27), + (2, 3), + (2, 18), + (2, 21), + (2, 28), + (2, 
32), + (2, 33), + (2, 36), + (2, 37), + (2, 39), + (3, 5), + (3, 9), + (3, 13), + (3, 22), + (3, 23), + (3, 25), + (3, 27), + (18, 6), + (18, 11), + (18, 15), + (18, 39), + (21, 4), + (21, 10), + (21, 14), + (21, 36), + (28, 6), + (28, 10), + (28, 14), + (28, 16), + (28, 17), + (28, 25), + (28, 27), + (32, 5), + (32, 10), + (32, 12), + (32, 16), + (32, 17), + (32, 22), + (32, 23), + (33, 7), + (33, 10), + (33, 12), + (33, 16), + (33, 17), + (33, 25), + (33, 27), + (36, 5), + (36, 8), + (36, 15), + (36, 16), + (36, 17), + (36, 25), + (36, 27), + (37, 5), + (37, 11), + (37, 15), + (37, 16), + (37, 17), + (37, 22), + (37, 23), + (39, 7), + (39, 8), + (39, 15), + (39, 22), + (39, 23), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 # V- = 0, V+ = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_strategy_saturation_largest_first(self): + def color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=None, + nodes_to_add_between_calls=1, + ): + color_assignments = [] + aux_colored_nodes = colored_nodes.copy() + + node_iterator = nx.algorithms.coloring.greedy_coloring.strategy_saturation_largest_first( + G, aux_colored_nodes + ) + + for u in node_iterator: + # Set to keep track of colors of neighbours + neighbour_colors = { + aux_colored_nodes[v] for v in G[u] if v in aux_colored_nodes + } + # Find the first unused color. 
+ for color in itertools.count(): + if color not in neighbour_colors: + break + aux_colored_nodes[u] = color + color_assignments.append((u, color)) + + # Color nodes between iterations + for i in range(nodes_to_add_between_calls - 1): + if not len(color_assignments) + len(colored_nodes) >= len( + full_color_assignment + ): + full_color_assignment_node, color = full_color_assignment[ + len(color_assignments) + len(colored_nodes) + ] + + # Assign the new color to the current node. + aux_colored_nodes[full_color_assignment_node] = color + color_assignments.append((full_color_assignment_node, color)) + + return color_assignments, aux_colored_nodes + + for G, _, _ in SPECIAL_TEST_CASES["saturation_largest_first"]: + G = G() + + # Check that function still works when nodes are colored between iterations + for nodes_to_add_between_calls in range(1, 5): + # Get a full color assignment, (including the order in which nodes were colored) + colored_nodes = {} + full_color_assignment, full_colored_nodes = color_remaining_nodes( + G, colored_nodes + ) + + # For each node in the color assignment, add it to colored_nodes and re-run the function + for ind, (node, color) in enumerate(full_color_assignment): + colored_nodes[node] = color + + ( + partial_color_assignment, + partial_colored_nodes, + ) = color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=full_color_assignment, + nodes_to_add_between_calls=nodes_to_add_between_calls, + ) + + # Check that the color assignment and order of remaining nodes are the same + assert full_color_assignment[ind + 1 :] == partial_color_assignment + assert full_colored_nodes == partial_colored_nodes + + +# ############################ Utility functions ############################ +def verify_coloring(graph, coloring): + for node in graph.nodes(): + if node not in coloring: + return False + + color = coloring[node] + for neighbor in graph.neighbors(node): + if coloring[neighbor] == color: + return False + + return True + + +def 
verify_length(coloring, expected): + coloring = dict_to_sets(coloring) + return len(coloring) == expected + + +def dict_to_sets(colors): + if len(colors) == 0: + return [] + + k = max(colors.values()) + 1 + sets = [set() for _ in range(k)] + + for node, color in colors.items(): + sets[color].add(node) + + return sets + + +# ############################ Graph Generation ############################ + + +def empty_graph(): + return nx.Graph() + + +def one_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1]) + return graph + + +def two_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1, 2]) + graph.add_edges_from([(1, 2)]) + return graph + + +def three_node_clique(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (1, 3), (2, 3)]) + return graph + + +def disconnected(): + graph = nx.Graph() + graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) + return graph + + +def rs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def slf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def slf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 4), + (2, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 8), + (7, 8), + ] + ) + return graph + + +def lf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(6, 1), (1, 4), (4, 3), (3, 2), (2, 5)]) + return graph + + +def lf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 7), + (1, 6), + (1, 3), + (1, 4), + (7, 2), + (2, 6), + (2, 3), + (2, 5), + (5, 3), + (5, 4), + (4, 3), + ] + ) + return graph + + +def sl_shc(): + graph = nx.Graph() + 
graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from( + [(1, 2), (1, 3), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def sl_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 4), + (2, 8), + (8, 4), + (8, 6), + (8, 7), + (7, 5), + (7, 6), + (3, 4), + (4, 6), + (6, 5), + (5, 3), + ] + ) + return graph + + +def gis_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def gis_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(1, 5), (2, 5), (3, 6), (4, 6), (5, 6)]) + return graph + + +def cs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + graph.add_edges_from([(1, 2), (1, 5), (2, 3), (2, 4), (2, 5), (3, 4), (4, 5)]) + return graph + + +def rsi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (3, 4), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 5), + (1, 6), + (1, 7), + (2, 3), + (2, 8), + (2, 9), + (3, 4), + (3, 8), + (3, 9), + (4, 5), + (4, 6), + (4, 7), + (5, 6), + ] + ) + return graph + + +def sli_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 6), + (3, 4), + (4, 5), + (4, 6), + (5, 7), + (6, 7), + ] + ) + return graph + + +def sli_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + 
(1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 7), + (2, 8), + (2, 9), + (3, 6), + (3, 7), + (3, 9), + (4, 5), + (4, 6), + (4, 8), + (4, 9), + (5, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 9), + (7, 8), + (8, 9), + ] + ) + return graph + + +# -------------------------------------------------------------------------- +# Basic tests for all strategies +# For each basic graph function, specify the number of expected colors. +BASIC_TEST_CASES = { + empty_graph: 0, + one_node_graph: 1, + two_node_graph: 2, + disconnected: 2, + three_node_clique: 3, +} + + +# -------------------------------------------------------------------------- +# Special test cases. Each strategy has a list of tuples of the form +# (graph function, interchange, valid # of colors) +SPECIAL_TEST_CASES = { + "random_sequential": [ + (rs_shc, False, (2, 3)), + (rs_shc, True, 2), + (rsi_shc, True, (3, 4)), + ], + "saturation_largest_first": [(slf_shc, False, (3, 4)), (slf_hc, False, 4)], + "largest_first": [ + (lf_shc, False, (2, 3)), + (lf_hc, False, 4), + (lf_shc, True, 2), + (lf_hc, True, 3), + (lfi_shc, True, (3, 4)), + (lfi_hc, True, 4), + ], + "smallest_last": [ + (sl_shc, False, (3, 4)), + (sl_hc, False, 5), + (sl_shc, True, 3), + (sl_hc, True, 4), + (sli_shc, True, (3, 4)), + (sli_hc, True, 5), + ], + "independent_set": [(gis_shc, False, (2, 3)), (gis_hc, False, 3)], + "connected_sequential": [(cs_shc, False, (3, 4)), (cs_shc, True, 3)], + "connected_sequential_dfs": [(cs_shc, False, (3, 4))], +} + + +# -------------------------------------------------------------------------- +# Helper functions to test +# (graph function, interchange, valid # of colors) + + +def check_state(L, N, H, F, C): + s = len(C[0]) + num_colors = len(C.keys()) + + assert all(u in L[v] for u in L for v in L[u]) + assert all(F[u] != F[v] for u in L for v in L[u]) + assert all(len(L[u]) < num_colors for u in L) + assert all(len(C[x]) == s for x in C) + assert all(H[(c1, c2)] >= 0 for c1 in C for c2 in C) + assert 
all(N[(u, F[u])] == 0 for u in F) + + +def max_degree(G): + """Get the maximum degree of any node in G.""" + return max(G.degree(node) for node in G.nodes) if len(G.nodes) > 0 else 0 + + +def make_params_from_graph(G, F): + """Returns {N, L, H, C} from the given graph.""" + num_nodes = len(G) + L = {u: [] for u in range(num_nodes)} + for u, v in G.edges: + L[u].append(v) + L[v].append(u) + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + return {"N": N, "F": F, "C": C, "H": H, "L": L} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8d67293b9562b2021337a9267721fdd93eac56b Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68bf26cf7fa1f8c2838d4d4debcc2b293bdcbb23 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-311.pyc 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..551fa36ec7036dc5cbb8787d4e86f9de03499512 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee1c3ef24a1944f260e4b6e9d69a96031711317c Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ef9ab975d044f578e980a5b92f3fd976edc408a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d9a587dbbc4c0d03690109b7f8979a1aec04585 
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cec8163484bb1e8889c7539fcbe6af6037c7f8f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9974796354bd411144823221ce82165eb6e7c424 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/cuts.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/cuts.py new file mode 100644 index 0000000000000000000000000000000000000000..d5883ba8f8e08563946565c28a95a36f13ec547b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/cuts.py @@ -0,0 +1,615 @@ +""" +Flow based cut algorithms +""" +import itertools + +import networkx as nx + +# Define the default maximum flow function to use in all flow based +# cut algorithms. 
+from networkx.algorithms.flow import build_residual_network, edmonds_karp + +default_flow_func = edmonds_karp + +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +__all__ = [ + "minimum_st_node_cut", + "minimum_node_cut", + "minimum_st_edge_cut", + "minimum_edge_cut", +] + + +@nx._dispatch( + graphs={"G": 0, "auxiliary?": 4, "residual?": 5}, + preserve_edge_attrs={ + "auxiliary": {"capacity": float("inf")}, + "residual": {"capacity": float("inf")}, + }, + preserve_graph_attrs={"auxiliary", "residual"}, +) +def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): + """Returns the edges of the cut-set of a minimum (s, t)-cut. + + This function returns the set of edges of minimum cardinality that, + if removed, would destroy all paths among source and target in G. + Edge weights are not considered. See :meth:`minimum_cut` for + computing minimum cuts considering edge weights. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See :meth:`node_connectivity` for + details. The choice of the default function may change from version + to version and should not be relied on. Default value: None. 
+ + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + cutset : set + Set of edges that, if removed from the graph, will disconnect it. + + See also + -------- + :meth:`minimum_cut` + :meth:`minimum_node_cut` + :meth:`minimum_edge_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import minimum_st_edge_cut + + We use in this example the platonic icosahedral graph, which has edge + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> len(minimum_st_edge_cut(G, 0, 6)) + 5 + + If you need to compute local edge cuts on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for edge connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute local edge cuts among all pairs of + nodes of the platonic icosahedral graph reusing the data + structures. 
+ + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity + >>> H = build_auxiliary_edge_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = dict.fromkeys(G, dict()) + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> for u, v in itertools.combinations(G, 2): + ... k = len(minimum_st_edge_cut(G, u, v, auxiliary=H, residual=R)) + ... result[u][v] = k + >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing edge + cuts. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. 
+ + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(minimum_st_edge_cut(G, 0, 6, flow_func=shortest_augmenting_path)) + 5 + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_edge_connectivity(G) + else: + H = auxiliary + + kwargs = {"capacity": "capacity", "flow_func": flow_func, "residual": residual} + + cut_value, partition = nx.minimum_cut(H, s, t, **kwargs) + reachable, non_reachable = partition + # Any edge in the original graph linking the two sets in the + # partition is part of the edge cutset + cutset = set() + for u, nbrs in ((n, G[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + + return cutset + + +@nx._dispatch( + graphs={"G": 0, "auxiliary?": 4, "residual?": 5}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_node_attrs={"auxiliary": {"id": None}}, + preserve_graph_attrs={"auxiliary", "residual"}, +) +def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): + r"""Returns a set of nodes of minimum cardinality that disconnect source + from target in G. + + This function returns the set of nodes of minimum cardinality that, + if removed, would destroy all paths among source and target in G. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. + + t : node + Target node. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The choice + of the default function may change from version to version and + should not be relied on. Default value: None. 
+ + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + cutset : set + Set of nodes that, if removed, would destroy all paths between + source and target in G. + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import minimum_st_node_cut + + We use in this example the platonic icosahedral graph, which has node + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> len(minimum_st_node_cut(G, 0, 6)) + 5 + + If you need to compute local st cuts between several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity and node cuts, and the + residual network for the underlying maximum flow computation. 
+
+    Example of how to compute local st node cuts reusing the data
+    structures:
+
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity
+    >>> H = build_auxiliary_node_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as parameters
+    >>> len(minimum_st_node_cut(G, 0, 6, auxiliary=H, residual=R))
+    5
+
+    You can also use alternative flow algorithms for computing minimum st
+    node cuts. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp` which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(minimum_st_node_cut(G, 0, 6, flow_func=shortest_augmenting_path))
+    5
+
+    Notes
+    -----
+    This is a flow based implementation of minimum node cut. The algorithm
+    is based on solving a number of maximum flow computations to determine
+    the capacity of the minimum cut on an auxiliary directed network that
+    corresponds to the minimum node cut of G. It handles both directed
+    and undirected graphs. This implementation is based on algorithm 11
+    in [1]_.
+ + See also + -------- + :meth:`minimum_node_cut` + :meth:`minimum_edge_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if auxiliary is None: + H = build_auxiliary_node_connectivity(G) + else: + H = auxiliary + + mapping = H.graph.get("mapping", None) + if mapping is None: + raise nx.NetworkXError("Invalid auxiliary digraph.") + if G.has_edge(s, t) or G.has_edge(t, s): + return {} + kwargs = {"flow_func": flow_func, "residual": residual, "auxiliary": H} + + # The edge cut in the auxiliary digraph corresponds to the node cut in the + # original graph. + edge_cut = minimum_st_edge_cut(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs) + # Each node in the original graph maps to two nodes of the auxiliary graph + node_cut = {H.nodes[node]["id"] for edge in edge_cut for node in edge} + return node_cut - {s, t} + + +@nx._dispatch +def minimum_node_cut(G, s=None, t=None, flow_func=None): + r"""Returns a set of nodes of minimum cardinality that disconnects G. + + If source and target nodes are provided, this function returns the + set of nodes of minimum cardinality that, if removed, would destroy + all paths among source and target in G. If not, it returns a set + of nodes of minimum cardinality that disconnects G. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. 
And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + cutset : set + Set of nodes that, if removed, would disconnect G. If source + and target nodes are provided, the set contains the nodes that + if removed, would destroy all paths between source and target. + + Examples + -------- + >>> # Platonic icosahedral graph has node connectivity 5 + >>> G = nx.icosahedral_graph() + >>> node_cut = nx.minimum_node_cut(G) + >>> len(node_cut) + 5 + + You can use alternative flow algorithms for the underlying maximum + flow computation. In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. Alternative + flow functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> node_cut == nx.minimum_node_cut(G, flow_func=shortest_augmenting_path) + True + + If you specify a pair of nodes (source and target) as parameters, + this function returns a local st node cut. + + >>> len(nx.minimum_node_cut(G, 3, 7)) + 5 + + If you need to perform several local st cuts among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`minimum_st_node_cut` for details. + + Notes + ----- + This is a flow based implementation of minimum node cut. The algorithm + is based in solving a number of maximum flow computations to determine + the capacity of the minimum cut on an auxiliary directed network that + corresponds to the minimum node cut of G. 
It handles both directed + and undirected graphs. This implementation is based on algorithm 11 + in [1]_. + + See also + -------- + :meth:`minimum_st_node_cut` + :meth:`minimum_cut` + :meth:`minimum_edge_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local minimum node cut. + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return minimum_st_node_cut(G, s, t, flow_func=flow_func) + + # Global minimum node cut. + # Analog to the algorithm 11 for global node connectivity in [1]. + if G.is_directed(): + if not nx.is_weakly_connected(G): + raise nx.NetworkXError("Input graph is not connected") + iter_func = itertools.permutations + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + if not nx.is_connected(G): + raise nx.NetworkXError("Input graph is not connected") + iter_func = itertools.combinations + neighbors = G.neighbors + + # Reuse the auxiliary digraph and the residual network. + H = build_auxiliary_node_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R} + + # Choose a node with minimum degree. + v = min(G, key=G.degree) + # Initial node cutset is all neighbors of the node with minimum degree. + min_cut = set(G[v]) + # Compute st node cuts between v and all its non-neighbors nodes in G. 
+ for w in set(G) - set(neighbors(v)) - {v}: + this_cut = minimum_st_node_cut(G, v, w, **kwargs) + if len(min_cut) >= len(this_cut): + min_cut = this_cut + # Also for non adjacent pairs of neighbors of v. + for x, y in iter_func(neighbors(v), 2): + if y in G[x]: + continue + this_cut = minimum_st_node_cut(G, x, y, **kwargs) + if len(min_cut) >= len(this_cut): + min_cut = this_cut + + return min_cut + + +@nx._dispatch +def minimum_edge_cut(G, s=None, t=None, flow_func=None): + r"""Returns a set of edges of minimum cardinality that disconnects G. + + If source and target nodes are provided, this function returns the + set of edges of minimum cardinality that, if removed, would break + all paths among source and target in G. If not, it returns a set of + edges of minimum cardinality that disconnects G. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. Optional. Default value: None. + + t : node + Target node. Optional. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The + choice of the default function may change from version + to version and should not be relied on. Default value: None. + + Returns + ------- + cutset : set + Set of edges that, if removed, would disconnect G. If source + and target nodes are provided, the set contains the edges that + if removed, would destroy all paths between source and target. + + Examples + -------- + >>> # Platonic icosahedral graph has edge connectivity 5 + >>> G = nx.icosahedral_graph() + >>> len(nx.minimum_edge_cut(G)) + 5 + + You can use alternative flow algorithms for the underlying + maximum flow computation. 
In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. + Alternative flow functions have to be explicitly imported + from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(nx.minimum_edge_cut(G, flow_func=shortest_augmenting_path)) + 5 + + If you specify a pair of nodes (source and target) as parameters, + this function returns the value of local edge connectivity. + + >>> nx.edge_connectivity(G, 3, 7) + 5 + + If you need to perform several local computations among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`local_edge_connectivity` for details. + + Notes + ----- + This is a flow based implementation of minimum edge cut. For + undirected graphs the algorithm works by finding a 'small' dominating + set of nodes of G (see algorithm 7 in [1]_) and computing the maximum + flow between an arbitrary node in the dominating set and the rest of + nodes in it. This is an implementation of algorithm 6 in [1]_. For + directed graphs, the algorithm does n calls to the max flow function. + The function raises an error if the directed graph is not weakly + connected and returns an empty set if it is weakly connected. + It is an implementation of algorithm 8 in [1]_. + + See also + -------- + :meth:`minimum_st_edge_cut` + :meth:`minimum_node_cut` + :meth:`stoer_wagner` + :meth:`node_connectivity` + :meth:`edge_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. 
+ http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # reuse auxiliary digraph and residual network + H = build_auxiliary_edge_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "residual": R, "auxiliary": H} + + # Local minimum edge cut if s and t are not None + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return minimum_st_edge_cut(H, s, t, **kwargs) + + # Global minimum edge cut + # Analog to the algorithm for global edge connectivity + if G.is_directed(): + # Based on algorithm 8 in [1] + if not nx.is_weakly_connected(G): + raise nx.NetworkXError("Input graph is not connected") + + # Initial cutset is all edges of a node with minimum degree + node = min(G, key=G.degree) + min_cut = set(G.edges(node)) + nodes = list(G) + n = len(nodes) + for i in range(n): + try: + this_cut = minimum_st_edge_cut(H, nodes[i], nodes[i + 1], **kwargs) + if len(this_cut) <= len(min_cut): + min_cut = this_cut + except IndexError: # Last node! 
+ this_cut = minimum_st_edge_cut(H, nodes[i], nodes[0], **kwargs) + if len(this_cut) <= len(min_cut): + min_cut = this_cut + + return min_cut + + else: # undirected + # Based on algorithm 6 in [1] + if not nx.is_connected(G): + raise nx.NetworkXError("Input graph is not connected") + + # Initial cutset is all edges of a node with minimum degree + node = min(G, key=G.degree) + min_cut = set(G.edges(node)) + # A dominating set is \lambda-covering + # We need a dominating set with at least two nodes + for node in G: + D = nx.dominating_set(G, start_with=node) + v = D.pop() + if D: + break + else: + # in complete graphs the dominating set will always be of one node + # thus we return min_cut, which now contains the edges of a node + # with minimum degree + return min_cut + for w in D: + this_cut = minimum_st_edge_cut(H, v, w, **kwargs) + if len(this_cut) <= len(min_cut): + min_cut = this_cut + + return min_cut diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..0c0fad9f5ca474a6b547a399f8f284f7ff6e33a4 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py @@ -0,0 +1,249 @@ +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.utils import pairwise + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.edmonds_karp, + flow.dinitz, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +def is_path(G, path): + return all(v in G[u] for u, v in pairwise(path)) + + +def are_edge_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + paths_edges = [list(pairwise(p)) for p in paths] + num_of_edges = sum(len(e) for e in 
paths_edges) + num_unique_edges = len(set.union(*[set(es) for es in paths_edges])) + if num_of_edges == num_unique_edges: + return True + return False + + +def are_node_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + # first and last nodes are source and target + st = {paths[0][0], paths[0][-1]} + num_of_nodes = len([n for path in paths for n in path if n not in st]) + num_unique_nodes = len({n for path in paths for n in path if n not in st}) + if num_of_nodes == num_unique_nodes: + return True + return False + + +def test_graph_from_pr_2053(): + G = nx.Graph() + G.add_edges_from( + [ + ("A", "B"), + ("A", "D"), + ("A", "F"), + ("A", "G"), + ("B", "C"), + ("B", "D"), + ("B", "G"), + ("C", "D"), + ("C", "E"), + ("C", "Z"), + ("D", "E"), + ("D", "F"), + ("E", "F"), + ("E", "Z"), + ("F", "Z"), + ("G", "Z"), + ] + ) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_paths = list(nx.edge_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_edge_disjoint_paths(G, edge_paths), errmsg + assert nx.edge_connectivity(G, "A", "Z") == len(edge_paths), errmsg + # node disjoint paths + node_paths = list(nx.node_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_node_disjoint_paths(G, node_paths), errmsg + assert nx.node_connectivity(G, "A", "Z") == len(node_paths), errmsg + + +def test_florentine_families(): + G = nx.florentine_families_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, "Medici", "Strozzi") == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) 
+ assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, "Medici", "Strozzi") == len(node_dpaths), errmsg + + +def test_karate(): + G = nx.karate_club_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 33, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, 0, 33) == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 33, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, 0, 33) == len(node_dpaths), errmsg + + +def test_petersen_disjoint_paths(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 3 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 3 == len(node_dpaths), errmsg + + +def test_octahedral_disjoint_paths(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 5, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 4 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 5, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 4 == len(node_dpaths), errmsg + + +def test_icosahedral_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + 
kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 5 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 5 == len(node_dpaths), errmsg + + +def test_cutoff_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for cutoff in [2, 4]: + kwargs["cutoff"] = cutoff + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert cutoff == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert cutoff == len(node_dpaths), errmsg + + +def test_missing_source_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.edge_disjoint_paths(G, 10, 1)) + + +def test_missing_source_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 10, 1)) + + +def test_missing_target_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.edge_disjoint_paths(G, 1, 10)) + + +def test_missing_target_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 1, 10)) + + +def test_not_weakly_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_weakly_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + 
nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_not_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_isolated_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_isolated_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_invalid_auxiliary(): + with pytest.raises(nx.NetworkXError): + G = nx.complete_graph(5) + list(nx.node_disjoint_paths(G, 0, 3, auxiliary=G)) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/utils.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6860a208f0f11b50d2950e8462f1c5649b4243 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/connectivity/utils.py @@ -0,0 +1,87 @@ +""" +Utilities for connectivity package +""" +import networkx as nx + +__all__ = ["build_auxiliary_node_connectivity", "build_auxiliary_edge_connectivity"] + + +@nx._dispatch +def build_auxiliary_node_connectivity(G): + r"""Creates a directed graph D from an undirected graph G to compute flow + based node connectivity. + + For an undirected graph G having `n` nodes and `m` edges we derive a + directed graph D with `2n` nodes and `2m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc in D. 
Then for each edge (`u`, `v`) in G we add two arcs (`uB`, `vA`) + and (`vB`, `uA`) in D. Finally we set the attribute capacity = 1 for each + arc in D [1]_. + + For a directed graph having `n` nodes and `m` arcs we derive a + directed graph D with `2n` nodes and `m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc (`vA`, `vB`) in D. Then for each arc (`u`, `v`) in G we add one + arc (`uB`, `vA`) in D. Finally we set the attribute capacity = 1 for + each arc in D. + + A dictionary with a mapping between nodes in the original graph and the + auxiliary digraph is stored as a graph attribute: D.graph['mapping']. + + References + ---------- + .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and + Erlebach, 'Network Analysis: Methodological Foundations', Lecture + Notes in Computer Science, Volume 3418, Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31955-9_7 + + """ + directed = G.is_directed() + + mapping = {} + H = nx.DiGraph() + + for i, node in enumerate(G): + mapping[node] = i + H.add_node(f"{i}A", id=node) + H.add_node(f"{i}B", id=node) + H.add_edge(f"{i}A", f"{i}B", capacity=1) + + edges = [] + for source, target in G.edges(): + edges.append((f"{mapping[source]}B", f"{mapping[target]}A")) + if not directed: + edges.append((f"{mapping[target]}B", f"{mapping[source]}A")) + H.add_edges_from(edges, capacity=1) + + # Store mapping as graph attribute + H.graph["mapping"] = mapping + return H + + +@nx._dispatch +def build_auxiliary_edge_connectivity(G): + """Auxiliary digraph for computing flow based edge connectivity + + If the input graph is undirected, we replace each edge (`u`,`v`) with + two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute + 'capacity' for each arc to 1. If the input graph is directed we simply + add the 'capacity' attribute. Part of algorithm 1 in [1]_ . + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. 
(this is a + chapter, look for the reference of the book). + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + """ + if G.is_directed(): + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + H.add_edges_from(G.edges(), capacity=1) + return H + else: + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + for source, target in G.edges(): + H.add_edges_from([(source, target), (target, source)], capacity=1) + return H diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93e8cfdcb09ba54fda369d2ec0bfdab072e5490d Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/generic.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..e47c4b4f5a1fbc0811fbf67274b36f87c466ff00 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/shortest_paths/generic.py @@ -0,0 +1,719 @@ +""" +Compute the shortest paths and path lengths between nodes in the graph. + +These algorithms work with undirected and directed graphs. 
@nx._dispatch
def has_path(G, source, target):
    """Return True if there is a path in `G` from `source` to `target`.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Node at which the path starts.

    target : node
        Node at which the path ends.

    Returns
    -------
    bool
        True when at least one path connects `source` to `target`.
    """
    # EAFP: try to compute a path and treat failure as "no path exists".
    try:
        nx.shortest_path(G, source, target)
    except nx.NetworkXNoPath:
        return False
    return True


@nx._dispatch(edge_attrs="weight")
def shortest_path(G, source=None, target=None, weight=None, method="dijkstra"):
    """Compute shortest paths in the graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node, optional
        Starting node. When omitted, paths are computed from every node.

    target : node, optional
        Ending node. When omitted, paths are computed to every node.

    weight : None, string or function, optional (default = None)
        If None, every edge counts as 1. If a string, use that edge
        attribute as the edge weight (missing attributes default to 1).
        If a function, it is called with the two endpoints and the
        edge-attribute dict and must return a number.

    method : string, optional (default = 'dijkstra')
        'dijkstra' or 'bellman-ford'; anything else raises ValueError.
        Ignored (unweighted algorithms are used) when `weight` is None.

    Returns
    -------
    path : list or dictionary
        * both endpoints given -- a single list of nodes;
        * only `source` given -- dict keyed by target to a path list;
        * only `target` given -- dict keyed by source to a path list;
        * neither given -- dict of dicts, ``path[source][target]``.
        Every returned path includes both endpoints.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    ValueError
        If `method` is not among the supported options.

    Notes
    -----
    There may be more than one shortest path between a source and target;
    only one of them is returned.

    See Also
    --------
    all_pairs_shortest_path
    all_pairs_dijkstra_path
    all_pairs_bellman_ford_path
    single_source_shortest_path
    single_source_dijkstra_path
    single_source_bellman_ford_path
    """
    # Validate once up front so the branches below need no re-checking.
    if method not in ("dijkstra", "bellman-ford"):
        raise ValueError(f"method not supported: {method}")
    # Without weights the unweighted (BFS-based) routines apply.
    method = "unweighted" if weight is None else method

    if source is None and target is None:
        msg = "shortest_path for all_pairs will return an iterator in v3.3"
        warnings.warn(msg, DeprecationWarning)
        # Paths between every ordered pair of nodes.
        if method == "unweighted":
            return dict(nx.all_pairs_shortest_path(G))
        if method == "dijkstra":
            return dict(nx.all_pairs_dijkstra_path(G, weight=weight))
        return dict(nx.all_pairs_bellman_ford_path(G, weight=weight))

    if source is None:
        # Only the target is known: search backwards from it, then flip
        # every path so it runs source -> target.
        H = G.reverse(copy=False) if G.is_directed() else G
        if method == "unweighted":
            back = nx.single_source_shortest_path(H, target)
        elif method == "dijkstra":
            back = nx.single_source_dijkstra_path(H, target, weight=weight)
        else:  # method == 'bellman-ford'
            back = nx.single_source_bellman_ford_path(H, target, weight=weight)
        return {src: p[::-1] for src, p in back.items()}

    if target is None:
        # Only the source is known: one-to-all paths.
        if method == "unweighted":
            return nx.single_source_shortest_path(G, source)
        if method == "dijkstra":
            return nx.single_source_dijkstra_path(G, source, weight=weight)
        return nx.single_source_bellman_ford_path(G, source, weight=weight)

    # Both endpoints are known: a single source-target path.
    if method == "unweighted":
        return nx.bidirectional_shortest_path(G, source, target)
    if method == "dijkstra":
        return nx.bidirectional_dijkstra(G, source, target, weight)[1]
    return nx.bellman_ford_path(G, source, target, weight)
@nx._dispatch(edge_attrs="weight")
def shortest_path_length(G, source=None, target=None, weight=None, method="dijkstra"):
    """Compute shortest path lengths in the graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node, optional
        Starting node. When omitted, lengths are computed from every node.

    target : node, optional
        Ending node. When omitted, lengths are computed to every node.

    weight : None, string or function, optional (default = None)
        If None, every edge counts as 1. If a string, use that edge
        attribute as the edge weight (missing attributes default to 1).
        If a function, it is called with the two endpoints and the
        edge-attribute dict and must return a number.

    method : string, optional (default = 'dijkstra')
        'dijkstra' or 'bellman-ford'; anything else raises ValueError.
        Ignored (unweighted algorithms are used) when `weight` is None.

    Returns
    -------
    length : int or iterator
        * both endpoints given -- the length of the shortest path;
        * only `source` given -- dict keyed by target to path length;
        * only `target` given -- dict keyed by source to path length;
        * neither given -- iterator over ``(source, {target: length})``.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    NetworkXNoPath
        If no path exists between source and target.

    ValueError
        If `method` is not among the supported options.

    Notes
    -----
    The length counts edges, so it is one less than the number of nodes
    on the path.  For digraphs this is the directed path length; use
    ``G.reverse(copy=False)`` first to measure the reverse direction.

    See Also
    --------
    all_pairs_shortest_path_length
    all_pairs_dijkstra_path_length
    all_pairs_bellman_ford_path_length
    single_source_shortest_path_length
    single_source_dijkstra_path_length
    single_source_bellman_ford_path_length
    """
    # Validate once up front so the branches below need no re-checking.
    if method not in ("dijkstra", "bellman-ford"):
        raise ValueError(f"method not supported: {method}")
    # Without weights the unweighted (BFS-based) routines apply.
    method = "unweighted" if weight is None else method

    if source is None and target is None:
        # All pairs: return the lazy iterator, as documented.
        if method == "unweighted":
            return nx.all_pairs_shortest_path_length(G)
        if method == "dijkstra":
            return nx.all_pairs_dijkstra_path_length(G, weight=weight)
        return nx.all_pairs_bellman_ford_path_length(G, weight=weight)

    if source is None:
        # Only the target is known: measure backwards from it.
        H = G.reverse(copy=False) if G.is_directed() else G
        if method == "unweighted":
            return nx.single_source_shortest_path_length(H, target)
        if method == "dijkstra":
            return nx.single_source_dijkstra_path_length(H, target, weight=weight)
        return nx.single_source_bellman_ford_path_length(H, target, weight=weight)

    if target is None:
        # Only the source is known: one-to-all lengths.
        if method == "unweighted":
            return nx.single_source_shortest_path_length(G, source)
        if method == "dijkstra":
            return nx.single_source_dijkstra_path_length(G, source, weight=weight)
        return nx.single_source_bellman_ford_path_length(G, source, weight=weight)

    # Both endpoints are known: a single source-target length.
    if method == "unweighted":
        # Edge count = node count on the path minus one.
        return len(nx.bidirectional_shortest_path(G, source, target)) - 1
    if method == "dijkstra":
        return nx.dijkstra_path_length(G, source, target, weight)
    return nx.bellman_ford_path_length(G, source, target, weight)
@nx._dispatch(edge_attrs="weight")
def average_shortest_path_length(G, weight=None, method=None):
    r"""Return the average shortest path length.

    The average shortest path length is

    .. math::

       a =\sum_{\substack{s,t \in V \\ s\neq t}} \frac{d(s, t)}{n(n-1)}

    where `V` is the set of nodes in `G`, `d(s, t)` is the shortest path
    length from `s` to `t`, and `n` is the number of nodes in `G`.

    .. versionchanged:: 3.0
       An exception is raised for directed graphs that are not strongly
       connected.

    Parameters
    ----------
    G : NetworkX graph

    weight : None, string or function, optional (default = None)
        If None, every edge counts as 1. If a string, use that edge
        attribute as the edge weight (missing attributes default to 1).
        If a function, it is called with the two endpoints and the
        edge-attribute dict and must return a number.

    method : string, optional (default = 'unweighted' or 'dijkstra')
        One of 'unweighted', 'dijkstra', 'bellman-ford', 'floyd-warshall'
        or 'floyd-warshall-numpy'; anything else raises ValueError.
        Defaults to 'unweighted' when `weight` is None, else 'dijkstra'.

    Raises
    ------
    NetworkXPointlessConcept
        If `G` is the null graph (the graph on zero nodes).

    NetworkXError
        If `G` is not (strongly) connected.

    ValueError
        If `method` is not among the supported options.
    """
    single_source_methods = ["unweighted", "dijkstra", "bellman-ford"]
    all_pairs_methods = ["floyd-warshall", "floyd-warshall-numpy"]

    if method is None:
        method = "unweighted" if weight is None else "dijkstra"
    if method not in single_source_methods + all_pairs_methods:
        raise ValueError(f"method not supported: {method}")

    n = len(G)
    # The null graph has no paths at all, so the average is undefined.
    if n == 0:
        raise nx.NetworkXPointlessConcept(
            "the null graph has no paths, thus there is no average "
            "shortest path length"
        )
    # The trivial graph contributes zero trivially.
    if n == 1:
        return 0
    # The average is undefined unless every ordered pair is connected.
    if G.is_directed():
        if not nx.is_strongly_connected(G):
            raise nx.NetworkXError("Graph is not strongly connected.")
    elif not nx.is_connected(G):
        raise nx.NetworkXError("Graph is not connected.")

    if method in single_source_methods:
        # One single-source sweep per node; dispatch picks the algorithm.
        sweep = {
            "unweighted": lambda v: nx.single_source_shortest_path_length(G, v),
            "dijkstra": lambda v: nx.single_source_dijkstra_path_length(
                G, v, weight=weight
            ),
            "bellman-ford": lambda v: nx.single_source_bellman_ford_path_length(
                G, v, weight=weight
            ),
        }[method]
        total = sum(d for u in G for d in sweep(u).values())
    elif method == "floyd-warshall":
        all_pairs = nx.floyd_warshall(G, weight=weight)
        total = sum(sum(row.values()) for row in all_pairs.values())
    else:  # method == 'floyd-warshall-numpy'
        total = nx.floyd_warshall_numpy(G, weight=weight).sum()
    # n*(n-1) ordered pairs of distinct nodes.
    return total / (n * (n - 1))
@nx._dispatch(edge_attrs="weight")
def all_shortest_paths(G, source, target, weight=None, method="dijkstra"):
    """Compute all shortest simple paths in the graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node for path.

    target : node
        Ending node for path.

    weight : None, string or function, optional (default = None)
        If None, every edge counts as 1. If a string, use that edge
        attribute as the edge weight (missing attributes default to 1).
        If a function, it is called with the two endpoints and the
        edge-attribute dict and must return a number.

    method : string, optional (default = 'dijkstra')
        'dijkstra' or 'bellman-ford'; anything else raises ValueError.
        Ignored (unweighted algorithms are used) when `weight` is None.

    Returns
    -------
    paths : generator of lists
        A generator of all paths between source and target.

    Raises
    ------
    ValueError
        If `method` is not among the supported options.

    NetworkXNoPath
        If `target` cannot be reached from `source`.

    Notes
    -----
    If G contains zero-weight cycles this cannot produce all shortest
    paths (there would be infinitely many); only shortest *simple* paths
    are produced.

    See Also
    --------
    shortest_path
    single_source_shortest_path
    all_pairs_shortest_path
    """
    method = "unweighted" if weight is None else method
    # Only the predecessor map is needed; the distance map returned by the
    # weighted routines is deliberately discarded (was an unused local).
    if method == "unweighted":
        pred = nx.predecessor(G, source)
    elif method == "dijkstra":
        pred, _ = nx.dijkstra_predecessor_and_distance(G, source, weight=weight)
    elif method == "bellman-ford":
        pred, _ = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight)
    else:
        raise ValueError(f"method not supported: {method}")

    return _build_paths_from_predecessors({source}, target, pred)
@nx._dispatch(edge_attrs="weight")
def single_source_all_shortest_paths(G, source, weight=None, method="dijkstra"):
    """Compute all shortest simple paths from the given source in the graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Starting node for path.

    weight : None, string or function, optional (default = None)
        If None, every edge counts as 1. If a string, use that edge
        attribute as the edge weight (missing attributes default to 1).
        If a function, it is called with the two endpoints and the
        edge-attribute dict and must return a number.

    method : string, optional (default = 'dijkstra')
        'dijkstra' or 'bellman-ford'; anything else raises ValueError.
        Ignored (unweighted algorithms are used) when `weight` is None.

    Yields
    ------
    (node, list of lists)
        Each reachable node paired with all shortest paths to it.

    Raises
    ------
    ValueError
        If `method` is not among the supported options.

    Notes
    -----
    If G contains zero-weight cycles this cannot produce all shortest
    paths (there would be infinitely many); only shortest *simple* paths
    are produced.

    See Also
    --------
    shortest_path
    all_shortest_paths
    single_source_shortest_path
    all_pairs_shortest_path
    all_pairs_all_shortest_paths
    """
    method = "unweighted" if weight is None else method
    # Only the predecessor map is needed; the distance map returned by the
    # weighted routines is deliberately discarded (was an unused local).
    if method == "unweighted":
        pred = nx.predecessor(G, source)
    elif method == "dijkstra":
        pred, _ = nx.dijkstra_predecessor_and_distance(G, source, weight=weight)
    elif method == "bellman-ford":
        pred, _ = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight)
    else:
        raise ValueError(f"method not supported: {method}")
    for n in G:
        try:
            yield n, list(_build_paths_from_predecessors({source}, n, pred))
        except nx.NetworkXNoPath:
            # Unreachable nodes are simply omitted from the output.
            pass
@nx._dispatch(edge_attrs="weight")
def all_pairs_all_shortest_paths(G, weight=None, method="dijkstra"):
    """Compute all shortest paths between all nodes.

    Parameters
    ----------
    G : NetworkX graph

    weight : None, string or function, optional (default = None)
        If None, every edge counts as 1. If a string, use that edge
        attribute as the edge weight (missing attributes default to 1).
        If a function, it is called with the two endpoints and the
        edge-attribute dict and must return a number.

    method : string, optional (default = 'dijkstra')
        'dijkstra' or 'bellman-ford'; anything else raises ValueError.
        Ignored (unweighted algorithms are used) when `weight` is None.

    Yields
    ------
    (node, dict)
        Each source node paired with a dict keyed by target to the list
        of all shortest paths from that source.

    Raises
    ------
    ValueError
        If `method` is not among the supported options.

    Notes
    -----
    Unlike all_pairs_shortest_path, every shortest path of equal length
    is returned, not just one.

    See Also
    --------
    all_pairs_shortest_path
    single_source_all_shortest_paths
    """
    # Delegate the per-source work and materialize each source's mapping.
    for src in G:
        per_source = single_source_all_shortest_paths(
            G, src, weight=weight, method=method
        )
        yield src, dict(per_source)
+ + See Also + -------- + shortest_path + single_source_shortest_path + all_pairs_shortest_path + all_shortest_paths + bellman_ford_path + """ + if target not in pred: + raise nx.NetworkXNoPath(f"Target {target} cannot be reached from given sources") + + seen = {target} + stack = [[target, 0]] + top = 0 + while top >= 0: + node, i = stack[top] + if node in sources: + yield [p for p, n in reversed(stack[: top + 1])] + if len(pred[node]) > i: + stack[top][1] = i + 1 + next = pred[node][i] + if next in seen: + continue + else: + seen.add(next) + top += 1 + if top == len(stack): + stack.append([next, 0]) + else: + stack[top][:] = [next, 0] + else: + seen.discard(node) + top -= 1 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f50eed8bdae5e05a28d882f855b70cd7fb814792 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/layout.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/layout.py new file mode 100644 index 0000000000000000000000000000000000000000..fa120d670748a67cb536f48dbba20081669b96a4 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/layout.py @@ -0,0 +1,1297 @@ +""" +****** +Layout +****** + +Node positioning algorithms for graph drawing. + +For `random_layout()` the possible resulting shape +is a square of side [0, scale] (default: [0, 1]) +Changing `center` shifts the layout by that amount. + +For the other layout routines, the extent is +[center - scale, center + scale] (default: [-1, 1]). 
def _process_params(G, center, dim):
    """Normalize layout arguments: coerce `G` to a graph and `center` to a
    length-`dim` ndarray, raising ValueError on a dimension mismatch."""
    import numpy as np

    if not isinstance(G, nx.Graph):
        # Accept any node iterable by wrapping it in an edgeless graph.
        H = nx.Graph()
        H.add_nodes_from(G)
        G = H

    center = np.zeros(dim) if center is None else np.asarray(center)

    if len(center) != dim:
        raise ValueError(
            "length of center coordinates must match dimension of layout"
        )

    return G, center
@np_random_state(3)
def random_layout(G, center=None, dim=2, seed=None):
    """Position nodes uniformly at random in the unit square.

    Every coordinate of every node is drawn uniformly from [0.0, 1.0).
    Requires NumPy.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    center : array-like or None
        Coordinate pair around which to center the layout.

    dim : int
        Dimension of layout.

    seed : int, RandomState instance or None optional (default=None)
        Random state for deterministic node layouts.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node

    Examples
    --------
    >>> G = nx.lollipop_graph(4, 3)
    >>> pos = nx.random_layout(G)
    """
    import numpy as np

    G, center = _process_params(G, center, dim)
    coords = (seed.rand(len(G), dim) + center).astype(np.float32)
    return dict(zip(G, coords))


def circular_layout(G, scale=1, center=None, dim=2):
    # dim=2 only
    """Position nodes on a circle.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    scale : number (default: 1)
        Scale factor for positions.

    center : array-like or None
        Coordinate pair around which to center the layout.

    dim : int
        Dimension of layout; dims beyond 2 are zero-filled, dim<2 raises.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node

    Raises
    ------
    ValueError
        If dim < 2

    Notes
    -----
    Two-dimensional only; no attempt is made to minimize edge crossings.
    """
    import numpy as np

    if dim < 2:
        raise ValueError("cannot handle dimensions < 2")

    G, center = _process_params(G, center, dim)
    n = len(G)

    if n == 0:
        return {}
    if n == 1:
        return {nx.utils.arbitrary_element(G): center}

    pad = max(0, dim - 2)
    # n+1 evenly spaced angles, dropping the last (2*pi duplicates 0).
    theta = (np.linspace(0, 1, n + 1)[:-1] * 2 * np.pi).astype(np.float32)
    coords = np.column_stack([np.cos(theta), np.sin(theta), np.zeros((n, pad))])
    coords = rescale_layout(coords, scale=scale) + center
    return dict(zip(G, coords))
def shell_layout(G, nlist=None, rotate=None, scale=1, center=None, dim=2):
    """Position nodes in concentric circles.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    nlist : list of lists
        List of node lists, one per shell.

    rotate : angle in radians (default=pi/len(nlist))
        Rotation of each shell's starting position relative to the
        previous shell's. Use rotate=0 for pre-v2.5 behavior.

    scale : number (default: 1)
        Scale factor for positions.

    center : array-like or None
        Coordinate pair around which to center the layout.

    dim : int
        Dimension of layout; only dim=2 is supported, others raise.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node

    Raises
    ------
    ValueError
        If dim != 2

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> shells = [[0], [1, 2, 3]]
    >>> pos = nx.shell_layout(G, shells)

    Notes
    -----
    Two-dimensional only; no attempt is made to minimize edge crossings.
    """
    import numpy as np

    if dim != 2:
        raise ValueError("can only handle 2 dimensions")

    G, center = _process_params(G, center, dim)

    if len(G) == 0:
        return {}
    if len(G) == 1:
        return {nx.utils.arbitrary_element(G): center}

    if nlist is None:
        # Everything on a single shell.
        nlist = [list(G)]

    step = scale / len(nlist)
    # A lone node on the first shell sits at the center; otherwise the
    # first ring starts at radius `step`.
    radius = 0.0 if len(nlist[0]) == 1 else step

    if rotate is None:
        rotate = np.pi / len(nlist)
    offset = rotate
    pos = {}
    for ring in nlist:
        # endpoint=False drops the angle 2*pi, which duplicates 0.
        theta = (
            np.linspace(0, 2 * np.pi, len(ring), endpoint=False, dtype=np.float32)
            + offset
        )
        ring_pos = radius * np.column_stack([np.cos(theta), np.sin(theta)]) + center
        pos.update(zip(ring, ring_pos))
        radius += step
        offset += rotate

    return pos
def bipartite_layout(
    G, nodes, align="vertical", scale=1, center=None, aspect_ratio=4 / 3
):
    """Position nodes in two straight lines.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    nodes : list or container
        One node set of the bipartite graph; placed on the left or top.

    align : string (default='vertical')
        The alignment of nodes: vertical or horizontal.

    scale : number (default: 1)
        Scale factor for positions.

    center : array-like or None
        Coordinate pair around which to center the layout.

    aspect_ratio : number (default=4/3):
        Ratio of layout width to height.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node.

    Notes
    -----
    Two-dimensional only; no attempt is made to minimize edge crossings.
    """
    import numpy as np

    if align not in ("vertical", "horizontal"):
        raise ValueError("align must be either vertical or horizontal.")

    G, center = _process_params(G, center=center, dim=2)
    if len(G) == 0:
        return {}

    height = 1
    width = aspect_ratio * height
    offset = (width / 2, height / 2)

    # `nodes` forms one column; every remaining node forms the other.
    left = dict.fromkeys(nodes)
    right = [v for v in G if v not in left]
    ordering = list(left) + right

    left_pos = (
        np.column_stack([np.repeat(0, len(left)), np.linspace(0, height, len(left))])
        - offset
    )
    right_pos = (
        np.column_stack(
            [np.repeat(width, len(right)), np.linspace(0, height, len(right))]
        )
        - offset
    )

    coords = rescale_layout(np.concatenate([left_pos, right_pos]), scale=scale) + center
    if align == "horizontal":
        coords = coords[:, ::-1]  # swap x and y coords
    return dict(zip(ordering, coords))
@np_random_state(10)
def spring_layout(
    G,
    k=None,
    pos=None,
    fixed=None,
    iterations=50,
    threshold=1e-4,
    weight="weight",
    scale=1,
    center=None,
    dim=2,
    seed=None,
):
    """Position nodes using Fruchterman-Reingold force-directed algorithm.

    Edges act as springs pulling endpoints together while all nodes repel
    each other; the simulation iterates until positions approach
    equilibrium. Minimum node distance (0.01) and initial "temperature"
    (0.1 of the domain) are hard-coded. Fixing nodes pins them in place
    and disables final rescaling; `scale=None` also disables rescaling.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    k : float (default=None)
        Optimal distance between nodes; defaults to 1/sqrt(n). Larger
        values spread nodes farther apart.

    pos : dict or None optional (default=None)
        Initial node positions; random when None.

    fixed : list or None optional (default=None)
        Nodes to pin at their initial position. Nodes not in ``G.nodes``
        are ignored. ValueError if given without `pos`.

    iterations : int optional (default=50)
        Maximum number of iterations taken.

    threshold : float optional (default = 1e-4)
        Stop once the relative position change falls below this.

    weight : string or None optional (default='weight')
        Edge attribute holding the attractive force strength; None means
        all edges weigh 1.

    scale : number or None (default: 1)
        Scale factor for positions; only used when `fixed is None`.

    center : array-like or None
        Coordinate pair around which to center the layout; only used
        when `fixed is None`.

    dim : int
        Dimension of layout.

    seed : int, RandomState instance or None optional (default=None)
        Random state for deterministic node layouts.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node
    """
    import numpy as np

    G, center = _process_params(G, center, dim)

    if fixed is not None:
        if pos is None:
            raise ValueError("nodes are fixed without positions given")
        for node in fixed:
            if node not in pos:
                raise ValueError("nodes are fixed without positions given")
        # Translate fixed node labels into row indices of the position array.
        index_of = {node: i for i, node in enumerate(G)}
        fixed = np.asarray([index_of[node] for node in fixed if node in index_of])

    if pos is None:
        pos_arr = None
        dom_size = 1
    else:
        # Estimate the extent of the supplied layout so that randomly
        # initialized nodes land in a comparable domain.
        dom_size = max(coord for point in pos.values() for coord in point)
        if dom_size == 0:
            dom_size = 1
        pos_arr = seed.rand(len(G), dim) * dom_size + center
        for i, node in enumerate(G):
            if node in pos:
                pos_arr[i] = np.asarray(pos[node])

    if len(G) == 0:
        return {}
    if len(G) == 1:
        return {nx.utils.arbitrary_element(G.nodes()): center}

    try:
        # Sparse solver only pays off for large graphs; for small ones
        # deliberately fall through to the dense path via ValueError.
        if len(G) < 500:
            raise ValueError
        A = nx.to_scipy_sparse_array(G, weight=weight, dtype="f")
        if k is None and fixed is not None:
            # Adjust k by domain size for layouts not near 1x1.
            nnodes, _ = A.shape
            k = dom_size / np.sqrt(nnodes)
        pos = _sparse_fruchterman_reingold(
            A, k, pos_arr, fixed, iterations, threshold, dim, seed
        )
    except ValueError:
        A = nx.to_numpy_array(G, weight=weight)
        if k is None and fixed is not None:
            # Adjust k by domain size for layouts not near 1x1.
            nnodes, _ = A.shape
            k = dom_size / np.sqrt(nnodes)
        pos = _fruchterman_reingold(
            A, k, pos_arr, fixed, iterations, threshold, dim, seed
        )

    if fixed is None and scale is not None:
        pos = rescale_layout(pos, scale=scale) + center
    return dict(zip(G, pos))
np.sqrt(nnodes) + pos = _fruchterman_reingold( + A, k, pos_arr, fixed, iterations, threshold, dim, seed + ) + if fixed is None and scale is not None: + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + return pos + + +fruchterman_reingold_layout = spring_layout + + +@np_random_state(7) +def _fruchterman_reingold( + A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + import numpy as np + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # optimal distance between nodes + if k is None: + k = np.sqrt(1.0 / nnodes) + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + # We need to calculate this in case our fixed positions force our domain + # to be much bigger than 1x1 + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. 
+ dt = t / (iterations + 1) + delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype) + # the inscrutable (but fast) version + # this is still O(V^2) + # could use multilevel methods to speed this up significantly + for iteration in range(iterations): + # matrix of difference between points + delta = pos[:, np.newaxis, :] - pos[np.newaxis, :, :] + # distance between points + distance = np.linalg.norm(delta, axis=-1) + # enforce minimum distance of 0.01 + np.clip(distance, 0.01, None, out=distance) + # displacement "force" + displacement = np.einsum( + "ijk,ij->ik", delta, (k * k / distance**2 - A * distance / k) + ) + # update positions + length = np.linalg.norm(displacement, axis=-1) + length = np.where(length < 0.01, 0.1, length) + delta_pos = np.einsum("ij,i->ij", displacement, t / length) + if fixed is not None: + # don't change positions of fixed nodes + delta_pos[fixed] = 0.0 + pos += delta_pos + # cool temperature + t -= dt + if (np.linalg.norm(delta_pos) / nnodes) < threshold: + break + return pos + + +@np_random_state(7) +def _sparse_fruchterman_reingold( + A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + # Sparse version + import numpy as np + import scipy as sp + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + # make sure we have a LIst of Lists representation + try: + A = A.tolil() + except AttributeError: + A = (sp.sparse.coo_array(A)).tolil() + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # no fixed nodes + if fixed is None: + fixed = [] + + # optimal distance between nodes + if k is None: + k 
= np.sqrt(1.0 / nnodes) + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. + dt = t / (iterations + 1) + + displacement = np.zeros((dim, nnodes)) + for iteration in range(iterations): + displacement *= 0 + # loop over rows + for i in range(A.shape[0]): + if i in fixed: + continue + # difference between this row's node position and all others + delta = (pos[i] - pos).T + # distance between points + distance = np.sqrt((delta**2).sum(axis=0)) + # enforce minimum distance of 0.01 + distance = np.where(distance < 0.01, 0.01, distance) + # the adjacency matrix row + Ai = A.getrowview(i).toarray() # TODO: revisit w/ sparse 1D container + # displacement "force" + displacement[:, i] += ( + delta * (k * k / distance**2 - Ai * distance / k) + ).sum(axis=1) + # update positions + length = np.sqrt((displacement**2).sum(axis=0)) + length = np.where(length < 0.01, 0.1, length) + delta_pos = (displacement * t / length).T + pos += delta_pos + # cool temperature + t -= dt + if (np.linalg.norm(delta_pos) / nnodes) < threshold: + break + return pos + + +def kamada_kawai_layout( + G, dist=None, pos=None, weight="weight", scale=1, center=None, dim=2 +): + """Position nodes using Kamada-Kawai path-length cost-function. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + dist : dict (default=None) + A two-level dictionary of optimal distances between nodes, + indexed by source and destination node. + If None, the distance is computed using shortest_path_length(). + + pos : dict or None optional (default=None) + Initial positions for nodes as a dictionary with node as keys + and values as a coordinate list or tuple. 
If None, then use + circular_layout() for dim >= 2 and a linear layout for dim == 1. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None, then all edge weights are 1. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.kamada_kawai_layout(G) + """ + import numpy as np + + G, center = _process_params(G, center, dim) + nNodes = len(G) + if nNodes == 0: + return {} + + if dist is None: + dist = dict(nx.shortest_path_length(G, weight=weight)) + dist_mtx = 1e6 * np.ones((nNodes, nNodes)) + for row, nr in enumerate(G): + if nr not in dist: + continue + rdist = dist[nr] + for col, nc in enumerate(G): + if nc not in rdist: + continue + dist_mtx[row][col] = rdist[nc] + + if pos is None: + if dim >= 3: + pos = random_layout(G, dim=dim) + elif dim == 2: + pos = circular_layout(G, dim=dim) + else: + pos = dict(zip(G, np.linspace(0, 1, len(G)))) + pos_arr = np.array([pos[n] for n in G]) + + pos = _kamada_kawai_solve(dist_mtx, pos_arr, dim) + + pos = rescale_layout(pos, scale=scale) + center + return dict(zip(G, pos)) + + +def _kamada_kawai_solve(dist_mtx, pos_arr, dim): + # Anneal node locations based on the Kamada-Kawai cost-function, + # using the supplied matrix of preferred inter-node distances, + # and starting locations. 
    import numpy as np
    import scipy as sp

    # Regularize the inverse distances: the small term added on the diagonal
    # avoids division by zero for the (i, i) self-pairs.
    meanwt = 1e-3
    costargs = (np, 1 / (dist_mtx + np.eye(dist_mtx.shape[0]) * 1e-3), meanwt, dim)

    # L-BFGS-B with jac=True: _kamada_kawai_costfn returns (cost, gradient).
    optresult = sp.optimize.minimize(
        _kamada_kawai_costfn,
        pos_arr.ravel(),
        method="L-BFGS-B",
        args=costargs,
        jac=True,
    )

    # The optimizer works on a flat vector; reshape back to (n_nodes, dim).
    return optresult.x.reshape((-1, dim))


def _kamada_kawai_costfn(pos_vec, np, invdist, meanweight, dim):
    # Cost-function and gradient for Kamada-Kawai layout algorithm.
    # `np` is passed in as a parameter, consistent with this module's pattern
    # of importing numpy locally rather than at module level.
    nNodes = invdist.shape[0]
    pos_arr = pos_vec.reshape((nNodes, dim))

    # Pairwise displacement vectors and Euclidean separations.
    delta = pos_arr[:, np.newaxis, :] - pos_arr[np.newaxis, :, :]
    nodesep = np.linalg.norm(delta, axis=-1)
    # Unit direction vectors; the diagonal epsilon avoids 0/0 when i == j.
    direction = np.einsum("ijk,ij->ijk", delta, 1 / (nodesep + np.eye(nNodes) * 1e-3))

    # offset[i, j] = (actual separation / preferred distance) - 1; zero when
    # the pair sits exactly at its preferred distance.
    offset = nodesep * invdist - 1.0
    offset[np.diag_indices(nNodes)] = 0

    # Quadratic penalty on deviations from the preferred distances; the two
    # einsum terms are the gradient contributions of node i's row and column.
    cost = 0.5 * np.sum(offset**2)
    grad = np.einsum("ij,ij,ijk->ik", invdist, offset, direction) - np.einsum(
        "ij,ij,ijk->jk", invdist, offset, direction
    )

    # Additional parabolic term to encourage mean position to be near origin:
    sumpos = np.sum(pos_arr, axis=0)
    cost += 0.5 * meanweight * np.sum(sumpos**2)
    grad += meanweight * sumpos

    return (cost, grad.ravel())


def spectral_layout(G, weight="weight", scale=1, center=None, dim=2):
    """Position nodes using the eigenvectors of the graph Laplacian.

    Using the unnormalized Laplacian, the layout shows possible clusters of
    nodes which are an approximation of the ratio cut. If dim is the number of
    dimensions then the positions are the entries of the dim eigenvectors
    corresponding to the ascending eigenvalues starting from the second one.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    weight : string or None optional (default='weight')
        The edge attribute that holds the numerical value used for
        the edge weight. If None, then all edge weights are 1.

    scale : number (default: 1)
        Scale factor for positions.
    center : array-like or None
        Coordinate pair around which to center the layout.

    dim : int
        Dimension of layout.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> pos = nx.spectral_layout(G)

    Notes
    -----
    Directed graphs will be considered as undirected graphs when
    positioning the nodes.

    For larger graphs (>500 nodes) this will use the SciPy sparse
    eigenvalue solver (ARPACK).
    """
    # handle some special cases that break the eigensolvers
    import numpy as np

    G, center = _process_params(G, center, dim)

    # Graphs with <= 2 nodes are positioned directly; the eigensolvers below
    # need more nodes than that.
    if len(G) <= 2:
        if len(G) == 0:
            pos = np.array([])
        elif len(G) == 1:
            pos = np.array([center])
        else:
            pos = np.array([np.zeros(dim), np.array(center) * 2.0])
        return dict(zip(G, pos))
    try:
        # Sparse matrix
        if len(G) < 500:  # sparse solver for large graphs
            raise ValueError
        A = nx.to_scipy_sparse_array(G, weight=weight, dtype="d")
        # Symmetrize directed graphs
        if G.is_directed():
            A = A + np.transpose(A)
        pos = _sparse_spectral(A, dim)
    except (ImportError, ValueError):
        # Dense matrix (also the fallback when scipy is unavailable)
        A = nx.to_numpy_array(G, weight=weight)
        # Symmetrize directed graphs
        if G.is_directed():
            A += A.T
        pos = _spectral(A, dim)

    pos = rescale_layout(pos, scale=scale) + center
    pos = dict(zip(G, pos))
    return pos


def _spectral(A, dim=2):
    # Input adjacency matrix A
    # Uses dense eigenvalue solver from numpy
    import numpy as np

    try:
        nnodes, _ = A.shape
    except AttributeError as err:
        msg = "spectral() takes an adjacency matrix as input"
        raise nx.NetworkXError(msg) from err

    # form Laplacian matrix where D is diagonal of degrees
    D = np.identity(nnodes, dtype=A.dtype) * np.sum(A, axis=1)
    L = D - A

    eigenvalues, eigenvectors = np.linalg.eig(L)
    # sort and keep smallest nonzero
    index = np.argsort(eigenvalues)[1 : dim + 1]  # 0 index is zero eigenvalue
    # np.real discards any spurious imaginary parts returned by eig.
    return np.real(eigenvectors[:, index])


def _sparse_spectral(A, dim=2):
    # Input adjacency matrix A
    # Uses sparse eigenvalue solver from scipy
    # Could use multilevel methods here, see Koren "On spectral graph drawing"
    import numpy as np
    import scipy as sp

    try:
        nnodes, _ = A.shape
    except AttributeError as err:
        msg = "sparse_spectral() takes an adjacency matrix as input"
        raise nx.NetworkXError(msg) from err

    # form Laplacian matrix
    # TODO: Rm csr_array wrapper in favor of spdiags array constructor when available
    D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, nnodes, nnodes))
    L = D - A

    k = dim + 1
    # number of Lanczos vectors for ARPACK solver. What is the right scaling?
    ncv = max(2 * k + 1, int(np.sqrt(nnodes)))
    # return smallest k eigenvalues and eigenvectors
    eigenvalues, eigenvectors = sp.sparse.linalg.eigsh(L, k, which="SM", ncv=ncv)
    index = np.argsort(eigenvalues)[1:k]  # 0 index is zero eigenvalue
    return np.real(eigenvectors[:, index])


def planar_layout(G, scale=1, center=None, dim=2):
    """Position nodes without edge intersections.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G. If G is of type
        nx.PlanarEmbedding, the positions are selected accordingly.

    scale : number (default: 1)
        Scale factor for positions.

    center : array-like or None
        Coordinate pair around which to center the layout.

    dim : int
        Dimension of layout.
+ + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + NetworkXException + If G is not planar + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.planar_layout(G) + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + + if isinstance(G, nx.PlanarEmbedding): + embedding = G + else: + is_planar, embedding = nx.check_planarity(G) + if not is_planar: + raise nx.NetworkXException("G is not planar.") + pos = nx.combinatorial_embedding_to_pos(embedding) + node_list = list(embedding) + pos = np.row_stack([pos[x] for x in node_list]) + pos = pos.astype(np.float64) + pos = rescale_layout(pos, scale=scale) + center + return dict(zip(node_list, pos)) + + +def spiral_layout(G, scale=1, center=None, dim=2, resolution=0.35, equidistant=False): + """Position nodes in a spiral layout. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + scale : number (default: 1) + Scale factor for positions. + center : array-like or None + Coordinate pair around which to center the layout. + dim : int, default=2 + Dimension of layout, currently only dim=2 is supported. + Other dimension values result in a ValueError. + resolution : float, default=0.35 + The compactness of the spiral layout returned. + Lower values result in more compressed spiral layouts. + equidistant : bool, default=False + If True, nodes will be positioned equidistant from each other + by decreasing angle further from center. + If False, nodes will be positioned at equal angles + from each other by increasing separation further from center. 
    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node

    Raises
    ------
    ValueError
        If dim != 2

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> pos = nx.spiral_layout(G)
    >>> nx.draw(G, pos=pos)

    Notes
    -----
    This algorithm currently only works in two dimensions.

    """
    import numpy as np

    if dim != 2:
        raise ValueError("can only handle 2 dimensions")

    G, center = _process_params(G, center, dim)

    if len(G) == 0:
        return {}
    if len(G) == 1:
        return {nx.utils.arbitrary_element(G): center}

    pos = []
    if equidistant:
        # Walk along the spiral r = step * theta in chord-length hops, so
        # consecutive nodes end up (approximately) equally spaced.
        chord = 1
        step = 0.5
        theta = resolution
        theta += chord / (step * theta)
        for _ in range(len(G)):
            r = step * theta
            theta += chord / r
            pos.append([np.cos(theta) * r, np.sin(theta) * r])

    else:
        # Archimedean spiral: node i sits at radius i, angle resolution * i.
        dist = np.arange(len(G), dtype=float)
        angle = resolution * dist
        pos = np.transpose(dist * np.array([np.cos(angle), np.sin(angle)]))

    pos = rescale_layout(np.array(pos), scale=scale) + center

    pos = dict(zip(G, pos))

    return pos


def multipartite_layout(G, subset_key="subset", align="vertical", scale=1, center=None):
    """Position nodes in layers of straight lines.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    subset_key : string (default='subset')
        Key of node data to be used as layer subset.

    align : string (default='vertical')
        The alignment of nodes. Vertical or horizontal.

    scale : number (default: 1)
        Scale factor for positions.

    center : array-like or None
        Coordinate pair around which to center the layout.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node.

    Examples
    --------
    >>> G = nx.complete_multipartite_graph(28, 16, 10)
    >>> pos = nx.multipartite_layout(G)

    Notes
    -----
    This algorithm currently only works in two dimensions and does not
    try to minimize edge crossings.

    Network does not need to be a complete multipartite graph.
As long as nodes + have subset_key data, they will be placed in the corresponding layers. + + """ + import numpy as np + + if align not in ("vertical", "horizontal"): + msg = "align must be either vertical or horizontal." + raise ValueError(msg) + + G, center = _process_params(G, center=center, dim=2) + if len(G) == 0: + return {} + + layers = {} + for v, data in G.nodes(data=True): + try: + layer = data[subset_key] + except KeyError: + msg = "all nodes must have subset_key (default='subset') as data" + raise ValueError(msg) + layers[layer] = [v] + layers.get(layer, []) + + # Sort by layer, if possible + try: + layers = sorted(layers.items()) + except TypeError: + layers = list(layers.items()) + + pos = None + nodes = [] + width = len(layers) + for i, (_, layer) in enumerate(layers): + height = len(layer) + xs = np.repeat(i, height) + ys = np.arange(0, height, dtype=float) + offset = ((width - 1) / 2, (height - 1) / 2) + layer_pos = np.column_stack([xs, ys]) - offset + if pos is None: + pos = layer_pos + else: + pos = np.concatenate([pos, layer_pos]) + nodes.extend(layer) + pos = rescale_layout(pos, scale=scale) + center + if align == "horizontal": + pos = pos[:, ::-1] # swap x and y coords + pos = dict(zip(nodes, pos)) + return pos + + +def arf_layout( + G, + pos=None, + scaling=1, + a=1.1, + etol=1e-6, + dt=1e-3, + max_iter=1000, +): + """Arf layout for networkx + + The attractive and repulsive forces (arf) layout [1] + improves the spring layout in three ways. First, it + prevents congestion of highly connected nodes due to + strong forcing between nodes. Second, it utilizes the + layout space more effectively by preventing large gaps + that spring layout tends to create. Lastly, the arf + layout represents symmetries in the layout better than + the default spring layout. + + Parameters + ---------- + G : nx.Graph or nx.DiGraph + Networkx graph. + pos : dict + Initial position of the nodes. If set to None a + random layout will be used. 
    scaling : float
        Scales the radius of the circular layout space.
    a : float
        Strength of springs between connected nodes. Should be larger than 1.
        The greater a, the clearer the separation of unconnected sub clusters.
    etol : float
        Convergence threshold: the iteration stops once the summed magnitude
        of the position updates drops to `etol` or below.
    dt : float
        Time step for force differential equation simulations.
    max_iter : int
        Max iterations before termination of the algorithm.

    References
    ----------
    .. [1] "Self-Organization Applied to Dynamic Network Layout", M. Geipel,
       International Journal of Modern Physics C, 2007, Vol 18, No 10, pp. 1537-1549.
       https://doi.org/10.1142/S0129183107011558 https://arxiv.org/abs/0704.1748

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node.

    Examples
    --------
    >>> G = nx.grid_graph((5, 5))
    >>> pos = nx.arf_layout(G)

    """
    import warnings

    import numpy as np

    if a <= 1:
        msg = "The parameter a should be larger than 1"
        raise ValueError(msg)

    # Fall back to random positions for any node without an initial position.
    pos_tmp = nx.random_layout(G)
    if pos is None:
        pos = pos_tmp
    else:
        for node in G.nodes():
            if node not in pos:
                pos[node] = pos_tmp[node].copy()

    # Initialize spring constant matrix
    N = len(G)
    # No nodes no computation
    if N == 0:
        return pos

    # init force of springs: every ordered pair defaults to weight 1 (repulsion
    # baseline), the diagonal is zero, and each edge (x, y) raises the (x, y)
    # entry to `a` (only in that orientation).
    K = np.ones((N, N)) - np.eye(N)
    node_order = {node: i for i, node in enumerate(G)}
    for x, y in G.edges():
        if x != y:
            idx, jdx = (node_order[i] for i in (x, y))
            K[idx, jdx] = a

    # vectorize values
    p = np.asarray(list(pos.values()))

    # equation 10 in [1]
    rho = scaling * np.sqrt(N)

    # looping variables
    error = etol + 1
    n_iter = 0
    while error > etol:
        # Pairwise displacement vectors and their lengths (keepdims via the
        # trailing np.newaxis so A broadcasts against diff).
        diff = p[:, np.newaxis] - p[np.newaxis]
        A = np.linalg.norm(diff, axis=-1)[..., np.newaxis]
        # attraction_force - repulsion force
        # suppress nans due to division; caused by diagonal set to zero.
        # Does not affect the computation due to nansum
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            change = K[..., np.newaxis] * diff - rho / A * diff
        change = np.nansum(change, axis=0)
        # Forward-Euler step of the force differential equation.
        p += change * dt

        # Total magnitude of this step; the loop stops once it reaches `etol`.
        error = np.linalg.norm(change, axis=-1).sum()
        if n_iter > max_iter:
            break
        n_iter += 1
    return dict(zip(G.nodes(), p))


def rescale_layout(pos, scale=1):
    """Returns scaled position array to (-scale, scale) in all axes.

    The function acts on NumPy arrays which hold position information.
    Each position is one row of the array. The dimension of the space
    equals the number of columns. Each coordinate in one column.

    To rescale, the mean (center) is subtracted from each axis separately.
    Then all values are scaled so that the largest magnitude value
    from all axes equals `scale` (thus, the aspect ratio is preserved).
    The resulting NumPy Array is returned (order of rows unchanged).

    Note that `pos` is also modified in place: the in-place operators
    below write through to the caller's array.

    Parameters
    ----------
    pos : numpy array
        positions to be scaled. Each row is a position.

    scale : number (default: 1)
        The size of the resulting extent in all directions.

    Returns
    -------
    pos : numpy array
        scaled positions. Each row is a position.

    See Also
    --------
    rescale_layout_dict
    """
    import numpy as np

    # Find max length over all dimensions
    pos -= pos.mean(axis=0)  # center on the origin (in place)
    lim = np.abs(pos).max()  # max coordinate for all axes
    # rescale to (-scale, scale) in all directions, preserves aspect
    if lim > 0:
        pos *= scale / lim
    return pos


def rescale_layout_dict(pos, scale=1):
    """Return a dictionary of scaled positions keyed by node

    Parameters
    ----------
    pos : A dictionary of positions keyed by node

    scale : number (default: 1)
        The size of the resulting extent in all directions.
+ + Returns + ------- + pos : A dictionary of positions keyed by node + + Examples + -------- + >>> import numpy as np + >>> pos = {0: np.array((0, 0)), 1: np.array((1, 1)), 2: np.array((0.5, 0.5))} + >>> nx.rescale_layout_dict(pos) + {0: array([-1., -1.]), 1: array([1., 1.]), 2: array([0., 0.])} + + >>> pos = {0: np.array((0, 0)), 1: np.array((-1, 1)), 2: np.array((-0.5, 0.5))} + >>> nx.rescale_layout_dict(pos, scale=2) + {0: array([ 2., -2.]), 1: array([-2., 2.]), 2: array([0., 0.])} + + See Also + -------- + rescale_layout + """ + import numpy as np + + if not pos: # empty_graph + return {} + pos_v = np.array(list(pos.values())) + pos_v = rescale_layout(pos_v, scale=scale) + return dict(zip(pos, pos_v)) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/nx_latex.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/nx_latex.py new file mode 100644 index 0000000000000000000000000000000000000000..6312f71505e79bf5a2e2d9979274ba2a08b2e32b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/nx_latex.py @@ -0,0 +1,571 @@ +r""" +***** +LaTeX +***** + +Export NetworkX graphs in LaTeX format using the TikZ library within TeX/LaTeX. +Usually, you will want the drawing to appear in a figure environment so +you use ``to_latex(G, caption="A caption")``. If you want the raw +drawing commands without a figure environment use :func:`to_latex_raw`. +And if you want to write to a file instead of just returning the latex +code as a string, use ``write_latex(G, "filename.tex", caption="A caption")``. + +To construct a figure with subfigures for each graph to be shown, provide +``to_latex`` or ``write_latex`` a list of graphs, a list of subcaptions, +and a number of rows of subfigures inside the figure. 
+ +To be able to refer to the figures or subfigures in latex using ``\\ref``, +the keyword ``latex_label`` is available for figures and `sub_labels` for +a list of labels, one for each subfigure. + +We intend to eventually provide an interface to the TikZ Graph +features which include e.g. layout algorithms. + +Let us know via github what you'd like to see available, or better yet +give us some code to do it, or even better make a github pull request +to add the feature. + +The TikZ approach +================= +Drawing options can be stored on the graph as node/edge attributes, or +can be provided as dicts keyed by node/edge to a string of the options +for that node/edge. Similarly a label can be shown for each node/edge +by specifying the labels as graph node/edge attributes or by providing +a dict keyed by node/edge to the text to be written for that node/edge. + +Options for the tikzpicture environment (e.g. "[scale=2]") can be provided +via a keyword argument. Similarly default node and edge options can be +provided through keywords arguments. The default node options are applied +to the single TikZ "path" that draws all nodes (and no edges). The default edge +options are applied to a TikZ "scope" which contains a path for each edge. + +Examples +======== +>>> G = nx.path_graph(3) +>>> nx.write_latex(G, "just_my_figure.tex", as_document=True) +>>> nx.write_latex(G, "my_figure.tex", caption="A path graph", latex_label="fig1") +>>> latex_code = nx.to_latex(G) # a string rather than a file + +You can change many features of the nodes and edges. 
+ +>>> G = nx.path_graph(4, create_using=nx.DiGraph) +>>> pos = {n: (n, n) for n in G} # nodes set on a line + +>>> G.nodes[0]["style"] = "blue" +>>> G.nodes[2]["style"] = "line width=3,draw" +>>> G.nodes[3]["label"] = "Stop" +>>> G.edges[(0, 1)]["label"] = "1st Step" +>>> G.edges[(0, 1)]["label_opts"] = "near start" +>>> G.edges[(1, 2)]["style"] = "line width=3" +>>> G.edges[(1, 2)]["label"] = "2nd Step" +>>> G.edges[(2, 3)]["style"] = "green" +>>> G.edges[(2, 3)]["label"] = "3rd Step" +>>> G.edges[(2, 3)]["label_opts"] = "near end" + +>>> nx.write_latex(G, "latex_graph.tex", pos=pos, as_document=True) + +Then compile the LaTeX using something like ``pdflatex latex_graph.tex`` +and view the pdf file created: ``latex_graph.pdf``. + +If you want **subfigures** each containing one graph, you can input a list of graphs. + +>>> H1 = nx.path_graph(4) +>>> H2 = nx.complete_graph(4) +>>> H3 = nx.path_graph(8) +>>> H4 = nx.complete_graph(8) +>>> graphs = [H1, H2, H3, H4] +>>> caps = ["Path 4", "Complete graph 4", "Path 8", "Complete graph 8"] +>>> lbls = ["fig2a", "fig2b", "fig2c", "fig2d"] +>>> nx.write_latex(graphs, "subfigs.tex", n_rows=2, sub_captions=caps, sub_labels=lbls) +>>> latex_code = nx.to_latex(graphs, n_rows=2, sub_captions=caps, sub_labels=lbls) + +>>> node_color = {0: "red", 1: "orange", 2: "blue", 3: "gray!90"} +>>> edge_width = {e: "line width=1.5" for e in H3.edges} +>>> pos = nx.circular_layout(H3) +>>> latex_code = nx.to_latex(H3, pos, node_options=node_color, edge_options=edge_width) +>>> print(latex_code) +\documentclass{report} +\usepackage{tikz} +\usepackage{subcaption} + +\begin{document} +\begin{figure} + \begin{tikzpicture} + \draw + (1.0, 0.0) node[red] (0){0} + (0.707, 0.707) node[orange] (1){1} + (-0.0, 1.0) node[blue] (2){2} + (-0.707, 0.707) node[gray!90] (3){3} + (-1.0, -0.0) node (4){4} + (-0.707, -0.707) node (5){5} + (0.0, -1.0) node (6){6} + (0.707, -0.707) node (7){7}; + \begin{scope}[-] + \draw[line width=1.5] (0) to (1); + 
\draw[line width=1.5] (1) to (2); + \draw[line width=1.5] (2) to (3); + \draw[line width=1.5] (3) to (4); + \draw[line width=1.5] (4) to (5); + \draw[line width=1.5] (5) to (6); + \draw[line width=1.5] (6) to (7); + \end{scope} + \end{tikzpicture} +\end{figure} +\end{document} + +Notes +----- +If you want to change the preamble/postamble of the figure/document/subfigure +environment, use the keyword arguments: `figure_wrapper`, `document_wrapper`, +`subfigure_wrapper`. The default values are stored in private variables +e.g. ``nx.nx_layout._DOCUMENT_WRAPPER`` + +References +---------- +TikZ: https://tikz.dev/ + +TikZ options details: https://tikz.dev/tikz-actions +""" +import numbers +import os + +import networkx as nx + +__all__ = [ + "to_latex_raw", + "to_latex", + "write_latex", +] + + +@nx.utils.not_implemented_for("multigraph") +def to_latex_raw( + G, + pos="pos", + tikz_options="", + default_node_options="", + node_options="node_options", + node_label="label", + default_edge_options="", + edge_options="edge_options", + edge_label="label", + edge_label_options="edge_label_options", +): + """Return a string of the LaTeX/TikZ code to draw `G` + + This function produces just the code for the tikzpicture + without any enclosing environment. + + Parameters + ========== + G : NetworkX graph + The NetworkX graph to be drawn + pos : string or dict (default "pos") + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. + They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. 
Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. LaTeX processing such as ``"$A_1$"`` is allowed. + Or a dict keyed by node to a string holding the label for that node. + default_edge_options : string + The options for the scope drawing all edges. The default is "[-]" for + undirected graphs and "[->]" for directed graphs. + edge_options : string or dict + The name of the edge attribute on `G` that holds the options for each edge. + If the edge is a self-loop and ``"loop" not in edge_options`` the option + "loop," is added to the options for the self-loop edge. Hence you can + use "[loop above]" explicitly, but the default is "[loop]". + Or a dict keyed by edge to a string holding the options for that edge. + edge_label : string or dict + The name of the edge attribute on `G` that holds the edge label (text) + displayed for each edge. If the attribute is "" or not present, no edge + label is drawn. + Or a dict keyed by edge to a string holding the label for that edge. + edge_label_options : string or dict + The name of the edge attribute on `G` that holds the label options for + each edge. For example, "[sloped,above,blue]". The default is no options. + Or a dict keyed by edge to a string holding the label options for that edge. + + Returns + ======= + latex_code : string + The text string which draws the desired graph(s) when compiled by LaTeX. 
+ + See Also + ======== + to_latex + write_latex + """ + i4 = "\n " + i8 = "\n " + + # set up position dict + # TODO allow pos to be None and use a nice TikZ default + if not isinstance(pos, dict): + pos = nx.get_node_attributes(G, pos) + if not pos: + # circular layout with radius 2 + pos = {n: f"({round(360.0 * i / len(G), 3)}:2)" for i, n in enumerate(G)} + for node in G: + if node not in pos: + raise nx.NetworkXError(f"node {node} has no specified pos {pos}") + posnode = pos[node] + if not isinstance(posnode, str): + try: + posx, posy = posnode + pos[node] = f"({round(posx, 3)}, {round(posy, 3)})" + except (TypeError, ValueError): + msg = f"position pos[{node}] is not 2-tuple or a string: {posnode}" + raise nx.NetworkXError(msg) + + # set up all the dicts + if not isinstance(node_options, dict): + node_options = nx.get_node_attributes(G, node_options) + if not isinstance(node_label, dict): + node_label = nx.get_node_attributes(G, node_label) + if not isinstance(edge_options, dict): + edge_options = nx.get_edge_attributes(G, edge_options) + if not isinstance(edge_label, dict): + edge_label = nx.get_edge_attributes(G, edge_label) + if not isinstance(edge_label_options, dict): + edge_label_options = nx.get_edge_attributes(G, edge_label_options) + + # process default options (add brackets or not) + topts = "" if tikz_options == "" else f"[{tikz_options.strip('[]')}]" + defn = "" if default_node_options == "" else f"[{default_node_options.strip('[]')}]" + linestyle = f"{'->' if G.is_directed() else '-'}" + if default_edge_options == "": + defe = "[" + linestyle + "]" + elif "-" in default_edge_options: + defe = default_edge_options + else: + defe = f"[{linestyle},{default_edge_options.strip('[]')}]" + + # Construct the string line by line + result = " \\begin{tikzpicture}" + topts + result += i4 + " \\draw" + defn + # load the nodes + for n in G: + # node options goes inside square brackets + nopts = f"[{node_options[n].strip('[]')}]" if n in node_options else "" + 
# node text goes inside curly brackets {} + ntext = f"{{{node_label[n]}}}" if n in node_label else f"{{{n}}}" + + result += i8 + f"{pos[n]} node{nopts} ({n}){ntext}" + result += ";\n" + + # load the edges + result += " \\begin{scope}" + defe + for edge in G.edges: + u, v = edge[:2] + e_opts = f"{edge_options[edge]}".strip("[]") if edge in edge_options else "" + # add loop options for selfloops if not present + if u == v and "loop" not in e_opts: + e_opts = "loop," + e_opts + e_opts = f"[{e_opts}]" if e_opts != "" else "" + # TODO -- handle bending of multiedges + + els = edge_label_options[edge] if edge in edge_label_options else "" + # edge label options goes inside square brackets [] + els = f"[{els.strip('[]')}]" + # edge text is drawn using the TikZ node command inside curly brackets {} + e_label = f" node{els} {{{edge_label[edge]}}}" if edge in edge_label else "" + + result += i8 + f"\\draw{e_opts} ({u}) to{e_label} ({v});" + + result += "\n \\end{scope}\n \\end{tikzpicture}\n" + return result + + +_DOC_WRAPPER_TIKZ = r"""\documentclass{{report}} +\usepackage{{tikz}} +\usepackage{{subcaption}} + +\begin{{document}} +{content} +\end{{document}}""" + + +_FIG_WRAPPER = r"""\begin{{figure}} +{content}{caption}{label} +\end{{figure}}""" + + +_SUBFIG_WRAPPER = r""" \begin{{subfigure}}{{{size}\textwidth}} +{content}{caption}{label} + \end{{subfigure}}""" + + +def to_latex( + Gbunch, + pos="pos", + tikz_options="", + default_node_options="", + node_options="node_options", + node_label="node_label", + default_edge_options="", + edge_options="edge_options", + edge_label="edge_label", + edge_label_options="edge_label_options", + caption="", + latex_label="", + sub_captions=None, + sub_labels=None, + n_rows=1, + as_document=True, + document_wrapper=_DOC_WRAPPER_TIKZ, + figure_wrapper=_FIG_WRAPPER, + subfigure_wrapper=_SUBFIG_WRAPPER, +): + """Return latex code to draw the graph(s) in `Gbunch` + + The TikZ drawing utility in LaTeX is used to draw the graph(s). 
+ If `Gbunch` is a graph, it is drawn in a figure environment. + If `Gbunch` is an iterable of graphs, each is drawn in a subfigure environment + within a single figure environment. + + If `as_document` is True, the figure is wrapped inside a document environment + so that the resulting string is ready to be compiled by LaTeX. Otherwise, + the string is ready for inclusion in a larger tex document using ``\\include`` + or ``\\input`` statements. + + Parameters + ========== + Gbunch : NetworkX graph or iterable of NetworkX graphs + The NetworkX graph to be drawn or an iterable of graphs + to be drawn inside subfigures of a single figure. + pos : string or list of strings + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. + They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + If you are drawing many graphs in subfigures, use a list of position dicts. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. LaTeX processing such as ``"$A_1$"`` is allowed. + Or a dict keyed by node to a string holding the label for that node. + default_edge_options : string + The options for the scope drawing all edges. 
The default is "[-]" for + undirected graphs and "[->]" for directed graphs. + edge_options : string or dict + The name of the edge attribute on `G` that holds the options for each edge. + If the edge is a self-loop and ``"loop" not in edge_options`` the option + "loop," is added to the options for the self-loop edge. Hence you can + use "[loop above]" explicitly, but the default is "[loop]". + Or a dict keyed by edge to a string holding the options for that edge. + edge_label : string or dict + The name of the edge attribute on `G` that holds the edge label (text) + displayed for each edge. If the attribute is "" or not present, no edge + label is drawn. + Or a dict keyed by edge to a string holding the label for that edge. + edge_label_options : string or dict + The name of the edge attribute on `G` that holds the label options for + each edge. For example, "[sloped,above,blue]". The default is no options. + Or a dict keyed by edge to a string holding the label options for that edge. + caption : string + The caption string for the figure environment + latex_label : string + The latex label used for the figure for easy referral from the main text + sub_captions : list of strings + The sub_caption string for each subfigure in the figure + sub_latex_labels : list of strings + The latex label for each subfigure in the figure + n_rows : int + The number of rows of subfigures to arrange for multiple graphs + as_document : bool + Whether to wrap the latex code in a document environment for compiling + document_wrapper : formatted text string with variable ``content``. + This text is called to evaluate the content embedded in a document + environment with a preamble setting up TikZ. + figure_wrapper : formatted text string + This text is evaluated with variables ``content``, ``caption`` and ``label``. + It wraps the content and if a caption is provided, adds the latex code for + that caption, and if a label is provided, adds the latex code for a label. 
+ subfigure_wrapper : formatted text string + This text evaluate variables ``size``, ``content``, ``caption`` and ``label``. + It wraps the content and if a caption is provided, adds the latex code for + that caption, and if a label is provided, adds the latex code for a label. + The size is the vertical size of each row of subfigures as a fraction. + + Returns + ======= + latex_code : string + The text string which draws the desired graph(s) when compiled by LaTeX. + + See Also + ======== + write_latex + to_latex_raw + """ + if hasattr(Gbunch, "adj"): + raw = to_latex_raw( + Gbunch, + pos, + tikz_options, + default_node_options, + node_options, + node_label, + default_edge_options, + edge_options, + edge_label, + edge_label_options, + ) + else: # iterator of graphs + sbf = subfigure_wrapper + size = 1 / n_rows + + N = len(Gbunch) + if isinstance(pos, (str, dict)): + pos = [pos] * N + if sub_captions is None: + sub_captions = [""] * N + if sub_labels is None: + sub_labels = [""] * N + if not (len(Gbunch) == len(pos) == len(sub_captions) == len(sub_labels)): + raise nx.NetworkXError( + "length of Gbunch, sub_captions and sub_figures must agree" + ) + + raw = "" + for G, pos, subcap, sublbl in zip(Gbunch, pos, sub_captions, sub_labels): + subraw = to_latex_raw( + G, + pos, + tikz_options, + default_node_options, + node_options, + node_label, + default_edge_options, + edge_options, + edge_label, + edge_label_options, + ) + cap = f" \\caption{{{subcap}}}" if subcap else "" + lbl = f"\\label{{{sublbl}}}" if sublbl else "" + raw += sbf.format(size=size, content=subraw, caption=cap, label=lbl) + raw += "\n" + + # put raw latex code into a figure environment and optionally into a document + raw = raw[:-1] + cap = f"\n \\caption{{{caption}}}" if caption else "" + lbl = f"\\label{{{latex_label}}}" if latex_label else "" + fig = figure_wrapper.format(content=raw, caption=cap, label=lbl) + if as_document: + return document_wrapper.format(content=fig) + return fig + + 
@nx.utils.open_file(1, mode="w")
def write_latex(Gbunch, path, **options):
    """Write the latex code to draw the graph(s) onto `path`.

    This convenience function creates the latex drawing code as a string
    and writes that to a file ready to be compiled when `as_document` is True
    or ready to be ``import`` ed or ``include`` ed into your main LaTeX document.

    The `path` argument can be a string filename or a file handle to write to.

    Parameters
    ----------
    Gbunch : NetworkX graph or iterable of NetworkX graphs
        If Gbunch is a graph, it is drawn in a figure environment.
        If Gbunch is an iterable of graphs, each is drawn in a subfigure
        environment within a single figure environment.
    path : filename
        Filename or file handle to write to
    options : dict
        By default, TikZ is used with options: (others are ignored)::

            pos : string or dict or list
                The name of the node attribute on `G` that holds the position of each node.
                Positions can be sequences of length 2 with numbers for (x,y) coordinates.
                They can also be strings to denote positions in TikZ style, such as (x, y)
                or (angle:radius).
                If a dict, it should be keyed by node to a position.
                If an empty dict, a circular layout is computed by TikZ.
                If you are drawing many graphs in subfigures, use a list of position dicts.
            tikz_options : string
                The tikzpicture options description defining the options for the picture.
                Often large scale options like `[scale=2]`.
            default_node_options : string
                The draw options for a path of nodes. Individual node options override these.
            node_options : string or dict
                The name of the node attribute on `G` that holds the options for each node.
                Or a dict keyed by node to a string holding the options for that node.
            node_label : string or dict
                The name of the node attribute on `G` that holds the node label (text)
                displayed for each node. If the attribute is "" or not present, the node
                itself is drawn as a string. LaTeX processing such as ``"$A_1$"`` is allowed.
                Or a dict keyed by node to a string holding the label for that node.
            default_edge_options : string
                The options for the scope drawing all edges. The default is "[-]" for
                undirected graphs and "[->]" for directed graphs.
            edge_options : string or dict
                The name of the edge attribute on `G` that holds the options for each edge.
                If the edge is a self-loop and ``"loop" not in edge_options`` the option
                "loop," is added to the options for the self-loop edge. Hence you can
                use "[loop above]" explicitly, but the default is "[loop]".
                Or a dict keyed by edge to a string holding the options for that edge.
            edge_label : string or dict
                The name of the edge attribute on `G` that holds the edge label (text)
                displayed for each edge. If the attribute is "" or not present, no edge
                label is drawn.
                Or a dict keyed by edge to a string holding the label for that edge.
            edge_label_options : string or dict
                The name of the edge attribute on `G` that holds the label options for
                each edge. For example, "[sloped,above,blue]". The default is no options.
                Or a dict keyed by edge to a string holding the label options for that edge.
            caption : string
                The caption string for the figure environment
            latex_label : string
                The latex label used for the figure for easy referral from the main text
            sub_captions : list of strings
                The sub_caption string for each subfigure in the figure
            sub_labels : list of strings
                The latex label for each subfigure in the figure
            n_rows : int
                The number of rows of subfigures to arrange for multiple graphs
            as_document : bool
                Whether to wrap the latex code in a document environment for compiling
            document_wrapper : formatted text string with variable ``content``.
                This text is called to evaluate the content embedded in a document
                environment with a preamble setting up the TikZ syntax.
            figure_wrapper : formatted text string
                This text is evaluated with variables ``content``, ``caption`` and ``label``.
                It wraps the content and if a caption is provided, adds the latex code for
                that caption, and if a label is provided, adds the latex code for a label.
            subfigure_wrapper : formatted text string
                This text is evaluated with variables ``size``, ``content``, ``caption``
                and ``label``.
                It wraps the content and if a caption is provided, adds the latex code for
                that caption, and if a label is provided, adds the latex code for a label.
                The size is the vertical size of each row of subfigures as a fraction.

    See Also
    ========
    to_latex
    """
    path.write(to_latex(Gbunch, **options))
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_pydot.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_pydot.py
new file mode 100644
index 0000000000000000000000000000000000000000..a58aa1647c983fffe79cf915fccb107f1863b2ea
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_pydot.py
"""Unit tests for pydot drawing functions."""
import os
import tempfile
from io import StringIO

import pytest

import networkx as nx
from networkx.utils import graphs_equal

pydot = pytest.importorskip("pydot")


@pytest.mark.xfail
class TestPydot:
    def pydot_checks(self, G, prog):
        """
        Validate :mod:`pydot`-based usage of the passed NetworkX graph with the
        passed basename of an external GraphViz command (e.g., `dot`, `neato`).
        """

        # Set the name of this graph to... "G". Failing to do so will
        # subsequently trip an assertion expecting this name.
        G.graph["name"] = "G"

        # Add arbitrary nodes and edges to the passed empty graph.
        G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("A", "D")])
        G.add_node("E")

        # Validate layout of this graph with the passed GraphViz command.
        graph_layout = nx.nx_pydot.pydot_layout(G, prog=prog)
        assert isinstance(graph_layout, dict)

        # Convert this graph into a "pydot.Dot" instance.
        P = nx.nx_pydot.to_pydot(G)

        # Convert this "pydot.Dot" instance back into a graph of the same type.
        G2 = G.__class__(nx.nx_pydot.from_pydot(P))

        # Validate the original and resulting graphs to be the same.
        assert graphs_equal(G, G2)

        fd, fname = tempfile.mkstemp()

        # Serialize this "pydot.Dot" instance to a temporary file in dot format
        P.write_raw(fname)

        # Deserialize a list of new "pydot.Dot" instances back from this file.
        Pin_list = pydot.graph_from_dot_file(path=fname, encoding="utf-8")

        # Validate this file to contain only one graph.
        assert len(Pin_list) == 1

        # The single "pydot.Dot" instance deserialized from this file.
        Pin = Pin_list[0]

        # Sorted list of all nodes in the original "pydot.Dot" instance.
        n1 = sorted(p.get_name() for p in P.get_node_list())

        # Sorted list of all nodes in the deserialized "pydot.Dot" instance.
        n2 = sorted(p.get_name() for p in Pin.get_node_list())

        # Validate these instances to contain the same nodes.
        assert n1 == n2

        # Sorted list of all edges in the original "pydot.Dot" instance.
        e1 = sorted((e.get_source(), e.get_destination()) for e in P.get_edge_list())

        # Sorted list of all edges in the deserialized "pydot.Dot" instance.
        e2 = sorted((e.get_source(), e.get_destination()) for e in Pin.get_edge_list())

        # Validate these instances to contain the same edges.
        assert e1 == e2

        # Deserialize a new graph of the same type back from this file.
        Hin = nx.nx_pydot.read_dot(fname)
        Hin = G.__class__(Hin)

        # Validate the original and resulting graphs to be the same.
        assert graphs_equal(G, Hin)

        os.close(fd)
        os.unlink(fname)

    def test_undirected(self):
        self.pydot_checks(nx.Graph(), prog="neato")

    def test_directed(self):
        self.pydot_checks(nx.DiGraph(), prog="dot")

    def test_read_write(self):
        G = nx.MultiGraph()
        G.graph["name"] = "G"
        G.add_edge("1", "2", key="0")  # read assumes strings
        fh = StringIO()
        nx.nx_pydot.write_dot(G, fh)
        fh.seek(0)
        H = nx.nx_pydot.read_dot(fh)
        assert graphs_equal(G, H)


def test_pydot_issue_258():
    G = nx.Graph([("Example:A", 1)])
    with pytest.raises(ValueError):
        nx.nx_pydot.to_pydot(G)
    with pytest.raises(ValueError):
        nx.nx_pydot.pydot_layout(G)

    G = nx.Graph()
    G.add_node("1.2", style="filled", fillcolor="red:yellow")
    with pytest.raises(ValueError):
        nx.nx_pydot.to_pydot(G)
    G.remove_node("1.2")
    G.add_node("1.2", style="filled", fillcolor='"red:yellow"')
    assert (
        G.nodes.data() == nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).nodes.data()
    )

    G = nx.DiGraph()
    G.add_edge("1", "2", foo="bar:1")
    with pytest.raises(ValueError):
        nx.nx_pydot.to_pydot(G)
    G = nx.DiGraph()
    G.add_edge("1", "2", foo='"bar:1"')
    assert G["1"]["2"] == nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G))["1"]["2"]

    G = nx.MultiGraph()
    G.add_edge("1", "2", foo="b:1")
    G.add_edge("1", "2", bar="foo:foo")
    with pytest.raises(ValueError):
        nx.nx_pydot.to_pydot(G)
    G = nx.MultiGraph()
    G.add_edge("1", "2", foo='"b:1"')
    G.add_edge("1", "2", bar='"foo:foo"')
    # Keys as integers aren't preserved in the conversion. They are read as strings.
    assert [attr for _, _, attr in G.edges.data()] == [
        attr
        for _, _, attr in nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).edges.data()
    ]

    G = nx.Graph()
    G.add_edge("1", "2")
    G["1"]["2"]["f:oo"] = "bar"
    with pytest.raises(ValueError):
        nx.nx_pydot.to_pydot(G)
    G = nx.Graph()
    G.add_edge("1", "2")
    G["1"]["2"]['"f:oo"'] = "bar"
    assert G["1"]["2"] == nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G))["1"]["2"]

    G = nx.Graph([('"Example:A"', 1)])
    layout = nx.nx_pydot.pydot_layout(G)
    assert isinstance(layout, dict)


@pytest.mark.parametrize(
    "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
)
def test_hashable_pydot(graph_type):
    # gh-5790: hashable non-string nodes and attribute values must survive
    # the to_pydot/from_pydot round trip (stringified on the way through).
    G = graph_type()
    G.add_edge("5", frozenset([1]), t='"Example:A"', l=False)
    G.add_edge("1", 2, w=True, t=("node1",), l=frozenset(["node1"]))
    G.add_edge("node", (3, 3), w="string")

    assert [
        {"t": '"Example:A"', "l": "False"},
        {"w": "True", "t": "('node1',)", "l": "frozenset({'node1'})"},
        {"w": "string"},
    ] == [
        attr
        for _, _, attr in nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).edges.data()
    ]

    assert {str(i) for i in G.nodes()} == set(
        nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).nodes
    )


def test_pydot_numerical_name():
    G = nx.Graph()
    G.add_edges_from([("A", "B"), (0, 1)])
    graph_layout = nx.nx_pydot.pydot_layout(G, prog="dot")
    assert isinstance(graph_layout, dict)
    assert "0" not in graph_layout
    assert 0 in graph_layout
    assert "1" not in graph_layout
    assert 1 in graph_layout
    assert "A" in graph_layout
    assert "B" in graph_layout
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/test_import.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/test_import.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5a25703dde2bf4bbcfb4613f1ca8efb71da4d2f
Binary files /dev/null and
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/test_import.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b0d1886d9c34484327b4a5fbedb55a10899e0f7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/rcm.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/rcm.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae604caef07ccd7ede6ead17faaa1a5c9839aa2b Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/rcm.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/decorators.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..cc15882da72891a855b727c117b9125d196588c2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/decorators.py @@ -0,0 +1,1270 @@ +import bz2 +import collections +import gzip +import inspect +import itertools +import re +import warnings +from collections import defaultdict +from contextlib import contextmanager +from functools import wraps +from inspect import Parameter, signature +from os.path import splitext +from pathlib import Path + +import networkx as nx +from networkx.utils import create_py_random_state, create_random_state + +__all__ = [ + 
    "not_implemented_for",
    "open_file",
    "nodes_or_number",
    "np_random_state",
    "py_random_state",
    "argmap",
    "deprecate_positional_args",
]


def not_implemented_for(*graph_types):
    """Decorator to mark algorithms as not implemented

    Parameters
    ----------
    graph_types : container of strings
        Entries must be one of "directed", "undirected", "multigraph", or "graph".

    Returns
    -------
    _require : function
        The decorated function.

    Raises
    ------
    NetworkXNotImplemented
        If the decorated function is called with a graph whose type matches
        ``graph_types`` (e.g. a directed graph passed to a function decorated
        with ``@not_implemented_for("directed")``).

    Notes
    -----
    Multiple types are joined logically with "and".
    For "or" use multiple @not_implemented_for() lines.

    Examples
    --------
    Decorate functions like this::

       @not_implemented_for("directed")
       def sp_function(G):
           pass

       # rule out MultiDiGraph
       @not_implemented_for("directed","multigraph")
       def sp_np_function(G):
           pass

       # rule out all except DiGraph
       @not_implemented_for("undirected")
       @not_implemented_for("multigraph")
       def sp_np_function(G):
           pass
    """
    if ("directed" in graph_types) and ("undirected" in graph_types):
        raise ValueError("Function not implemented on directed AND undirected graphs?")
    if ("multigraph" in graph_types) and ("graph" in graph_types):
        raise ValueError("Function not implemented on graph AND multigraphs?")
    if not set(graph_types) < {"directed", "undirected", "multigraph", "graph"}:
        raise KeyError(
            "use one or more of directed, undirected, multigraph, graph. "
            f"You used {graph_types}"
        )

    # 3-way logic: True if "directed" input, False if "undirected" input, else None
    dval = ("directed" in graph_types) or "undirected" not in graph_types and None
    mval = ("multigraph" in graph_types) or "graph" not in graph_types and None
    errmsg = f"not implemented for {' '.join(graph_types)} type"

    def _not_implemented_for(g):
        # None means "don't care" for that axis; otherwise both axes must match
        # the excluded type for the error to fire.
        if (mval is None or mval == g.is_multigraph()) and (
            dval is None or dval == g.is_directed()
        ):
            raise nx.NetworkXNotImplemented(errmsg)

        return g

    return argmap(_not_implemented_for, 0)


# To handle new extensions, define a function accepting a `path` and `mode`.
# Then add the extension to _dispatch_dict.
fopeners = {
    ".gz": gzip.open,
    ".gzip": gzip.open,
    ".bz2": bz2.BZ2File,
}
# Maps a file extension to its opener; unknown extensions fall back to open().
_dispatch_dict = defaultdict(lambda: open, **fopeners)


def open_file(path_arg, mode="r"):
    """Decorator to ensure clean opening and closing of files.

    Parameters
    ----------
    path_arg : string or int
        Name or index of the argument that is a path.

    mode : str
        String for opening mode.

    Returns
    -------
    _open_file : function
        Function which cleanly executes the io.

    Examples
    --------
    Decorate functions like this::

       @open_file(0,"r")
       def read_function(pathname):
           pass

       @open_file(1,"w")
       def write_function(G, pathname):
           pass

       @open_file(1,"w")
       def write_function(G, pathname="graph.dot"):
           pass

       @open_file("pathname","w")
       def write_function(G, pathname="graph.dot"):
           pass

       @open_file("path", "w+")
       def another_function(arg, **kwargs):
           path = kwargs["path"]
           pass

    Notes
    -----
    Note that this decorator solves the problem when a path argument is
    specified as a string, but it does not handle the situation when the
    function wants to accept a default of None (and then handle it).
    Here is an example of how to handle this case::

       @open_file("path")
       def some_function(arg1, arg2, path=None):
           if path is None:
               fobj = tempfile.NamedTemporaryFile(delete=False)
           else:
               # `path` could have been a string or file object or something
               # similar. In any event, the decorator has given us a file object
               # and it will close it for us, if it should.
               fobj = path

           try:
               fobj.write("blah")
           finally:
               if path is None:
                   fobj.close()

    Normally, we'd want to use "with" to ensure that fobj gets closed.
    However, the decorator will make `path` a file object for us,
    and using "with" would undesirably close that file object.
    Instead, we use a try block, as shown above.
    When we exit the function, fobj will be closed, if it should be, by the decorator.
    """

    def _open_file(path):
        # Now we have the path_arg. There are two types of input to consider:
        #   1) string representing a path that should be opened
        #   2) an already opened file object
        if isinstance(path, str):
            ext = splitext(path)[1]
        elif isinstance(path, Path):
            # path is a pathlib reference to a filename
            ext = path.suffix
            path = str(path)
        else:
            # could be None, or a file handle, in which case the algorithm will
            # deal with it; the no-op closer means we never close a handle we
            # did not open ourselves.
            return path, lambda: None

        # choose the opener by extension (gzip/bz2/plain) and open the file
        fobj = _dispatch_dict[ext](path, mode=mode)
        return fobj, lambda: fobj.close()

    return argmap(_open_file, path_arg, try_finally=True)


def nodes_or_number(which_args):
    """Decorator to allow number of nodes or container of nodes.

    With this decorator, the specified argument can be either a number or a container
    of nodes. If it is a number, the nodes used are `range(n)`.
    This allows `nx.complete_graph(50)` in place of `nx.complete_graph(list(range(50)))`.
    And it also allows `nx.complete_graph(any_list_of_nodes)`.

    Parameters
    ----------
    which_args : string or int or sequence of strings or ints
        If string, the name of the argument to be treated.
        If int, the index of the argument to be treated.
        If more than one node argument is allowed, can be a list of locations.

    Returns
    -------
    _nodes_or_numbers : function
        Function which replaces int args with ranges.

    Examples
    --------
    Decorate functions like this::

       @nodes_or_number("nodes")
       def empty_graph(nodes):
           # nodes is converted to a list of nodes

       @nodes_or_number(0)
       def empty_graph(nodes):
           # nodes is converted to a list of nodes

       @nodes_or_number(["m1", "m2"])
       def grid_2d_graph(m1, m2, periodic=False):
           # m1 and m2 are each converted to a list of nodes

       @nodes_or_number([0, 1])
       def grid_2d_graph(m1, m2, periodic=False):
           # m1 and m2 are each converted to a list of nodes

       @nodes_or_number(1)
       def full_rary_tree(r, n):
           # presumably r is a number. It is not handled by this decorator.
           # n is converted to a list of nodes
    """

    def _nodes_or_number(n):
        # a number becomes range(n); anything else is treated as a container
        try:
            nodes = list(range(n))
        except TypeError:
            nodes = tuple(n)
        else:
            if n < 0:
                raise nx.NetworkXError(f"Negative number of nodes not valid: {n}")
        return (n, nodes)

    # normalize a single location into an iterable of locations
    try:
        iter_wa = iter(which_args)
    except TypeError:
        iter_wa = (which_args,)

    return argmap(_nodes_or_number, *iter_wa)


def np_random_state(random_state_argument):
    """Decorator to generate a `numpy.random.RandomState` instance.

    The decorator processes the argument indicated by `random_state_argument`
    using :func:`nx.utils.create_random_state`.
    The argument value can be a seed (integer), or a `numpy.random.RandomState`
    instance or (`None` or `numpy.random`). The latter options use the global
    random number generator used by `numpy.random`.
    The result is a `numpy.random.RandomState` instance.

    Parameters
    ----------
    random_state_argument : string or int
        The name or index of the argument to be converted
        to a `numpy.random.RandomState` instance.
    Returns
    -------
    _random_state : function
        Function whose random_state keyword argument is a RandomState instance.

    Examples
    --------
    Decorate functions like this::

       @np_random_state("seed")
       def random_float(seed=None):
           return seed.rand()

       @np_random_state(0)
       def random_float(rng=None):
           return rng.rand()

       @np_random_state(1)
       def random_array(dims, random_state=1):
           return random_state.rand(*dims)

    See Also
    --------
    py_random_state
    """
    # argmap converts the chosen argument via create_random_state before the call
    return argmap(create_random_state, random_state_argument)


def py_random_state(random_state_argument):
    """Decorator to generate a random.Random instance (or equiv).

    The decorator processes the argument indicated by `random_state_argument`
    using :func:`nx.utils.create_py_random_state`.
    The argument value can be a seed (integer), or a random number generator::

        If int, return a random.Random instance set with seed=int.
        If random.Random instance, return it.
        If None or the `random` package, return the global random number
        generator used by `random`.
        If np.random package, return the global numpy random number
        generator wrapped in a PythonRandomInterface class.
        If np.random.RandomState instance, return it wrapped in
        PythonRandomInterface
        If a PythonRandomInterface instance, return it

    Parameters
    ----------
    random_state_argument : string or int
        The name of the argument or the index of the argument in args that is
        to be converted to the random.Random instance or numpy.random.RandomState
        instance that mimics basic methods of random.Random.

    Returns
    -------
    _random_state : function
        Function whose random_state_argument is converted to a Random instance.
    Examples
    --------
    Decorate functions like this::

       @py_random_state("random_state")
       def random_float(random_state=None):
           return random_state.rand()

       @py_random_state(0)
       def random_float(rng=None):
           return rng.rand()

       @py_random_state(1)
       def random_array(dims, seed=12345):
           return seed.rand(*dims)

    See Also
    --------
    np_random_state
    """
    # argmap converts the chosen argument via create_py_random_state before the call
    return argmap(create_py_random_state, random_state_argument)


class argmap:
    """A decorator to apply a map to arguments before calling the function.

    This class provides a decorator that maps (transforms) arguments of the function
    before the function is called. Thus for example, we have similar code
    in many functions to determine whether an argument is the number of nodes
    to be created, or a list of nodes to be handled. The decorator provides
    the code to accept either -- transforming the indicated argument into a
    list of nodes before the actual function is called.

    This decorator class allows us to process single or multiple arguments.
    The arguments to be processed can be specified by string, naming the argument,
    or by index, specifying the item in the args list.

    Parameters
    ----------
    func : callable
        The function to apply to arguments

    *args : iterable of (int, str or tuple)
        A list of parameters, specified either as strings (their names), ints
        (numerical indices) or tuples, which may contain ints, strings, and
        (recursively) tuples. Each indicates which parameters the decorator
        should map. Tuples indicate that the map function takes (and returns)
        multiple parameters in the same order and nested structure as indicated
        here.

    try_finally : bool (default: False)
        When True, wrap the function call in a try-finally block with code
        for the finally block created by `func`. This is used when the map
        function constructs an object (like a file handle) that requires
        post-processing (like closing).
+ + Note: try_finally decorators cannot be used to decorate generator + functions. + + Examples + -------- + Most of these examples use `@argmap(...)` to apply the decorator to + the function defined on the next line. + In the NetworkX codebase however, `argmap` is used within a function to + construct a decorator. That is, the decorator defines a mapping function + and then uses `argmap` to build and return a decorated function. + A simple example is a decorator that specifies which currency to report money. + The decorator (named `convert_to`) would be used like:: + + @convert_to("US_Dollars", "income") + def show_me_the_money(name, income): + print(f"{name} : {income}") + + And the code to create the decorator might be:: + + def convert_to(currency, which_arg): + def _convert(amount): + if amount.currency != currency: + amount = amount.to_currency(currency) + return amount + return argmap(_convert, which_arg) + + Despite this common idiom for argmap, most of the following examples + use the `@argmap(...)` idiom to save space. + + Here's an example use of argmap to sum the elements of two of the functions + arguments. The decorated function:: + + @argmap(sum, "xlist", "zlist") + def foo(xlist, y, zlist): + return xlist - y + zlist + + is syntactic sugar for:: + + def foo(xlist, y, zlist): + x = sum(xlist) + z = sum(zlist) + return x - y + z + + and is equivalent to (using argument indexes):: + + @argmap(sum, "xlist", 2) + def foo(xlist, y, zlist): + return xlist - y + zlist + + or:: + + @argmap(sum, "zlist", 0) + def foo(xlist, y, zlist): + return xlist - y + zlist + + Transforming functions can be applied to multiple arguments, such as:: + + def swap(x, y): + return y, x + + # the 2-tuple tells argmap that the map `swap` has 2 inputs/outputs. 
+        @argmap(swap, ("a", "b"))
+        def foo(a, b, c):
+            return a / b * c
+
+    is equivalent to::
+
+        def foo(a, b, c):
+            a, b = swap(a, b)
+            return a / b * c
+
+    More generally, the applied arguments can be nested tuples of strings or ints.
+    The syntax `@argmap(some_func, ("a", ("b", "c")))` would expect `some_func` to
+    accept 2 inputs with the second expected to be a 2-tuple. It should then return
+    2 outputs with the second a 2-tuple. The returned values would replace input
+    "a", "b" and "c" respectively. Similarly for `@argmap(some_func, (0, ("b", 2)))`.
+
+    Also, note that an index larger than the number of named parameters is allowed
+    for variadic functions. For example::
+
+        def double(a):
+            return 2 * a
+
+        @argmap(double, 3)
+        def overflow(a, *args):
+            return a, args
+
+        print(overflow(1, 2, 3, 4, 5, 6))  # output is 1, (2, 3, 8, 5, 6)
+
+    **Try Finally**
+
+    Additionally, this `argmap` class can be used to create a decorator that
+    initiates a try...finally block. The decorator must be written to return
+    both the transformed argument and a closing function.
+    This feature was included to enable the `open_file` decorator which might
+    need to close the file or not depending on whether it had to open that file.
+    This feature uses the keyword-only `try_finally` argument to `@argmap`.
+ + For example this map opens a file and then makes sure it is closed:: + + def open_file(fn): + f = open(fn) + return f, lambda: f.close() + + The decorator applies that to the function `foo`:: + + @argmap(open_file, "file", try_finally=True) + def foo(file): + print(file.read()) + + is syntactic sugar for:: + + def foo(file): + file, close_file = open_file(file) + try: + print(file.read()) + finally: + close_file() + + and is equivalent to (using indexes):: + + @argmap(open_file, 0, try_finally=True) + def foo(file): + print(file.read()) + + Here's an example of the try_finally feature used to create a decorator:: + + def my_closing_decorator(which_arg): + def _opener(path): + if path is None: + path = open(path) + fclose = path.close + else: + # assume `path` handles the closing + fclose = lambda: None + return path, fclose + return argmap(_opener, which_arg, try_finally=True) + + which can then be used as:: + + @my_closing_decorator("file") + def fancy_reader(file=None): + # this code doesn't need to worry about closing the file + print(file.read()) + + Decorators with try_finally = True cannot be used with generator functions, + because the `finally` block is evaluated before the generator is exhausted:: + + @argmap(open_file, "file", try_finally=True) + def file_to_lines(file): + for line in file.readlines(): + yield line + + is equivalent to:: + + def file_to_lines_wrapped(file): + for line in file.readlines(): + yield line + + def file_to_lines_wrapper(file): + try: + file = open_file(file) + return file_to_lines_wrapped(file) + finally: + file.close() + + which behaves similarly to:: + + def file_to_lines_whoops(file): + file = open_file(file) + file.close() + for line in file.readlines(): + yield line + + because the `finally` block of `file_to_lines_wrapper` is executed before + the caller has a chance to exhaust the iterator. + + Notes + ----- + An object of this class is callable and intended to be used when + defining a decorator. 
Generally, a decorator takes a function as input
+    and constructs a function as output. Specifically, an `argmap` object
+    returns the input function decorated/wrapped so that specified arguments
+    are mapped (transformed) to new values before the decorated function is called.
+
+    As an overview, the argmap object returns a new function with all the
+    dunder values of the original function (like `__doc__`, `__name__`, etc).
+    Code for this decorated function is built based on the original function's
+    signature. It starts by mapping the input arguments to potentially new
+    values. Then it calls the decorated function with these new values in place
+    of the indicated arguments that have been mapped. The return value of the
+    original function is then returned. This new function is the function that
+    is actually called by the user.
+
+    Three additional features are provided.
+    1) The code is lazily compiled. That is, the new function is returned
+    as an object without the code compiled, but with all information
+    needed so it can be compiled upon its first invocation. This saves
+    time on import at the cost of additional time on the first call of
+    the function. Subsequent calls are then just as fast as normal.
+
+    2) If the "try_finally" keyword-only argument is True, a try block
+    follows each mapped argument, matched on the other side of the wrapped
+    call, by a finally block closing that mapping. We expect func to return
+    a 2-tuple: the mapped value and a function to be called in the finally
+    clause. This feature was included so the `open_file` decorator could
+    provide a file handle to the decorated function and close the file handle
+    after the function call. It even keeps track of whether to close the file
+    handle or not based on whether it had to open the file or the input was
+    already open. So, the decorated function does not need to include any
+    code to open or close files.
+
+    3) The maps applied can process multiple arguments. 
For example, + you could swap two arguments using a mapping, or transform + them to their sum and their difference. This was included to allow + a decorator in the `quality.py` module that checks that an input + `partition` is a valid partition of the nodes of the input graph `G`. + In this example, the map has inputs `(G, partition)`. After checking + for a valid partition, the map either raises an exception or leaves + the inputs unchanged. Thus many functions that make this check can + use the decorator rather than copy the checking code into each function. + More complicated nested argument structures are described below. + + The remaining notes describe the code structure and methods for this + class in broad terms to aid in understanding how to use it. + + Instantiating an `argmap` object simply stores the mapping function and + the input identifiers of which arguments to map. The resulting decorator + is ready to use this map to decorate any function. Calling that object + (`argmap.__call__`, but usually done via `@my_decorator`) a lazily + compiled thin wrapper of the decorated function is constructed, + wrapped with the necessary function dunder attributes like `__doc__` + and `__name__`. That thinly wrapped function is returned as the + decorated function. When that decorated function is called, the thin + wrapper of code calls `argmap._lazy_compile` which compiles the decorated + function (using `argmap.compile`) and replaces the code of the thin + wrapper with the newly compiled code. This saves the compilation step + every import of networkx, at the cost of compiling upon the first call + to the decorated function. + + When the decorated function is compiled, the code is recursively assembled + using the `argmap.assemble` method. The recursive nature is needed in + case of nested decorators. The result of the assembly is a number of + useful objects. 
+ + sig : the function signature of the original decorated function as + constructed by :func:`argmap.signature`. This is constructed + using `inspect.signature` but enhanced with attribute + strings `sig_def` and `sig_call`, and other information + specific to mapping arguments of this function. + This information is used to construct a string of code defining + the new decorated function. + + wrapped_name : a unique internally used name constructed by argmap + for the decorated function. + + functions : a dict of the functions used inside the code of this + decorated function, to be used as `globals` in `exec`. + This dict is recursively updated to allow for nested decorating. + + mapblock : code (as a list of strings) to map the incoming argument + values to their mapped values. + + finallys : code (as a list of strings) to provide the possibly nested + set of finally clauses if needed. + + mutable_args : a bool indicating whether the `sig.args` tuple should be + converted to a list so mutation can occur. + + After this recursive assembly process, the `argmap.compile` method + constructs code (as strings) to convert the tuple `sig.args` to a list + if needed. It joins the defining code with appropriate indents and + compiles the result. Finally, this code is evaluated and the original + wrapper's implementation is replaced with the compiled version (see + `argmap._lazy_compile` for more details). + + Other `argmap` methods include `_name` and `_count` which allow internally + generated names to be unique within a python session. + The methods `_flatten` and `_indent` process the nested lists of strings + into properly indented python code ready to be compiled. + + More complicated nested tuples of arguments also allowed though + usually not used. For the simple 2 argument case, the argmap + input ("a", "b") implies the mapping function will take 2 arguments + and return a 2-tuple of mapped values. 
A more complicated example + with argmap input `("a", ("b", "c"))` requires the mapping function + take 2 inputs, with the second being a 2-tuple. It then must output + the 3 mapped values in the same nested structure `(newa, (newb, newc))`. + This level of generality is not often needed, but was convenient + to implement when handling the multiple arguments. + + See Also + -------- + not_implemented_for + open_file + nodes_or_number + random_state + py_random_state + networkx.community.quality.require_partition + require_partition + + """ + + def __init__(self, func, *args, try_finally=False): + self._func = func + self._args = args + self._finally = try_finally + + @staticmethod + def _lazy_compile(func): + """Compile the source of a wrapped function + + Assemble and compile the decorated function, and intrusively replace its + code with the compiled version's. The thinly wrapped function becomes + the decorated function. + + Parameters + ---------- + func : callable + A function returned by argmap.__call__ which is in the process + of being called for the first time. + + Returns + ------- + func : callable + The same function, with a new __code__ object. + + Notes + ----- + It was observed in NetworkX issue #4732 [1] that the import time of + NetworkX was significantly bloated by the use of decorators: over half + of the import time was being spent decorating functions. This was + somewhat improved by a change made to the `decorator` library, at the + cost of a relatively heavy-weight call to `inspect.Signature.bind` + for each call to the decorated function. + + The workaround we arrived at is to do minimal work at the time of + decoration. When the decorated function is called for the first time, + we compile a function with the same function signature as the wrapped + function. The resulting decorated function is faster than one made by + the `decorator` library, so that the overhead of the first call is + 'paid off' after a small number of calls. 
+ + References + ---------- + + [1] https://github.com/networkx/networkx/issues/4732 + + """ + real_func = func.__argmap__.compile(func.__wrapped__) + func.__code__ = real_func.__code__ + func.__globals__.update(real_func.__globals__) + func.__dict__.update(real_func.__dict__) + return func + + def __call__(self, f): + """Construct a lazily decorated wrapper of f. + + The decorated function will be compiled when it is called for the first time, + and it will replace its own __code__ object so subsequent calls are fast. + + Parameters + ---------- + f : callable + A function to be decorated. + + Returns + ------- + func : callable + The decorated function. + + See Also + -------- + argmap._lazy_compile + """ + + def func(*args, __wrapper=None, **kwargs): + return argmap._lazy_compile(__wrapper)(*args, **kwargs) + + # standard function-wrapping stuff + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + func.__defaults__ = f.__defaults__ + func.__kwdefaults__.update(f.__kwdefaults__ or {}) + func.__module__ = f.__module__ + func.__qualname__ = f.__qualname__ + func.__dict__.update(f.__dict__) + func.__wrapped__ = f + + # now that we've wrapped f, we may have picked up some __dict__ or + # __kwdefaults__ items that were set by a previous argmap. Thus, we set + # these values after those update() calls. + + # If we attempt to access func from within itself, that happens through + # a closure -- which trips an error when we replace func.__code__. The + # standard workaround for functions which can't see themselves is to use + # a Y-combinator, as we do here. 
+ func.__kwdefaults__["_argmap__wrapper"] = func + + # this self-reference is here because functools.wraps preserves + # everything in __dict__, and we don't want to mistake a non-argmap + # wrapper for an argmap wrapper + func.__self__ = func + + # this is used to variously call self.assemble and self.compile + func.__argmap__ = self + + if hasattr(f, "__argmap__"): + func.__is_generator = f.__is_generator + else: + func.__is_generator = inspect.isgeneratorfunction(f) + + if self._finally and func.__is_generator: + raise nx.NetworkXError("argmap cannot decorate generators with try_finally") + + return func + + __count = 0 + + @classmethod + def _count(cls): + """Maintain a globally-unique identifier for function names and "file" names + + Note that this counter is a class method reporting a class variable + so the count is unique within a Python session. It could differ from + session to session for a specific decorator depending on the order + that the decorators are created. But that doesn't disrupt `argmap`. + + This is used in two places: to construct unique variable names + in the `_name` method and to construct unique fictitious filenames + in the `_compile` method. + + Returns + ------- + count : int + An integer unique to this Python session (simply counts from zero) + """ + cls.__count += 1 + return cls.__count + + _bad_chars = re.compile("[^a-zA-Z0-9_]") + + @classmethod + def _name(cls, f): + """Mangle the name of a function to be unique but somewhat human-readable + + The names are unique within a Python session and set using `_count`. + + Parameters + ---------- + f : str or object + + Returns + ------- + name : str + The mangled version of `f.__name__` (if `f.__name__` exists) or `f` + + """ + f = f.__name__ if hasattr(f, "__name__") else f + fname = re.sub(cls._bad_chars, "_", f) + return f"argmap_{fname}_{cls._count()}" + + def compile(self, f): + """Compile the decorated function. 
+
+        Called once for a given decorated function -- collects the code from all
+        argmap decorators in the stack, and compiles the decorated function.
+
+        Much of the work done here uses the `assemble` method to allow recursive
+        treatment of multiple argmap decorators on a single decorated function.
+        That flattens the argmap decorators, collects the source code to construct
+        a single decorated function, then compiles/executes/returns that function.
+
+        The source code for the decorated function is stored as an attribute
+        `_code` on the function object itself.
+
+        Note that Python's `compile` function requires a filename, but this
+        code is constructed without a file, so a fictitious filename is used
+        to describe where the function comes from. The name is something like:
+        "argmap compilation 4".
+
+        Parameters
+        ----------
+        f : callable
+            The function to be decorated
+
+        Returns
+        -------
+        func : callable
+            The decorated function
+
+        """
+        sig, wrapped_name, functions, mapblock, finallys, mutable_args = self.assemble(
+            f
+        )
+
+        # "#" marks a dedent for `_indent`; the call closes any open try blocks.
+        call = f"{sig.call_sig.format(wrapped_name)}#"
+        mut_args = f"{sig.args} = list({sig.args})" if mutable_args else ""
+        body = argmap._indent(sig.def_sig, mut_args, mapblock, call, finallys)
+        code = "\n".join(body)
+
+        locl = {}
+        globl = dict(functions.values())
+        filename = f"{self.__class__} compilation {self._count()}"
+        compiled = compile(code, filename, "exec")
+        exec(compiled, globl, locl)
+        func = locl[sig.name]
+        func._code = code
+        return func
+
+    def assemble(self, f):
+        """Collects components of the source for the decorated function wrapping f.
+
+        If `f` has multiple argmap decorators, we recursively assemble the stack of
+        decorators into a single flattened function.
+
+        This method is part of the `compile` method's process yet separated
+        from that method to allow recursive processing. The outputs are
+        strings, dictionaries and lists that collect needed info to
+        flatten any nested argmap-decoration. 
+ + Parameters + ---------- + f : callable + The function to be decorated. If f is argmapped, we assemble it. + + Returns + ------- + sig : argmap.Signature + The function signature as an `argmap.Signature` object. + wrapped_name : str + The mangled name used to represent the wrapped function in the code + being assembled. + functions : dict + A dictionary mapping id(g) -> (mangled_name(g), g) for functions g + referred to in the code being assembled. These need to be present + in the ``globals`` scope of ``exec`` when defining the decorated + function. + mapblock : list of lists and/or strings + Code that implements mapping of parameters including any try blocks + if needed. This code will precede the decorated function call. + finallys : list of lists and/or strings + Code that implements the finally blocks to post-process the + arguments (usually close any files if needed) after the + decorated function is called. + mutable_args : bool + True if the decorator needs to modify positional arguments + via their indices. The compile method then turns the argument + tuple into a list so that the arguments can be modified. + """ + + # first, we check if f is already argmapped -- if that's the case, + # build up the function recursively. + # > mapblock is generally a list of function calls of the sort + # arg = func(arg) + # in addition to some try-blocks if needed. + # > finallys is a recursive list of finally blocks of the sort + # finally: + # close_func_1() + # finally: + # close_func_2() + # > functions is a dict of functions used in the scope of our decorated + # function. It will be used to construct globals used in compilation. + # We make functions[id(f)] = name_of_f, f to ensure that a given + # function is stored and named exactly once even if called by + # nested decorators. 
+ if hasattr(f, "__argmap__") and f.__self__ is f: + ( + sig, + wrapped_name, + functions, + mapblock, + finallys, + mutable_args, + ) = f.__argmap__.assemble(f.__wrapped__) + functions = dict(functions) # shallow-copy just in case + else: + sig = self.signature(f) + wrapped_name = self._name(f) + mapblock, finallys = [], [] + functions = {id(f): (wrapped_name, f)} + mutable_args = False + + if id(self._func) in functions: + fname, _ = functions[id(self._func)] + else: + fname, _ = functions[id(self._func)] = self._name(self._func), self._func + + # this is a bit complicated -- we can call functions with a variety of + # nested arguments, so long as their input and output are tuples with + # the same nested structure. e.g. ("a", "b") maps arguments a and b. + # A more complicated nesting like (0, (3, 4)) maps arguments 0, 3, 4 + # expecting the mapping to output new values in the same nested shape. + # The ability to argmap multiple arguments was necessary for + # the decorator `nx.algorithms.community.quality.require_partition`, and + # while we're not taking full advantage of the ability to handle + # multiply-nested tuples, it was convenient to implement this in + # generality because the recursive call to `get_name` is necessary in + # any case. 
+ applied = set() + + def get_name(arg, first=True): + nonlocal mutable_args + if isinstance(arg, tuple): + name = ", ".join(get_name(x, False) for x in arg) + return name if first else f"({name})" + if arg in applied: + raise nx.NetworkXError(f"argument {arg} is specified multiple times") + applied.add(arg) + if arg in sig.names: + return sig.names[arg] + elif isinstance(arg, str): + if sig.kwargs is None: + raise nx.NetworkXError( + f"name {arg} is not a named parameter and this function doesn't have kwargs" + ) + return f"{sig.kwargs}[{arg!r}]" + else: + if sig.args is None: + raise nx.NetworkXError( + f"index {arg} not a parameter index and this function doesn't have args" + ) + mutable_args = True + return f"{sig.args}[{arg - sig.n_positional}]" + + if self._finally: + # here's where we handle try_finally decorators. Such a decorator + # returns a mapped argument and a function to be called in a + # finally block. This feature was required by the open_file + # decorator. The below generates the code + # + # name, final = func(name) #<--append to mapblock + # try: #<--append to mapblock + # ... more argmapping and try blocks + # return WRAPPED_FUNCTION(...) + # ... more finally blocks + # finally: #<--prepend to finallys + # final() #<--prepend to finallys + # + for a in self._args: + name = get_name(a) + final = self._name(name) + mapblock.append(f"{name}, {final} = {fname}({name})") + mapblock.append("try:") + finallys = ["finally:", f"{final}()#", "#", finallys] + else: + mapblock.extend( + f"{name} = {fname}({name})" for name in map(get_name, self._args) + ) + + return sig, wrapped_name, functions, mapblock, finallys, mutable_args + + @classmethod + def signature(cls, f): + r"""Construct a Signature object describing `f` + + Compute a Signature so that we can write a function wrapping f with + the same signature and call-type. 
+ + Parameters + ---------- + f : callable + A function to be decorated + + Returns + ------- + sig : argmap.Signature + The Signature of f + + Notes + ----- + The Signature is a namedtuple with names: + + name : a unique version of the name of the decorated function + signature : the inspect.signature of the decorated function + def_sig : a string used as code to define the new function + call_sig : a string used as code to call the decorated function + names : a dict keyed by argument name and index to the argument's name + n_positional : the number of positional arguments in the signature + args : the name of the VAR_POSITIONAL argument if any, i.e. \*theseargs + kwargs : the name of the VAR_KEYWORDS argument if any, i.e. \*\*kwargs + + These named attributes of the signature are used in `assemble` and `compile` + to construct a string of source code for the decorated function. + + """ + sig = inspect.signature(f, follow_wrapped=False) + def_sig = [] + call_sig = [] + names = {} + + kind = None + args = None + kwargs = None + npos = 0 + for i, param in enumerate(sig.parameters.values()): + # parameters can be position-only, keyword-or-position, keyword-only + # in any combination, but only in the order as above. 
we do edge + # detection to add the appropriate punctuation + prev = kind + kind = param.kind + if prev == param.POSITIONAL_ONLY != kind: + # the last token was position-only, but this one isn't + def_sig.append("/") + if prev != param.KEYWORD_ONLY == kind != param.VAR_POSITIONAL: + # param is the first keyword-only arg and isn't starred + def_sig.append("*") + + # star arguments as appropriate + if kind == param.VAR_POSITIONAL: + name = "*" + param.name + args = param.name + count = 0 + elif kind == param.VAR_KEYWORD: + name = "**" + param.name + kwargs = param.name + count = 0 + else: + names[i] = names[param.name] = param.name + name = param.name + count = 1 + + # assign to keyword-only args in the function call + if kind == param.KEYWORD_ONLY: + call_sig.append(f"{name} = {name}") + else: + npos += count + call_sig.append(name) + + def_sig.append(name) + + fname = cls._name(f) + def_sig = f'def {fname}({", ".join(def_sig)}):' + + call_sig = f"return {{}}({', '.join(call_sig)})" + + return cls.Signature(fname, sig, def_sig, call_sig, names, npos, args, kwargs) + + Signature = collections.namedtuple( + "Signature", + [ + "name", + "signature", + "def_sig", + "call_sig", + "names", + "n_positional", + "args", + "kwargs", + ], + ) + + @staticmethod + def _flatten(nestlist, visited): + """flattens a recursive list of lists that doesn't have cyclic references + + Parameters + ---------- + nestlist : iterable + A recursive list of objects to be flattened into a single iterable + + visited : set + A set of object ids which have been walked -- initialize with an + empty set + + Yields + ------ + Non-list objects contained in nestlist + + """ + for thing in nestlist: + if isinstance(thing, list): + if id(thing) in visited: + raise ValueError("A cycle was found in nestlist. 
Be a tree.") + else: + visited.add(id(thing)) + yield from argmap._flatten(thing, visited) + else: + yield thing + + _tabs = " " * 64 + + @staticmethod + def _indent(*lines): + """Indent list of code lines to make executable Python code + + Indents a tree-recursive list of strings, following the rule that one + space is added to the tab after a line that ends in a colon, and one is + removed after a line that ends in an hashmark. + + Parameters + ---------- + *lines : lists and/or strings + A recursive list of strings to be assembled into properly indented + code. + + Returns + ------- + code : str + + Examples + -------- + + argmap._indent(*["try:", "try:", "pass#", "finally:", "pass#", "#", + "finally:", "pass#"]) + + renders to + + '''try: + try: + pass# + finally: + pass# + # + finally: + pass#''' + """ + depth = 0 + for line in argmap._flatten(lines, set()): + yield f"{argmap._tabs[:depth]}{line}" + depth += (line[-1:] == ":") - (line[-1:] == "#") + + +# Vendored in from https://github.com/scikit-learn/scikit-learn/blob/8ed0270b99344cee9bb253cbfa1d986561ea6cd7/sklearn/utils/validation.py#L37C1-L90C44 +def deprecate_positional_args(func=None, *, version): + """Decorator for methods that issues warnings for positional arguments. + + Using the keyword-only argument syntax in pep 3102, arguments after the + * will issue a warning when passed as a positional argument. + + Parameters + ---------- + func : callable, default=None + Function to check arguments on. + version : callable, default="1.3" + The version when positional arguments will result in error. 
+ """ + + def _inner_deprecate_positional_args(f): + sig = signature(f) + kwonly_args = [] + all_args = [] + + for name, param in sig.parameters.items(): + if param.kind == Parameter.POSITIONAL_OR_KEYWORD: + all_args.append(name) + elif param.kind == Parameter.KEYWORD_ONLY: + kwonly_args.append(name) + + @wraps(f) + def inner_f(*args, **kwargs): + extra_args = len(args) - len(all_args) + if extra_args <= 0: + return f(*args, **kwargs) + + # extra_args > 0 + args_msg = [ + f"{name}={arg}" + for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:]) + ] + args_msg = ", ".join(args_msg) + warnings.warn( + ( + f"Pass {args_msg} as keyword args. From NetworkX version " + f"{version} passing these as positional arguments " + "will result in an error" + ), + FutureWarning, + ) + kwargs.update(zip(sig.parameters, args)) + return f(**kwargs) + + return inner_f + + if func is not None: + return _inner_deprecate_positional_args(func) + + return _inner_deprecate_positional_args diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/random_sequence.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/random_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..20a7b5e0a7fcc426ed9840f8bed2abf500e357e5 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/random_sequence.py @@ -0,0 +1,164 @@ +""" +Utilities for generating random numbers, random sequences, and +random selections. 
+"""
+
+import networkx as nx
+from networkx.utils import py_random_state
+
+__all__ = [
+    "powerlaw_sequence",
+    "zipf_rv",
+    "cumulative_distribution",
+    "discrete_sequence",
+    "random_weighted_sample",
+    "weighted_choice",
+]
+
+
+# The same helpers for choosing random sequences from distributions
+# uses Python's random module
+# https://docs.python.org/3/library/random.html
+
+
+@py_random_state(2)
+def powerlaw_sequence(n, exponent=2.0, seed=None):
+    """
+    Return sample sequence of length n from a power law distribution.
+
+    Parameters
+    ----------
+    n : int
+        Length of the returned sequence.
+    exponent : float (default: 2.0)
+        Exponent of the power-law distribution; samples are drawn from a
+        Pareto distribution with shape parameter ``exponent - 1``.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+    """
+    return [seed.paretovariate(exponent - 1) for i in range(n)]
+
+
+@py_random_state(2)
+def zipf_rv(alpha, xmin=1, seed=None):
+    r"""Returns a random value chosen from the Zipf distribution.
+
+    The return value is an integer drawn from the probability distribution
+
+    .. math::
+
+        p(x)=\frac{x^{-\alpha}}{\zeta(\alpha, x_{\min})},
+
+    where $\zeta(\alpha, x_{\min})$ is the Hurwitz zeta function.
+
+    Parameters
+    ----------
+    alpha : float
+        Exponent value of the distribution
+    xmin : int
+        Minimum value
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    x : int
+        Random value from Zipf distribution
+
+    Raises
+    ------
+    ValueError:
+        If xmin < 1 or
+        If alpha <= 1
+
+    Notes
+    -----
+    The rejection algorithm generates random values for the power-law
+    distribution in uniformly bounded expected time dependent on
+    parameters.  See [1]_ for details on its operation.
+
+    Examples
+    --------
+    >>> nx.utils.zipf_rv(alpha=2, xmin=3, seed=42)
+    8
+
+    References
+    ----------
+    .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
+       Springer-Verlag, New York, 1986. 
+ """ + if xmin < 1: + raise ValueError("xmin < 1") + if alpha <= 1: + raise ValueError("a <= 1.0") + a1 = alpha - 1.0 + b = 2**a1 + while True: + u = 1.0 - seed.random() # u in (0,1] + v = seed.random() # v in [0,1) + x = int(xmin * u ** -(1.0 / a1)) + t = (1.0 + (1.0 / x)) ** a1 + if v * x * (t - 1.0) / (b - 1.0) <= t / b: + break + return x + + +def cumulative_distribution(distribution): + """Returns normalized cumulative distribution from discrete distribution.""" + + cdf = [0.0] + psum = sum(distribution) + for i in range(len(distribution)): + cdf.append(cdf[i] + distribution[i] / psum) + return cdf + + +@py_random_state(3) +def discrete_sequence(n, distribution=None, cdistribution=None, seed=None): + """ + Return sample sequence of length n from a given discrete distribution + or discrete cumulative distribution. + + One of the following must be specified. + + distribution = histogram of values, will be normalized + + cdistribution = normalized discrete cumulative distribution + + """ + import bisect + + if cdistribution is not None: + cdf = cdistribution + elif distribution is not None: + cdf = cumulative_distribution(distribution) + else: + raise nx.NetworkXError( + "discrete_sequence: distribution or cdistribution missing" + ) + + # get a uniform random number + inputseq = [seed.random() for i in range(n)] + + # choose from CDF + seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq] + return seq + + +@py_random_state(2) +def random_weighted_sample(mapping, k, seed=None): + """Returns k items without replacement from a weighted sample. + + The input is a dictionary of items with weights as values. + """ + if k > len(mapping): + raise ValueError("sample larger than population") + sample = set() + while len(sample) < k: + sample.add(weighted_choice(mapping, seed)) + return list(sample) + + +@py_random_state(1) +def weighted_choice(mapping, seed=None): + """Returns a single element from a weighted sample. 
+
+    The input is a dictionary of items with weights as values.
+    """
+    # use roulette method
+    rnd = seed.random() * sum(mapping.values())
+    for k, w in mapping.items():
+        rnd -= w
+        if rnd < 0:
+            return k
+    # NOTE(review): if floating-point rounding leaves rnd >= 0 after the
+    # loop, this falls through and implicitly returns None — matches the
+    # upstream networkx implementation; confirm callers tolerate it.
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/tests/test__init.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/tests/test__init.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecbcce36df7cd37781dd45879f63f7d6f55e5567
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/tests/test__init.py
@@ -0,0 +1,11 @@
+import pytest
+
+
+def test_utils_namespace():
+    """Ensure objects are not unintentionally exposed in utils namespace."""
+    with pytest.raises(ImportError):
+        from networkx.utils import nx
+    with pytest.raises(ImportError):
+        from networkx.utils import sys
+    with pytest.raises(ImportError):
+        from networkx.utils import defaultdict, deque