diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/TestTypes.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/TestTypes.py new file mode 100644 index 0000000000000000000000000000000000000000..2fe7347f8de4e525b7526d19fb7f7e21f4fe01b4 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/TestTypes.py @@ -0,0 +1,77 @@ +from __future__ import absolute_import + +import unittest + +import Cython.Compiler.PyrexTypes as PT + + +class TestMethodDispatcherTransform(unittest.TestCase): + + def test_widest_numeric_type(self): + def assert_widest(type1, type2, widest): + self.assertEqual(widest, PT.widest_numeric_type(type1, type2)) + + assert_widest(PT.c_int_type, PT.c_long_type, PT.c_long_type) + assert_widest(PT.c_double_type, PT.c_long_type, PT.c_double_type) + assert_widest(PT.c_longdouble_type, PT.c_long_type, PT.c_longdouble_type) + + cenum = PT.CEnumType("E", "cenum", typedef_flag=False) + assert_widest(PT.c_int_type, cenum, PT.c_int_type) + + +class TestTypeIdentifiers(unittest.TestCase): + + TEST_DATA = [ + ("char*", "char__ptr"), + ("char *", "char__ptr"), + ("char **", "char__ptr__ptr"), + ("_typedef", "_typedef"), + ("__typedef", "__dundertypedef"), + ("___typedef", "__dunder_typedef"), + ("____typedef", "__dunder__dundertypedef"), + ("_____typedef", "__dunder__dunder_typedef"), + ("const __typedef", "__const___dundertypedef"), + ("int[42]", "int__lArr42__rArr"), + ("int[:]", "int__lArr__D__rArr"), + ("int[:,:]", "int__lArr__D__comma___D__rArr"), + ("int[:,:,:]", "int__lArr__D__comma___D__comma___D__rArr"), + ("int[:,:,...]", "int__lArr__D__comma___D__comma___EL__rArr"), + ("std::vector", "std__in_vector"), + ("std::vector&&", "std__in_vector__fwref"), + ("const std::vector", "__const_std__in_vector"), + ("const std::vector&", "__const_std__in_vector__ref"), + ("const_std", "const_std"), + ] + + def test_escape_special_type_characters(self): + test_func = PT._escape_special_type_characters # keep test usage visible for IDEs + function_name = "_escape_special_type_characters" + self._test_escape(function_name) + + def test_type_identifier_for_declaration(self): + test_func = PT.type_identifier_from_declaration # keep test usage visible for IDEs + function_name = test_func.__name__ + self._test_escape(function_name) + + # differences due to whitespace removal + test_data = [ + ("const &std::vector", "const__refstd__in_vector"), + ("const &std::vector<int>", "const__refstd__in_vector__lAngint__rAng"), + ("const &&std::vector", "const__fwrefstd__in_vector"), + ("const &&&std::vector", "const__fwref__refstd__in_vector"), + ("const &&std::vector", "const__fwrefstd__in_vector"), + ("void (*func)(int x, float y)", + "975d51__void__lParen__ptrfunc__rParen__lParenint__spac__etc"), + ("float ** (*func)(int x, int[:] y)", + "31883a__float__ptr__ptr__lParen__ptrfunc__rParen__lPar__etc"), + ] + self._test_escape(function_name, test_data) + + def _test_escape(self, func_name, test_data=TEST_DATA): + escape = getattr(PT, func_name) + for declaration, expected in test_data: + escaped_value = escape(declaration) + self.assertEqual(escaped_value, expected, "%s('%s') == '%s' != '%s'" % ( + func_name, declaration, escaped_value, expected)) + # test that the length has been successfully capped + self.assertLessEqual(len(escaped_value), 64) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestCmdLine.cpython-311.pyc
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestCmdLine.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeb1e9ba63bf9346ea028918d69b824191f79876 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestCmdLine.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestFlowControl.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestFlowControl.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bb7399dc065f376c8da6b2143ba510fda9c1bb2 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestFlowControl.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestGrammar.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestGrammar.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1a0bc72bae5dd9c2176c8129fe5975288db9195 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestGrammar.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestParseTreeTransforms.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestParseTreeTransforms.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd9a933222c16f70ac725e9e465336b5e2cbcd16 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestParseTreeTransforms.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestScanning.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestScanning.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..806f34642bccaf0ba54e61492833b9989cdea5f4 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestScanning.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestTypes.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestTypes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bd2131558fb0a10785c8d7929515e5dfcedbe14 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestTypes.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestUtilityLoad.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestUtilityLoad.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65c186ed5137ccce7fa7a2ed98d0619f71eff5c2 Binary files /dev/null and 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/TestUtilityLoad.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/Utils.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/Utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce69919490a2815ee72c1de265150e66a01720f7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/Tests/__pycache__/Utils.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/ImportExport.c b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/ImportExport.c new file mode 100644 index 0000000000000000000000000000000000000000..18c2cef7db34400fdf49f7542ec467f79d524c6d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utility/ImportExport.c @@ -0,0 +1,939 @@ +/////////////// ImportDottedModule.proto /////////////// + +static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple); /*proto*/ +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple); /*proto*/ +#endif + +/////////////// ImportDottedModule /////////////// +//@requires: Import + +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) { + PyObject *partial_name = NULL, *slice = NULL, *sep = NULL; + if (unlikely(PyErr_Occurred())) { + PyErr_Clear(); + } + if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) { + partial_name = name; + } else { + slice = PySequence_GetSlice(parts_tuple, 0, count); + if (unlikely(!slice)) + goto bad; + sep = PyUnicode_FromStringAndSize(".", 1); + if (unlikely(!sep)) + goto bad; + partial_name = PyUnicode_Join(sep, slice); + } + + PyErr_Format( +#if PY_MAJOR_VERSION < 3 + PyExc_ImportError, + "No module named '%s'", PyString_AS_STRING(partial_name)); +#else +#if PY_VERSION_HEX >= 0x030600B1 + PyExc_ModuleNotFoundError, +#else + PyExc_ImportError, +#endif + "No module named '%U'", partial_name); +#endif + +bad: + Py_XDECREF(sep); + Py_XDECREF(slice); + Py_XDECREF(partial_name); + return NULL; +} +#endif + +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) { + PyObject *imported_module; +#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + PyObject *modules = PyImport_GetModuleDict(); + if (unlikely(!modules)) + return NULL; + imported_module = __Pyx_PyDict_GetItemStr(modules, name); + Py_XINCREF(imported_module); +#else + imported_module = PyImport_GetModule(name); +#endif + return imported_module; +} +#endif + +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple) { + Py_ssize_t i, nparts; + nparts = PyTuple_GET_SIZE(parts_tuple); + for (i=1; i < nparts && module; i++) { + PyObject *part, *submodule; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + part = PyTuple_GET_ITEM(parts_tuple, i); +#else + part = PySequence_ITEM(parts_tuple, i); +#endif + submodule = __Pyx_PyObject_GetAttrStrNoError(module, part); + // We stop if the attribute isn't found, i.e. if submodule is NULL here. 
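+    // Descriptive note: for a dotted name like "a.b.c", parts_tuple is ("a", "b", "c")
+    // and the module passed in is the top-level package "a"; starting at index 1, each
+    // iteration replaces "module" with the next submodule attribute ("b", then "c").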
+#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(part); +#endif + Py_DECREF(module); + module = submodule; + } + if (unlikely(!module)) { + return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); + } + return module; +} +#endif + +static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if PY_MAJOR_VERSION < 3 + PyObject *module, *from_list, *star = PYIDENT("*"); + CYTHON_UNUSED_VAR(parts_tuple); + from_list = PyList_New(1); + if (unlikely(!from_list)) + return NULL; + Py_INCREF(star); + PyList_SET_ITEM(from_list, 0, star); + module = __Pyx_Import(name, from_list, 0); + Py_DECREF(from_list); + return module; +#else + PyObject *imported_module; + PyObject *module = __Pyx_Import(name, NULL, 0); + if (!parts_tuple || unlikely(!module)) + return module; + + // Look up module in sys.modules, which is safer than the attribute lookups below. + imported_module = __Pyx__ImportDottedModule_Lookup(name); + if (likely(imported_module)) { + Py_DECREF(module); + return imported_module; + } + PyErr_Clear(); + return __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); +#endif +} + +static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 + PyObject *module = __Pyx__ImportDottedModule_Lookup(name); + if (likely(module)) { + // CPython guards against thread-concurrent initialisation in importlib. + // In this case, we let PyImport_ImportModuleLevelObject() handle the locking. + PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, PYIDENT("__spec__")); + if (likely(spec)) { + PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, PYIDENT("_initializing")); + if (likely(!unsafe || !__Pyx_PyObject_IsTrue(unsafe))) { + Py_DECREF(spec); + spec = NULL; + } + Py_XDECREF(unsafe); + } + if (likely(!spec)) { + // Not in initialisation phase => use modules as is. 
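+            // ("spec" is NULL at this point either because the module has no "__spec__"
+            //  attribute or because "__spec__._initializing" was not true; in both cases
+            //  the module has finished initialising and can be returned as is.)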
+ PyErr_Clear(); + return module; + } + Py_DECREF(spec); + Py_DECREF(module); + } else if (PyErr_Occurred()) { + PyErr_Clear(); + } +#endif + + return __Pyx__ImportDottedModule(name, parts_tuple); +} + + +/////////////// ImportDottedModuleRelFirst.proto /////////////// + +static PyObject *__Pyx_ImportDottedModuleRelFirst(PyObject *name, PyObject *parts_tuple); /*proto*/ + +/////////////// ImportDottedModuleRelFirst /////////////// +//@requires: ImportDottedModule +//@requires: Import + +static PyObject *__Pyx_ImportDottedModuleRelFirst(PyObject *name, PyObject *parts_tuple) { + PyObject *module; + PyObject *from_list = NULL; +#if PY_MAJOR_VERSION < 3 + PyObject *star = PYIDENT("*"); + from_list = PyList_New(1); + if (unlikely(!from_list)) + return NULL; + Py_INCREF(star); + PyList_SET_ITEM(from_list, 0, star); +#endif + module = __Pyx_Import(name, from_list, -1); + Py_XDECREF(from_list); + if (module) { + #if PY_MAJOR_VERSION >= 3 + if (parts_tuple) { + module = __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); + } + #endif + return module; + } + if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) + return NULL; + PyErr_Clear(); + // try absolute import + return __Pyx_ImportDottedModule(name, parts_tuple); +} + + +/////////////// Import.proto /////////////// + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ + +/////////////// Import /////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStr +//@requires:StringTools.c::IncludeStringH +//@substitute: naming + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *module = 0; + PyObject *empty_dict = 0; + PyObject *empty_list = 0; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr($builtins_cname, PYIDENT("__import__")); + if (unlikely(!py_import)) + goto bad; + if (!from_list) { + empty_list = PyList_New(0); + if (unlikely(!empty_list)) + goto bad; + from_list = empty_list; + } + #endif + empty_dict = PyDict_New(); + if (unlikely(!empty_dict)) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.') != NULL) { + /* try package relative import first */ + module = PyImport_ImportModuleLevelObject( + name, $moddict_cname, empty_dict, from_list, 1); + if (unlikely(!module)) { + if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) + goto bad; + PyErr_Clear(); + } + } + level = 0; /* try absolute import on failure */ + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (unlikely(!py_level)) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, $moddict_cname, empty_dict, from_list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, $moddict_cname, empty_dict, from_list, level); + #endif + } + } +bad: + Py_XDECREF(empty_dict); + Py_XDECREF(empty_list); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + return module; +} + + +/////////////// ImportFrom.proto /////////////// + +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /*proto*/ + +/////////////// ImportFrom /////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStr + +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + // 'name' may refer to a (sub-)module which has not 
finished initialization + // yet, and may not be assigned as an attribute to its parent, so try + // finding it by full name. + const char* module_name_str = 0; + PyObject* module_name = 0; + PyObject* module_dot = 0; + PyObject* full_name = 0; + PyErr_Clear(); + module_name_str = PyModule_GetName(module); + if (unlikely(!module_name_str)) { goto modbad; } + module_name = PyUnicode_FromString(module_name_str); + if (unlikely(!module_name)) { goto modbad; } + module_dot = PyUnicode_Concat(module_name, PYUNICODE(".")); + if (unlikely(!module_dot)) { goto modbad; } + full_name = PyUnicode_Concat(module_dot, name); + if (unlikely(!full_name)) { goto modbad; } + #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + { + PyObject *modules = PyImport_GetModuleDict(); + if (unlikely(!modules)) + goto modbad; + value = PyObject_GetItem(modules, full_name); + } + #else + value = PyImport_GetModule(full_name); + #endif + + modbad: + Py_XDECREF(full_name); + Py_XDECREF(module_dot); + Py_XDECREF(module_name); + } + if (unlikely(!value)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + + +/////////////// ImportStar /////////////// +//@substitute: naming + +/* import_all_from is an unexposed function from ceval.c */ + +static int +__Pyx_import_all_from(PyObject *locals, PyObject *v) +{ + PyObject *all = PyObject_GetAttrString(v, "__all__"); + PyObject *dict, *name, *value; + int skip_leading_underscores = 0; + int pos, err; + + if (all == NULL) { + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) + return -1; /* Unexpected error */ + PyErr_Clear(); + dict = PyObject_GetAttrString(v, "__dict__"); + if (dict == NULL) { + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) + return -1; + PyErr_SetString(PyExc_ImportError, + "from-import-* object has no __dict__ and no __all__"); + return -1; + } +#if PY_MAJOR_VERSION < 3 + all = PyObject_CallMethod(dict, (char *)"keys", NULL); +#else + all = PyMapping_Keys(dict); +#endif + Py_DECREF(dict); + if (all == NULL) + return -1; + skip_leading_underscores = 1; + } + + for (pos = 0, err = 0; ; pos++) { + name = PySequence_GetItem(all, pos); + if (name == NULL) { + if (!PyErr_ExceptionMatches(PyExc_IndexError)) + err = -1; + else + PyErr_Clear(); + break; + } + if (skip_leading_underscores && +#if PY_MAJOR_VERSION < 3 + likely(PyString_Check(name)) && + PyString_AS_STRING(name)[0] == '_') +#else + likely(PyUnicode_Check(name)) && + likely(__Pyx_PyUnicode_GET_LENGTH(name)) && + __Pyx_PyUnicode_READ_CHAR(name, 0) == '_') +#endif + { + Py_DECREF(name); + continue; + } + value = PyObject_GetAttr(v, name); + if (value == NULL) + err = -1; + else if (PyDict_CheckExact(locals)) + err = PyDict_SetItem(locals, name, value); + else + err = PyObject_SetItem(locals, name, value); + Py_DECREF(name); + Py_XDECREF(value); + if (err != 0) + break; + } + Py_DECREF(all); + return err; +} + + +static int ${import_star}(PyObject* m) { + + int i; + int ret = -1; + char* s; + PyObject *locals = 0; + PyObject *list = 0; +#if PY_MAJOR_VERSION >= 3 + PyObject *utf8_name = 0; +#endif + PyObject *name; + PyObject *item; + + locals = PyDict_New(); if (!locals) goto bad; + if (__Pyx_import_all_from(locals, m) < 0) goto bad; + list = PyDict_Items(locals); if (!list) goto bad; + + for(i=0; i<PyList_GET_SIZE(list); i++) { + name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0); + item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1); +#if PY_MAJOR_VERSION >= 3 + utf8_name = PyUnicode_AsUTF8String(name); + if (!utf8_name) goto bad; + s = PyBytes_AS_STRING(utf8_name); + if (${import_star_set}(item, name, s) < 0) goto bad; + Py_DECREF(utf8_name); utf8_name = 0; +#else + s = PyString_AsString(name); + if (!s) goto bad; + if (${import_star_set}(item, name, s) < 0) goto bad; +#endif + } + ret = 0; + +bad: + Py_XDECREF(locals); + Py_XDECREF(list); +#if PY_MAJOR_VERSION >= 3 + Py_XDECREF(utf8_name); +#endif + return ret; +} + + +/////////////// SetPackagePathFromImportLib.proto /////////////// + +// PY_VERSION_HEX >= 0x03030000 +#if PY_MAJOR_VERSION >= 3 && !CYTHON_PEP489_MULTI_PHASE_INIT +static int __Pyx_SetPackagePathFromImportLib(PyObject *module_name); +#else +#define __Pyx_SetPackagePathFromImportLib(a) 0 +#endif + +/////////////// SetPackagePathFromImportLib /////////////// +//@substitute: naming + +// PY_VERSION_HEX >= 0x03030000 +#if PY_MAJOR_VERSION >= 3 && !CYTHON_PEP489_MULTI_PHASE_INIT +static int __Pyx_SetPackagePathFromImportLib(PyObject *module_name) { + PyObject *importlib, *osmod, *ossep, *parts, *package_path; + PyObject *file_path = NULL; + int result; + PyObject *spec; + // package_path = [importlib.util.find_spec(module_name).origin.rsplit(os.sep, 1)[0]] + importlib = PyImport_ImportModule("importlib.util"); + if (unlikely(!importlib)) + goto bad; + spec = PyObject_CallMethod(importlib, "find_spec", "(O)", module_name); + Py_DECREF(importlib); + if (unlikely(!spec)) + goto bad; + file_path = PyObject_GetAttrString(spec, "origin"); + Py_DECREF(spec); + if (unlikely(!file_path)) + goto bad; + + if (unlikely(PyObject_SetAttrString($module_cname, "__file__", file_path) < 0)) + goto bad; + + osmod = PyImport_ImportModule("os"); + if (unlikely(!osmod)) + goto bad; + ossep = PyObject_GetAttrString(osmod, "sep"); + Py_DECREF(osmod); + if (unlikely(!ossep)) + goto bad; + parts = PyObject_CallMethod(file_path, "rsplit", "(Oi)", ossep, 1); + Py_DECREF(file_path); file_path = NULL; + Py_DECREF(ossep); + if (unlikely(!parts)) + goto bad; + package_path = Py_BuildValue("[O]", PyList_GET_ITEM(parts, 0)); + Py_DECREF(parts); + if (unlikely(!package_path)) + goto bad; + goto set_path; + +bad: + PyErr_WriteUnraisable(module_name); + Py_XDECREF(file_path); + + // set an empty path list on failure + PyErr_Clear(); + package_path = PyList_New(0); + if (unlikely(!package_path)) + return -1; + +set_path: + result = PyObject_SetAttrString($module_cname, "__path__", package_path); + Py_DECREF(package_path); + return result; +} +#endif + + +/////////////// TypeImport.proto /////////////// +//@substitute: naming + +#ifndef __PYX_HAVE_RT_ImportType_proto_$cyversion +#define __PYX_HAVE_RT_ImportType_proto_$cyversion + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L +#include <stdalign.h> +#endif + +#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || __cplusplus >= 201103L +#define __PYX_GET_STRUCT_ALIGNMENT_$cyversion(s) alignof(s) +#else +// best guess at what the alignment could be since we can't measure it +#define __PYX_GET_STRUCT_ALIGNMENT_$cyversion(s) sizeof(void*) +#endif + +enum __Pyx_ImportType_CheckSize_$cyversion { + __Pyx_ImportType_CheckSize_Error_$cyversion = 0, + __Pyx_ImportType_CheckSize_Warn_$cyversion = 1, + __Pyx_ImportType_CheckSize_Ignore_$cyversion = 2 +}; + +static PyTypeObject *__Pyx_ImportType_$cyversion(PyObject* module, const char *module_name, const char *class_name, size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_$cyversion check_size); /*proto*/ + +#endif + +/////////////// TypeImport /////////////// +//@substitute: naming + +#ifndef __PYX_HAVE_RT_ImportType_$cyversion +#define
__PYX_HAVE_RT_ImportType_$cyversion +static PyTypeObject *__Pyx_ImportType_$cyversion(PyObject *module, const char *module_name, const char *class_name, + size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_$cyversion check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; + Py_ssize_t itemsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + PyObject *py_itemsize; +#endif + + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#if !CYTHON_COMPILING_IN_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; + itemsize = ((PyTypeObject *)result)->tp_itemsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; + py_itemsize = PyObject_GetAttrString(result, "__itemsize__"); + if (!py_itemsize) + goto bad; + itemsize = PyLong_AsSsize_t(py_itemsize); + Py_DECREF(py_itemsize); + py_itemsize = 0; + if (itemsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (itemsize) { + // If itemsize is smaller than the alignment the struct can end up with some extra + // padding at the end. In this case we need to work out the maximum size that + // the padding could be when calculating the range of valid struct sizes. + if (size % alignment) { + // if this is true we've probably calculated the alignment wrongly + // (most likely because alignof isn't available) + alignment = size % alignment; + } + if (itemsize < (Py_ssize_t)alignment) + itemsize = (Py_ssize_t)alignment; + } + if ((size_t)(basicsize + itemsize) < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize+itemsize); + goto bad; + } + // varobjects almost have structs between basicsize and basicsize + itemsize + // but the struct isn't always one of the two limiting values + if (check_size == __Pyx_ImportType_CheckSize_Error_$cyversion && + ((size_t)basicsize > size || (size_t)(basicsize + itemsize) < size)) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd-%zd from PyObject", + module_name, class_name, size, basicsize, basicsize+itemsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn_$cyversion && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. 
" + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + /* check_size == __Pyx_ImportType_CheckSize_Ignore does not warn nor error */ + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/////////////// FunctionImport.proto /////////////// +//@substitute: naming + +static int __Pyx_ImportFunction_$cyversion(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /*proto*/ + +/////////////// FunctionImport /////////////// +//@substitute: naming + +#ifndef __PYX_HAVE_RT_ImportFunction_$cyversion +#define __PYX_HAVE_RT_ImportFunction_$cyversion +static int __Pyx_ImportFunction_$cyversion(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + + d = PyObject_GetAttrString(module, (char *)"$api_name"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + +/////////////// FunctionExport.proto /////////////// + +static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /*proto*/ + +/////////////// FunctionExport /////////////// +//@substitute: naming + +static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + + d = PyObject_GetAttrString($module_cname, (char *)"$api_name"); + if (!d) { + PyErr_Clear(); + d = PyDict_New(); + if (!d) + goto bad; + Py_INCREF(d); + if (PyModule_AddObject($module_cname, (char *)"$api_name", d) < 0) + goto bad; + } + tmp.fp = f; + cobj = PyCapsule_New(tmp.p, sig, 0); + if (!cobj) + goto bad; + if (PyDict_SetItemString(d, name, cobj) < 0) + goto bad; + Py_DECREF(cobj); + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(cobj); + Py_XDECREF(d); + return -1; +} + +/////////////// VoidPtrImport.proto /////////////// +//@substitute: naming + +static int __Pyx_ImportVoidPtr_$cyversion(PyObject *module, const char *name, void **p, const char *sig); /*proto*/ + +/////////////// VoidPtrImport /////////////// +//@substitute: naming + +#ifndef __PYX_HAVE_RT_ImportVoidPtr_$cyversion +#define __PYX_HAVE_RT_ImportVoidPtr_$cyversion +static int __Pyx_ImportVoidPtr_$cyversion(PyObject *module, const char *name, void **p, const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + + d = PyObject_GetAttrString(module, (char *)"$api_name"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, name); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C variable %.200s", + PyModule_GetName(module), name); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), name, sig, PyCapsule_GetName(cobj)); + goto bad; + } + *p = PyCapsule_GetPointer(cobj, 
sig); + if (!(*p)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + +/////////////// VoidPtrExport.proto /////////////// + +static int __Pyx_ExportVoidPtr(PyObject *name, void *p, const char *sig); /*proto*/ + +/////////////// VoidPtrExport /////////////// +//@substitute: naming +//@requires: ObjectHandling.c::PyObjectSetAttrStr + +static int __Pyx_ExportVoidPtr(PyObject *name, void *p, const char *sig) { + PyObject *d; + PyObject *cobj = 0; + + d = PyDict_GetItem($moddict_cname, PYIDENT("$api_name")); + Py_XINCREF(d); + if (!d) { + d = PyDict_New(); + if (!d) + goto bad; + if (__Pyx_PyObject_SetAttrStr($module_cname, PYIDENT("$api_name"), d) < 0) + goto bad; + } + cobj = PyCapsule_New(p, sig, 0); + if (!cobj) + goto bad; + if (PyDict_SetItem(d, name, cobj) < 0) + goto bad; + Py_DECREF(cobj); + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(cobj); + Py_XDECREF(d); + return -1; +} + + +/////////////// SetVTable.proto /////////////// + +static int __Pyx_SetVtable(PyTypeObject* typeptr , void* vtable); /*proto*/ + +/////////////// SetVTable /////////////// + +static int __Pyx_SetVtable(PyTypeObject *type, void *vtable) { + PyObject *ob = PyCapsule_New(vtable, 0, 0); + if (unlikely(!ob)) + goto bad; +#if CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(PyObject_SetAttr((PyObject *) type, PYIDENT("__pyx_vtable__"), ob) < 0)) +#else + if (unlikely(PyDict_SetItem(type->tp_dict, PYIDENT("__pyx_vtable__"), ob) < 0)) +#endif + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + + +/////////////// GetVTable.proto /////////////// + +static void* __Pyx_GetVtable(PyTypeObject *type); /*proto*/ + +/////////////// GetVTable /////////////// + +static void* __Pyx_GetVtable(PyTypeObject *type) { + void* ptr; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *ob = PyObject_GetAttr((PyObject *)type, PYIDENT("__pyx_vtable__")); +#else + PyObject *ob = PyObject_GetItem(type->tp_dict, PYIDENT("__pyx_vtable__")); +#endif + if (!ob) + goto bad; + ptr = PyCapsule_GetPointer(ob, 0); + if (!ptr && !PyErr_Occurred()) + PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type"); + Py_DECREF(ob); + return ptr; +bad: + Py_XDECREF(ob); + return NULL; +} + + +/////////////// MergeVTables.proto /////////////// +//@requires: GetVTable + +// TODO: find a way to make this work with the Limited API! +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_MergeVtables(PyTypeObject *type); /*proto*/ +#endif + +/////////////// MergeVTables /////////////// + +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_MergeVtables(PyTypeObject *type) { + int i; + void** base_vtables; + __Pyx_TypeName tp_base_name; + __Pyx_TypeName base_name; + void* unknown = (void*)-1; + PyObject* bases = type->tp_bases; + int base_depth = 0; + { + PyTypeObject* base = type->tp_base; + while (base) { + base_depth += 1; + base = base->tp_base; + } + } + base_vtables = (void**) malloc(sizeof(void*) * (size_t)(base_depth + 1)); + base_vtables[0] = unknown; + // Could do MRO resolution of individual methods in the future, assuming + // compatible vtables, but for now simply require a common vtable base. + // Note that if the vtables of various bases are extended separately, + // resolution isn't possible and we must reject it just as when the + // instance struct is so extended. (It would be good to also do this + // check when a multiple-base class is created in pure Python as well.) 
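+    // Descriptive note: "base_vtables" is filled lazily while the loop below walks
+    // the tp_base chain; the sentinel "unknown" marks the first slot that has not
+    // been resolved yet, and a NULL entry means a base without a vtable was reached,
+    // so no match further up the chain is possible.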
+ for (i = 1; i < PyTuple_GET_SIZE(bases); i++) { + void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i))); + if (base_vtable != NULL) { + int j; + PyTypeObject* base = type->tp_base; + for (j = 0; j < base_depth; j++) { + if (base_vtables[j] == unknown) { + base_vtables[j] = __Pyx_GetVtable(base); + base_vtables[j + 1] = unknown; + } + if (base_vtables[j] == base_vtable) { + break; + } else if (base_vtables[j] == NULL) { + // No more potential matching bases (with vtables). + goto bad; + } + base = base->tp_base; + } + } + } + PyErr_Clear(); + free(base_vtables); + return 0; +bad: + tp_base_name = __Pyx_PyType_GetName(type->tp_base); + base_name = __Pyx_PyType_GetName((PyTypeObject*)PyTuple_GET_ITEM(bases, i)); + PyErr_Format(PyExc_TypeError, + "multiple bases have vtable conflict: '" __Pyx_FMT_TYPENAME "' and '" __Pyx_FMT_TYPENAME "'", tp_base_name, base_name); + __Pyx_DECREF_TypeName(tp_base_name); + __Pyx_DECREF_TypeName(base_name); + free(base_vtables); + return -1; +} +#endif + + +/////////////// ImportNumPyArray.proto /////////////// + +static PyObject *__pyx_numpy_ndarray = NULL; + +static PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void); /*proto*/ + +/////////////// ImportNumPyArray.cleanup /////////////// +Py_CLEAR(__pyx_numpy_ndarray); + +/////////////// ImportNumPyArray /////////////// +//@requires: ImportExport.c::Import + +static PyObject* __Pyx__ImportNumPyArray(void) { + PyObject *numpy_module, *ndarray_object = NULL; + numpy_module = __Pyx_Import(PYIDENT("numpy"), NULL, 0); + if (likely(numpy_module)) { + ndarray_object = PyObject_GetAttrString(numpy_module, "ndarray"); + Py_DECREF(numpy_module); + } + if (unlikely(!ndarray_object)) { + // ImportError, AttributeError, ... + PyErr_Clear(); + } + if (unlikely(!ndarray_object || !PyObject_TypeCheck(ndarray_object, &PyType_Type))) { + Py_XDECREF(ndarray_object); + Py_INCREF(Py_None); + ndarray_object = Py_None; + } + return ndarray_object; +} + +static CYTHON_INLINE PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void) { + if (unlikely(!__pyx_numpy_ndarray)) { + __pyx_numpy_ndarray = __Pyx__ImportNumPyArray(); + } + Py_INCREF(__pyx_numpy_ndarray); + return __pyx_numpy_ndarray; +} diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51d96687c1271505ecbc7ae663f98fa4444949c9 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/walks.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/walks.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67f4638eec621025fc1203af0908f26f047f52b4 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/__pycache__/walks.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27d5ecdaa8d3cafe442dc25cf90dfed170e18f79 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f36f56eacc46efed04eb2886ed309df59b25f24f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a9470932daa8a8b874543d91e1aca032999523 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6e0e0f182e34bcf7786f2581d75609015581a16 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c135a26fdb8c808f92aed90c0922b5cb44e7c429 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_asteroidal.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_asteroidal.py new file mode 100644 index 0000000000000000000000000000000000000000..67131b2d05026317b496d06e6b382836c8c26367 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_asteroidal.py @@ -0,0 +1,23 @@ +import networkx as nx + + +def test_is_at_free(): + is_at_free = nx.asteroidal.is_at_free + + cycle = nx.cycle_graph(6) + assert not is_at_free(cycle) + + path = nx.path_graph(6) + assert is_at_free(path) 
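+    # An asteroidal triple is an independent set of three vertices such that
+    # each pair is connected by a path avoiding the neighborhood of the third;
+    # the 6-cycle contains one (e.g. nodes 0, 2 and 4), while a path never does.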
+ + small_graph = nx.complete_graph(2) + assert is_at_free(small_graph) + + petersen = nx.petersen_graph() + assert not is_at_free(petersen) + + clique = nx.complete_graph(6) + assert is_at_free(clique) + + line_clique = nx.line_graph(clique) + assert not is_at_free(line_clique) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_bridges.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_bridges.py new file mode 100644 index 0000000000000000000000000000000000000000..9c3ceba607f9b7157c4c070a68becbdf79db8466 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_bridges.py @@ -0,0 +1,144 @@ +"""Unit tests for bridge-finding algorithms.""" + +import pytest + +import networkx as nx + + +class TestBridges: + """Unit tests for the bridge-finding function.""" + + def test_single_bridge(self): + edges = [ + # DFS tree edges. + (1, 2), + (2, 3), + (3, 4), + (3, 5), + (5, 6), + (6, 7), + (7, 8), + (5, 9), + (9, 10), + # Nontree edges. + (1, 3), + (1, 4), + (2, 5), + (5, 10), + (6, 8), + ] + G = nx.Graph(edges) + source = 1 + bridges = list(nx.bridges(G, source)) + assert bridges == [(5, 6)] + + def test_barbell_graph(self): + # The (3, 0) barbell graph has two triangles joined by a single edge. + G = nx.barbell_graph(3, 0) + source = 0 + bridges = list(nx.bridges(G, source)) + assert bridges == [(2, 3)] + + def test_multiedge_bridge(self): + edges = [ + (0, 1), + (0, 2), + (1, 2), + (1, 2), + (2, 3), + (3, 4), + (3, 4), + ] + G = nx.MultiGraph(edges) + assert list(nx.bridges(G)) == [(2, 3)] + + +class TestHasBridges: + """Unit tests for the has bridges function.""" + + def test_single_bridge(self): + edges = [ + # DFS tree edges. + (1, 2), + (2, 3), + (3, 4), + (3, 5), + (5, 6), # The only bridge edge + (6, 7), + (7, 8), + (5, 9), + (9, 10), + # Nontree edges. 
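+            # (these close a cycle over every tree edge except (5, 6),
+            #  which is why (5, 6) remains the only bridge)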
+ (1, 3), + (1, 4), + (2, 5), + (5, 10), + (6, 8), + ] + G = nx.Graph(edges) + assert nx.has_bridges(G) # Default root + assert nx.has_bridges(G, root=1) # arbitrary root in G + + def test_has_bridges_raises_root_not_in_G(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + with pytest.raises(nx.NodeNotFound): + nx.has_bridges(G, root=6) + + def test_multiedge_bridge(self): + edges = [ + (0, 1), + (0, 2), + (1, 2), + (1, 2), + (2, 3), + (3, 4), + (3, 4), + ] + G = nx.MultiGraph(edges) + assert nx.has_bridges(G) + # Make every edge a multiedge + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + assert not nx.has_bridges(G) + + def test_bridges_multiple_components(self): + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) # One connected component + nx.add_path(G, [4, 5, 6]) # Another connected component + assert list(nx.bridges(G, root=4)) == [(4, 5), (5, 6)] + + +class TestLocalBridges: + """Unit tests for the local_bridge function.""" + + @classmethod + def setup_class(cls): + cls.BB = nx.barbell_graph(4, 0) + cls.square = nx.cycle_graph(4) + cls.tri = nx.cycle_graph(3) + + def test_nospan(self): + expected = {(3, 4), (4, 3)} + assert next(nx.local_bridges(self.BB, with_span=False)) in expected + assert set(nx.local_bridges(self.square, with_span=False)) == self.square.edges + assert list(nx.local_bridges(self.tri, with_span=False)) == [] + + def test_no_weight(self): + inf = float("inf") + expected = {(3, 4, inf), (4, 3, inf)} + assert next(nx.local_bridges(self.BB)) in expected + expected = {(u, v, 3) for u, v, in self.square.edges} + assert set(nx.local_bridges(self.square)) == expected + assert list(nx.local_bridges(self.tri)) == [] + + def test_weight(self): + inf = float("inf") + G = self.square.copy() + + G.edges[1, 2]["weight"] = 2 + expected = {(u, v, 5 - wt) for u, v, wt in G.edges(data="weight", default=1)} + assert set(nx.local_bridges(G, weight="weight")) == expected + + expected = {(u, v, 6) for u, v in G.edges} + lb = nx.local_bridges(G, weight=lambda u, v, d: 2) + assert set(lb) == expected diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_clique.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_clique.py new file mode 100644 index 0000000000000000000000000000000000000000..3bee210982888a142f07a043bbde24bdad80fae9 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_clique.py @@ -0,0 +1,291 @@ +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti + + +class TestCliques: + def setup_method(self): + z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1] + self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1) + self.cl = list(nx.find_cliques(self.G)) + H = nx.complete_graph(6) + H = nx.relabel_nodes(H, {i: i + 1 for i in range(6)}) + H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)]) + self.H = H + + def test_find_cliques1(self): + cl = list(nx.find_cliques(self.G)) + rcl = nx.find_cliques_recursive(self.G) + expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + assert sorted(map(sorted, cl)) == sorted(map(sorted, rcl)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + def test_selfloops(self): + self.G.add_edge(1, 1) + cl = list(nx.find_cliques(self.G)) + rcl = list(nx.find_cliques_recursive(self.G)) + assert set(map(frozenset, cl)) == set(map(frozenset, rcl)) + answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}] + 
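+        # the added self-loop (1, 1) must not change the set of maximal cliques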
assert len(answer) == len(cl) + assert all(set(c) in answer for c in cl) + + def test_find_cliques2(self): + hcl = list(nx.find_cliques(self.H)) + assert sorted(map(sorted, hcl)) == [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]] + + def test_find_cliques3(self): + # all cliques are [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + + cl = list(nx.find_cliques(self.G, [2])) + rcl = nx.find_cliques_recursive(self.G, [2]) + expected = [[2, 6, 1, 3], [2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 3])) + rcl = nx.find_cliques_recursive(self.G, [2, 3]) + expected = [[2, 6, 1, 3]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + with pytest.raises(ValueError): + list(nx.find_cliques(self.G, [2, 6, 4, 1])) + + with pytest.raises(ValueError): + list(nx.find_cliques_recursive(self.G, [2, 6, 4, 1])) + + def test_number_of_cliques(self): + G = self.G + assert nx.number_of_cliques(G, 1) == 1 + assert list(nx.number_of_cliques(G, [1]).values()) == [1] + assert list(nx.number_of_cliques(G, [1, 2]).values()) == [1, 2] + assert nx.number_of_cliques(G, [1, 2]) == {1: 1, 2: 2} + assert nx.number_of_cliques(G, 2) == 2 + assert nx.number_of_cliques(G) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=list(G)) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=[2, 3, 4]) == {2: 2, 3: 1, 4: 2} + assert nx.number_of_cliques(G, cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, list(G), cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + + def test_node_clique_number(self): + G = self.G + assert nx.node_clique_number(G, 1) == 4 + assert list(nx.node_clique_number(G, [1]).values()) == [4] + assert list(nx.node_clique_number(G, [1, 2]).values()) == [4, 4] + assert nx.node_clique_number(G, [1, 2]) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1) == 4 + assert nx.node_clique_number(G) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, cliques=self.cl) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, [1, 2], cliques=self.cl) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1, cliques=self.cl) == 4 + + def test_make_clique_bipartite(self): + G = self.G + B = nx.make_clique_bipartite(G) + assert sorted(B) == [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + # Project onto the nodes of the original graph. 
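+        # (the five cliques were labeled -1..-5, so range(1, 12) selects exactly
+        #  the original nodes and the projection recovers G)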
+ H = nx.projected_graph(B, range(1, 12)) + assert H.adj == G.adj + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as positive ones. + H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)}) + assert sorted(H1) == [1, 2, 3, 4, 5] + + def test_make_max_clique_graph(self): + """Tests that the maximal clique graph is the same as the bipartite + clique graph after being projected onto the nodes representing the + cliques. + + """ + G = self.G + B = nx.make_clique_bipartite(G) + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as nonnegative ones, starting at + # 0. + H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)}) + H2 = nx.make_max_clique_graph(G) + assert H1.adj == H2.adj + + def test_directed(self): + with pytest.raises(nx.NetworkXNotImplemented): + next(nx.find_cliques(nx.DiGraph())) + + def test_find_cliques_trivial(self): + G = nx.Graph() + assert sorted(nx.find_cliques(G)) == [] + assert sorted(nx.find_cliques_recursive(G)) == [] + + def test_make_max_clique_graph_create_using(self): + G = nx.Graph([(1, 2), (3, 1), (4, 1), (5, 6)]) + E = nx.Graph([(0, 1), (0, 2), (1, 2)]) + E.add_node(3) + assert nx.is_isomorphic(nx.make_max_clique_graph(G, create_using=nx.Graph), E) + + +class TestEnumerateAllCliques: + def test_paper_figure_4(self): + # Same graph as given in Fig. 4 of paper enumerate_all_cliques is + # based on. + # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129 + G = nx.Graph() + edges_fig_4 = [ + ("a", "b"), + ("a", "c"), + ("a", "d"), + ("a", "e"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("c", "d"), + ("c", "e"), + ("d", "e"), + ("f", "b"), + ("f", "c"), + ("f", "g"), + ("g", "f"), + ("g", "c"), + ("g", "d"), + ("g", "e"), + ] + G.add_edges_from(edges_fig_4) + + cliques = list(nx.enumerate_all_cliques(G)) + clique_sizes = list(map(len, cliques)) + assert sorted(clique_sizes) == clique_sizes + + expected_cliques = [ + ["a"], + ["b"], + ["c"], + ["d"], + ["e"], + ["f"], + ["g"], + ["a", "b"], + ["a", "b", "d"], + ["a", "b", "d", "e"], + ["a", "b", "e"], + ["a", "c"], + ["a", "c", "d"], + ["a", "c", "d", "e"], + ["a", "c", "e"], + ["a", "d"], + ["a", "d", "e"], + ["a", "e"], + ["b", "c"], + ["b", "c", "d"], + ["b", "c", "d", "e"], + ["b", "c", "e"], + ["b", "c", "f"], + ["b", "d"], + ["b", "d", "e"], + ["b", "e"], + ["b", "f"], + ["c", "d"], + ["c", "d", "e"], + ["c", "d", "e", "g"], + ["c", "d", "g"], + ["c", "e"], + ["c", "e", "g"], + ["c", "f"], + ["c", "f", "g"], + ["c", "g"], + ["d", "e"], + ["d", "e", "g"], + ["d", "g"], + ["e", "g"], + ["f", "g"], + ["a", "b", "c"], + ["a", "b", "c", "d"], + ["a", "b", "c", "d", "e"], + ["a", "b", "c", "e"], + ] + + assert sorted(map(sorted, cliques)) == sorted(map(sorted, expected_cliques)) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_core.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_core.py new file mode 100644 index 0000000000000000000000000000000000000000..535af31b680209a9b57895246c2eb2ae268c9d55 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_core.py @@ -0,0 +1,185 @@ +import pytest + +import networkx as nx +from networkx.utils import nodes_equal + + +class TestCore: + @classmethod + def setup_class(cls): + # G is the example graph in Figure 1 from Batagelj 
and + # Zaversnik's paper titled An O(m) Algorithm for Cores + # Decomposition of Networks, 2003, + # http://arXiv.org/abs/cs/0310049. With nodes labeled as + # shown, the 3-core is given by nodes 1-8, the 2-core by nodes + # 9-16, the 1-core by nodes 17-20 and node 21 is in the + # 0-core. + t1 = nx.convert_node_labels_to_integers(nx.tetrahedral_graph(), 1) + t2 = nx.convert_node_labels_to_integers(t1, 5) + G = nx.union(t1, t2) + G.add_edges_from( + [ + (3, 7), + (2, 11), + (11, 5), + (11, 12), + (5, 12), + (12, 19), + (12, 18), + (3, 9), + (7, 9), + (7, 10), + (9, 10), + (9, 20), + (17, 13), + (13, 14), + (14, 15), + (15, 16), + (16, 13), + ] + ) + G.add_node(21) + cls.G = G + + # Create the graph H resulting from the degree sequence + # [0, 1, 2, 2, 2, 2, 3] when using the Havel-Hakimi algorithm. + + degseq = [0, 1, 2, 2, 2, 2, 3] + H = nx.havel_hakimi_graph(degseq) + mapping = {6: 0, 0: 1, 4: 3, 5: 6, 3: 4, 1: 2, 2: 5} + cls.H = nx.relabel_nodes(H, mapping) + + def test_trivial(self): + """Empty graph""" + G = nx.Graph() + assert nx.core_number(G) == {} + + def test_core_number(self): + core = nx.core_number(self.G) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(4)] + assert nodes_equal(nodes_by_core[0], [21]) + assert nodes_equal(nodes_by_core[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_core[2], [9, 10, 11, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8]) + + def test_core_number2(self): + core = nx.core_number(self.H) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(3)] + assert nodes_equal(nodes_by_core[0], [0]) + assert nodes_equal(nodes_by_core[1], [1, 3]) + assert nodes_equal(nodes_by_core[2], [2, 4, 5, 6]) + + def test_core_number_self_loop(self): + G = nx.cycle_graph(3) + G.add_edge(0, 0) + with pytest.raises(nx.NetworkXError, match="Input graph has self loops"): + nx.core_number(G) + + def test_directed_core_number(self): + """core number had a bug for directed graphs found in issue #1959""" + # small example where too timid edge removal can make cn[2] = 3 + G = nx.DiGraph() + edges = [(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)] + G.add_edges_from(edges) + assert nx.core_number(G) == {1: 2, 2: 2, 3: 2, 4: 2} + # small example where too aggressive edge removal can make cn[2] = 2 + more_edges = [(1, 5), (3, 5), (4, 5), (3, 6), (4, 6), (5, 6)] + G.add_edges_from(more_edges) + assert nx.core_number(G) == {1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3} + + def test_main_core(self): + main_core_subgraph = nx.k_core(self.H) + assert sorted(main_core_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_core(self): + # k=0 + k_core_subgraph = nx.k_core(self.H, k=0) + assert sorted(k_core_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_core_subgraph = nx.k_core(self.H, k=1) + assert sorted(k_core_subgraph.nodes()) == [1, 2, 3, 4, 5, 6] + # k = 2 + k_core_subgraph = nx.k_core(self.H, k=2) + assert sorted(k_core_subgraph.nodes()) == [2, 4, 5, 6] + + def test_main_crust(self): + main_crust_subgraph = nx.k_crust(self.H) + assert sorted(main_crust_subgraph.nodes()) == [0, 1, 3] + + def test_k_crust(self): + # k = 0 + k_crust_subgraph = nx.k_crust(self.H, k=2) + assert sorted(k_crust_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_crust_subgraph = nx.k_crust(self.H, k=1) + assert sorted(k_crust_subgraph.nodes()) == [0, 1, 3] + # k=2 + k_crust_subgraph = nx.k_crust(self.H, k=0) + assert sorted(k_crust_subgraph.nodes()) == [0] + + def test_main_shell(self): + main_shell_subgraph = 
nx.k_shell(self.H) + assert sorted(main_shell_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_shell(self): + # k=0 + k_shell_subgraph = nx.k_shell(self.H, k=2) + assert sorted(k_shell_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_shell_subgraph = nx.k_shell(self.H, k=1) + assert sorted(k_shell_subgraph.nodes()) == [1, 3] + # k=2 + k_shell_subgraph = nx.k_shell(self.H, k=0) + assert sorted(k_shell_subgraph.nodes()) == [0] + + def test_k_corona(self): + # k=0 + k_corona_subgraph = nx.k_corona(self.H, k=2) + assert sorted(k_corona_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_corona_subgraph = nx.k_corona(self.H, k=1) + assert sorted(k_corona_subgraph.nodes()) == [1] + # k=2 + k_corona_subgraph = nx.k_corona(self.H, k=0) + assert sorted(k_corona_subgraph.nodes()) == [0] + + def test_k_truss(self): + # k=-1 + k_truss_subgraph = nx.k_truss(self.G, -1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=0 + k_truss_subgraph = nx.k_truss(self.G, 0) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=1 + k_truss_subgraph = nx.k_truss(self.G, 1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=2 + k_truss_subgraph = nx.k_truss(self.G, 2) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=3 + k_truss_subgraph = nx.k_truss(self.G, 3) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 13)) + + k_truss_subgraph = nx.k_truss(self.G, 4) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 9)) + + k_truss_subgraph = nx.k_truss(self.G, 5) + assert sorted(k_truss_subgraph.nodes()) == [] + + def test_onion_layers(self): + layers = nx.onion_layers(self.G) + nodes_by_layer = [ + sorted(n for n in layers if layers[n] == val) for val in range(1, 7) + ] + assert nodes_equal(nodes_by_layer[0], [21]) + assert nodes_equal(nodes_by_layer[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_layer[2], [10, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_layer[3], [9, 11]) + assert nodes_equal(nodes_by_layer[4], [1, 2, 4, 5, 6, 8]) + assert nodes_equal(nodes_by_layer[5], [3, 7]) + + def test_onion_self_loop(self): + G = nx.cycle_graph(3) + G.add_edge(0, 0) + with pytest.raises(nx.NetworkXError, match="Input graph contains self loops"): + nx.onion_layers(G) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_cycles.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_cycles.py new file mode 100644 index 0000000000000000000000000000000000000000..230f513519bc751c61c7570207f886b770079627 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_cycles.py @@ -0,0 +1,971 @@ +from itertools import chain, islice, tee +from math import inf +from random import shuffle + +import pytest + +import networkx as nx +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + + +def check_independent(basis): + if len(basis) == 0: + return + try: + import numpy as np + except ImportError: + return + + H = nx.Graph() + for b in basis: + nx.add_cycle(H, b) + inc = nx.incidence_matrix(H, oriented=True) + rank = np.linalg.matrix_rank(inc.toarray(), tol=None, hermitian=False) + assert inc.shape[1] - rank == len(basis) + + +class TestCycles: + @classmethod + def setup_class(cls): + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 3, 4, 5]) + nx.add_cycle(G, [0, 1, 6, 7, 8]) + G.add_edge(8, 9) + cls.G = G + + def is_cyclic_permutation(self, a, b): + n = len(a) + if 
len(b) != n: + return False + l = a + a + return any(l[i : i + n] == b for i in range(n)) + + def test_cycle_basis(self): + G = self.G + cy = nx.cycle_basis(G, 0) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = nx.cycle_basis(G, 1) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = nx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + # test disconnected graphs + nx.add_cycle(G, "ABC") + cy = nx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy[:-1]) + [sorted(cy[-1])] + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5], ["A", "B", "C"]] + + def test_cycle_basis2(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + cy = nx.cycle_basis(G, 0) + + def test_cycle_basis3(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + cy = nx.cycle_basis(G, 0) + + def test_cycle_basis_ordered(self): + # see gh-6654: replaced sets with (ordered) dicts + G = nx.cycle_graph(5) + G.update(nx.cycle_graph(range(3, 8))) + cbG = nx.cycle_basis(G) + + perm = {1: 0, 0: 1} # switch 0 and 1 + H = nx.relabel_nodes(G, perm) + cbH = [[perm.get(n, n) for n in cyc] for cyc in nx.cycle_basis(H)] + assert cbG == cbH + + def test_cycle_basis_self_loop(self): + """Tests the function for graphs with self loops""" + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 0, 6, 2]) + cy = nx.cycle_basis(G) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0], [0, 1, 2], [0, 2, 3], [0, 2, 6]] + + def test_simple_cycles(self): + edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + G = nx.DiGraph(edges) + cc = sorted(nx.simple_cycles(G)) + ca = [[0], [0, 1, 2], [0, 2], [1, 2], [2]] + assert len(cc) == len(ca) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_unsortable(self): + # this test ensures that graphs whose nodes lack an intrinsic + # ordering do not cause issues + G = nx.DiGraph() + nx.add_cycle(G, ["a", 1]) + c = list(nx.simple_cycles(G)) + assert len(c) == 1 + + def test_simple_cycles_small(self): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3]) + c = sorted(nx.simple_cycles(G)) + assert len(c) == 1 + assert self.is_cyclic_permutation(c[0], [1, 2, 3]) + nx.add_cycle(G, [10, 20, 30]) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 2 + ca = [[1, 2, 3], [10, 20, 30]] + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_simple_cycles_empty(self): + G = nx.DiGraph() + assert list(nx.simple_cycles(G)) == [] + + def worst_case_graph(self, k): + # see figure 1 in Johnson's paper (SIAM J. Comput., 1975) + # this graph has exactly 3k simple cycles + G = nx.DiGraph() + for n in range(2, k + 2): + G.add_edge(1, n) + G.add_edge(n, k + 2) + G.add_edge(2 * k + 1, 1) + for n in range(k + 2, 2 * k + 2): + G.add_edge(n, 2 * k + 2) + G.add_edge(n, n + 1) + G.add_edge(2 * k + 3, k + 2) + for n in range(2 * k + 3, 3 * k + 3): + G.add_edge(2 * k + 2, n) + G.add_edge(n, 3 * k + 3) + G.add_edge(3 * k + 3, 2 * k + 2) + return G + + def test_worst_case_graph(self): + # see figure 1 in Johnson's paper + for k in range(3, 10): + G = self.worst_case_graph(k) + l = len(list(nx.simple_cycles(G))) + assert l == 3 * k + + def test_recursive_simple_and_not(self): + for k in range(2, 10): + G = self.worst_case_graph(k) + cc = sorted(nx.simple_cycles(G)) + rcc = 
sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, r) for r in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + def test_simple_graph_with_reported_bug(self): + G = nx.DiGraph() + edges = [ + (0, 2), + (0, 3), + (1, 0), + (1, 3), + (2, 1), + (2, 4), + (3, 2), + (3, 4), + (4, 0), + (4, 1), + (4, 5), + (5, 0), + (5, 1), + (5, 2), + (5, 3), + ] + G.add_edges_from(edges) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 26 + rcc = sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + +def pairwise(iterable): + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + +def cycle_edges(c): + return pairwise(chain(c, islice(c, 1))) + + +def directed_cycle_edgeset(c): + return frozenset(cycle_edges(c)) + + +def undirected_cycle_edgeset(c): + if len(c) == 1: + return frozenset(cycle_edges(c)) + return frozenset(map(frozenset, cycle_edges(c))) + + +def multigraph_cycle_edgeset(c): + if len(c) <= 2: + return frozenset(cycle_edges(c)) + else: + return frozenset(map(frozenset, cycle_edges(c))) + + +class TestCycleEnumeration: + @staticmethod + def K(n): + return nx.complete_graph(n) + + @staticmethod + def D(n): + return nx.complete_graph(n).to_directed() + + @staticmethod + def edgeset_function(g): + if g.is_directed(): + return directed_cycle_edgeset + elif g.is_multigraph(): + return multigraph_cycle_edgeset + else: + return undirected_cycle_edgeset + + def check_cycle(self, g, c, es, cache, source, original_c, length_bound, chordless): + if length_bound is not None and len(c) > length_bound: + raise RuntimeError( + f"computed cycle {original_c} exceeds length bound {length_bound}" + ) + if source == "computed": + if es in cache: + raise RuntimeError( + f"computed cycle {original_c} has already been found!" 
+ ) + else: + cache[es] = tuple(original_c) + else: + if es in cache: + cache.pop(es) + else: + raise RuntimeError(f"expected cycle {original_c} was not computed") + + if not all(g.has_edge(*e) for e in es): + raise RuntimeError( + f"{source} claimed cycle {original_c} is not a cycle of g" + ) + if chordless and len(g.subgraph(c).edges) > len(c): + raise RuntimeError(f"{source} cycle {original_c} is not chordless") + + def check_cycle_algorithm( + self, + g, + expected_cycles, + length_bound=None, + chordless=False, + algorithm=None, + ): + if algorithm is None: + algorithm = nx.chordless_cycles if chordless else nx.simple_cycles + + # note: we shuffle the labels of g to rule out accidentally-correct + # behavior which occurred during the development of chordless cycle + # enumeration algorithms + + relabel = list(range(len(g))) + shuffle(relabel) + label = dict(zip(g, relabel)) + unlabel = dict(zip(relabel, g)) + h = nx.relabel_nodes(g, label, copy=True) + + edgeset = self.edgeset_function(h) + + params = {} + if length_bound is not None: + params["length_bound"] = length_bound + + cycle_cache = {} + for c in algorithm(h, **params): + original_c = [unlabel[x] for x in c] + es = edgeset(c) + self.check_cycle( + h, c, es, cycle_cache, "computed", original_c, length_bound, chordless + ) + + if isinstance(expected_cycles, int): + if len(cycle_cache) != expected_cycles: + raise RuntimeError( + f"expected {expected_cycles} cycles, got {len(cycle_cache)}" + ) + return + for original_c in expected_cycles: + c = [label[x] for x in original_c] + es = edgeset(c) + self.check_cycle( + h, c, es, cycle_cache, "expected", original_c, length_bound, chordless + ) + + if len(cycle_cache): + for c in cycle_cache.values(): + raise RuntimeError( + f"computed cycle {c} is valid but not in the expected cycle set!" 
+ ) + + def check_cycle_enumeration_integer_sequence( + self, + g_family, + cycle_counts, + length_bound=None, + chordless=False, + algorithm=None, + ): + for g, num_cycles in zip(g_family, cycle_counts): + self.check_cycle_algorithm( + g, + num_cycles, + length_bound=length_bound, + chordless=chordless, + algorithm=algorithm, + ) + + def test_directed_chordless_cycle_digons(self): + g = nx.DiGraph() + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(5)[::-1]) + g.add_edge(0, 0) + expected_cycles = [(0,), (1, 2), (2, 3), (3, 4)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=2) + + expected_cycles = [c for c in expected_cycles if len(c) < 2] + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=1) + + def test_directed_chordless_cycle_undirected(self): + g = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 5), (5, 0), (5, 1), (0, 2)]) + expected_cycles = [(0, 2, 3, 4, 5), (1, 2, 3, 4, 5)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g = nx.DiGraph() + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(4, 9)) + g.add_edge(7, 3) + expected_cycles = [(0, 1, 2, 3, 4), (3, 4, 5, 6, 7), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g.add_edge(3, 7) + expected_cycles = [(0, 1, 2, 3, 4), (3, 7), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + expected_cycles = [(3, 7)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=4) + + g.remove_edge(7, 3) + expected_cycles = [(0, 1, 2, 3, 4), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g = nx.DiGraph((i, j) for i in range(10) for j in range(i)) + expected_cycles = [] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + def test_chordless_cycles_directed(self): + G = nx.DiGraph() + nx.add_cycle(G, range(5)) + nx.add_cycle(G, range(4, 12)) + expected = [[*range(5)], [*range(4, 12)]] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(7, 3) + expected.append([*range(3, 8)]) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(3, 7) + expected[-1] = [7, 3] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + expected.pop() + G.remove_edge(7, 3) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + def test_directed_chordless_cycle_diclique(self): + g_family = [self.D(n) for n in range(10)] + expected_cycles = [(n * n - n) // 2 for n in range(10)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected_cycles, chordless=True + ) + + expected_cycles = [(n * n - n) // 2 for n in range(10)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected_cycles, length_bound=2 + ) + + def test_directed_chordless_loop_blockade(self): + g = nx.DiGraph((i, i) for i in range(10)) + nx.add_cycle(g, range(10)) + expected_cycles = [(i,) for i in range(10)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + self.check_cycle_algorithm(g, expected_cycles, 
length_bound=1) + + g = nx.MultiDiGraph(g) + g.add_edges_from((i, i) for i in range(0, 10, 2)) + expected_cycles = [(i,) for i in range(1, 10, 2)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + def test_simple_cycles_notable_clique_sequences(self): + # A000292: Number of labeled graphs on n+3 nodes that are triangles. + g_family = [self.K(n) for n in range(2, 12)] + expected = [0, 1, 4, 10, 20, 35, 56, 84, 120, 165, 220] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=3 + ) + + def triangles(g, **kwargs): + yield from (c for c in nx.simple_cycles(g, **kwargs) if len(c) == 3) + + # directed complete graphs have twice as many triangles thanks to reversal + g_family = [self.D(n) for n in range(2, 12)] + expected = [2 * e for e in expected] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=3, algorithm=triangles + ) + + def four_cycles(g, **kwargs): + yield from (c for c in nx.simple_cycles(g, **kwargs) if len(c) == 4) + + # A050534: the number of 4-cycles in the complete graph K_{n+1} + expected = [0, 0, 0, 3, 15, 45, 105, 210, 378, 630, 990] + g_family = [self.K(n) for n in range(1, 12)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=4, algorithm=four_cycles + ) + + # directed complete graphs have twice as many 4-cycles thanks to reversal + expected = [2 * e for e in expected] + g_family = [self.D(n) for n in range(1, 15)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, length_bound=4, algorithm=four_cycles + ) + + # A006231: the number of elementary circuits in a complete directed graph with n nodes + expected = [0, 1, 5, 20, 84, 409, 2365] + g_family = [self.D(n) for n in range(1, 8)] + self.check_cycle_enumeration_integer_sequence(g_family, expected) + + # A002807: Number of cycles in the complete graph on n nodes K_{n}. 
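+ # (a quick sanity check on A002807: K_n contains C(n, L) * (L - 1)! / 2 + # cycles of each length L >= 3, so K_4 yields 4 triangles and 3 + # quadrilaterals, matching the n == 4 entry of 7 in the list below)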
+ expected = [0, 0, 0, 1, 7, 37, 197, 1172] + g_family = [self.K(n) for n in range(8)] + self.check_cycle_enumeration_integer_sequence(g_family, expected) + + def test_directed_chordless_cycle_parallel_multiedges(self): + g = nx.MultiGraph() + + nx.add_cycle(g, range(5)) + expected = [[*range(5)]] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + expected = [*cycle_edges(range(5))] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + expected = [] + self.check_cycle_algorithm(g, expected, chordless=True) + + g = nx.MultiDiGraph() + + nx.add_cycle(g, range(5)) + expected = [[*range(5)]] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + g = nx.MultiDiGraph() + + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(5)[::-1]) + expected = [*cycle_edges(range(5))] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + def test_chordless_cycles_graph(self): + G = nx.Graph() + nx.add_cycle(G, range(5)) + nx.add_cycle(G, range(4, 12)) + expected = [[*range(5)], [*range(4, 12)]] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(7, 3) + expected.append([*range(3, 8)]) + expected.append([4, 3, 7, 8, 9, 10, 11]) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + def test_chordless_cycles_giant_hamiltonian(self): + # ... o - e - o - e - o ... # o = odd, e = even + # ... ---/ \-----/ \--- ... # <-- "long" edges + # + # each long edge belongs to exactly one triangle, and one giant cycle + # of length n/2. The remaining edges each belong to a triangle + + n = 1000 + assert n % 2 == 0 + G = nx.Graph() + for v in range(n): + if not v % 2: + G.add_edge(v, (v + 2) % n) + G.add_edge(v, (v + 1) % n) + + expected = [[*range(0, n, 2)]] + [ + [x % n for x in range(i, i + 3)] for i in range(0, n, 2) + ] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 3], length_bound=3, chordless=True + ) + + # ... o -> e -> o -> e -> o ... # o = odd, e = even + # ... <---/ \---<---/ \---< ... # <-- "long" edges + # + # this time, we orient the short and long edges in opposition + # the cycle structure of this graph is the same, but we need to reverse + # the long one in our representation. 
Also, we need to drop the size + # because our partitioning algorithm uses strongly connected components + # instead of separating graphs by their strong articulation points + + n = 100 + assert n % 2 == 0 + G = nx.DiGraph() + for v in range(n): + G.add_edge(v, (v + 1) % n) + if not v % 2: + G.add_edge((v + 2) % n, v) + + expected = [[*range(n - 2, -2, -2)]] + [ + [x % n for x in range(i, i + 3)] for i in range(0, n, 2) + ] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 3], length_bound=3, chordless=True + ) + + def test_simple_cycles_acyclic_tournament(self): + n = 10 + G = nx.DiGraph((x, y) for x in range(n) for y in range(x)) + self.check_cycle_algorithm(G, []) + self.check_cycle_algorithm(G, [], chordless=True) + + for k in range(n + 1): + self.check_cycle_algorithm(G, [], length_bound=k) + self.check_cycle_algorithm(G, [], length_bound=k, chordless=True) + + def test_simple_cycles_graph(self): + testG = nx.cycle_graph(8) + cyc1 = tuple(range(8)) + self.check_cycle_algorithm(testG, [cyc1]) + + testG.add_edge(4, -1) + nx.add_path(testG, [3, -2, -3, -4]) + self.check_cycle_algorithm(testG, [cyc1]) + + testG.update(nx.cycle_graph(range(8, 16))) + cyc2 = tuple(range(8, 16)) + self.check_cycle_algorithm(testG, [cyc1, cyc2]) + + testG.update(nx.cycle_graph(range(4, 12))) + cyc3 = tuple(range(4, 12)) + expected = { + (0, 1, 2, 3, 4, 5, 6, 7), # cyc1 + (8, 9, 10, 11, 12, 13, 14, 15), # cyc2 + (4, 5, 6, 7, 8, 9, 10, 11), # cyc3 + (4, 5, 6, 7, 8, 15, 14, 13, 12, 11), # cyc2 + cyc3 + (0, 1, 2, 3, 4, 11, 10, 9, 8, 7), # cyc1 + cyc3 + (0, 1, 2, 3, 4, 11, 12, 13, 14, 15, 8, 7), # cyc1 + cyc2 + cyc3 + } + self.check_cycle_algorithm(testG, expected) + assert len(expected) == (2**3 - 1) - 1 # 1 disjoint comb: cyc1 + cyc2 + + # Basis size = 5 (2 loops overlapping gives 5 small loops + # E + # / \ Note: A-F = 10-15 + # 1-2-3-4-5 + # / | | \ cyc1=012DAB -- left + # 0 D F 6 cyc2=234E -- top + # \ | | / cyc3=45678F -- right + # B-A-9-8-7 cyc4=89AC -- bottom + # \ / cyc5=234F89AD -- middle + # C + # + # combinations of 5 basis elements: 2^5 - 1 (one includes no cycles) + # + # disjoint combs: (11 total) not simple cycles + # Any pair not including cyc5 => choose(4, 2) = 6 + # Any triple not including cyc5 => choose(4, 3) = 4 + # Any quad not including cyc5 => choose(4, 4) = 1 + # + # we expect 31 - 11 = 20 simple cycles + # + testG = nx.cycle_graph(12) + testG.update(nx.cycle_graph([12, 10, 13, 2, 14, 4, 15, 8]).edges) + expected = (2**5 - 1) - 11 # 11 disjoint combinations + self.check_cycle_algorithm(testG, expected) + + def test_simple_cycles_bounded(self): + # iteratively construct a cluster of nested cycles running in the same direction + # there should be one cycle of every length + d = nx.DiGraph() + expected = [] + for n in range(10): + nx.add_cycle(d, range(n)) + expected.append(n) + for k, e in enumerate(expected): + self.check_cycle_algorithm(d, e, length_bound=k) + + # iteratively construct a path of undirected cycles, connected at articulation + # points. 
there should be one cycle of every length except 2: no digons + g = nx.Graph() + top = 0 + expected = [] + for n in range(10): + expected.append(n if n < 2 else n - 1) + if n == 2: + # no digons in undirected graphs + continue + nx.add_cycle(g, range(top, top + n)) + top += n + for k, e in enumerate(expected): + self.check_cycle_algorithm(g, e, length_bound=k) + + def test_simple_cycles_bound_corner_cases(self): + G = nx.cycle_graph(4) + DG = nx.cycle_graph(4, create_using=nx.DiGraph) + assert list(nx.simple_cycles(G, length_bound=0)) == [] + assert list(nx.simple_cycles(DG, length_bound=0)) == [] + assert list(nx.chordless_cycles(G, length_bound=0)) == [] + assert list(nx.chordless_cycles(DG, length_bound=0)) == [] + + def test_simple_cycles_bound_error(self): + with pytest.raises(ValueError): + G = nx.DiGraph() + for c in nx.simple_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.Graph() + for c in nx.simple_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.Graph() + for c in nx.chordless_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.DiGraph() + for c in nx.chordless_cycles(G, -1): + assert False + + def test_chordless_cycles_clique(self): + g_family = [self.K(n) for n in range(2, 15)] + expected = [0, 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, chordless=True + ) + + # directed cliques have as many digons as undirected graphs have edges + expected = [(n * n - n) // 2 for n in range(15)] + g_family = [self.D(n) for n in range(15)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, chordless=True + ) + + +# These tests might fail with hash randomization since they depend on +# edge_dfs. For more information, see the comments in: +# networkx/algorithms/traversal/tests/test_edgedfs.py + + +class TestFindCycle: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(-1, 0), (0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_graph_nocycle(self): + G = nx.Graph(self.edges) + pytest.raises(nx.exception.NetworkXNoCycle, nx.find_cycle, G, self.nodes) + + def test_graph_cycle(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_none(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_original(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 0, FORWARD)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1)] # or (1, 0, 2) + # Hash randomization...could be any edge. 
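+ # the first edge (0, 1, 0) is deterministic, but the closing edge may use + # either parallel (1, 0) edge, so only its (u, v) endpoints are checked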
+ assert x[0] == x_[0] + assert x[1][:2] == x_[1][:2] + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0)] # or (1, 0, 1) + assert x[0] == x_[0] + assert x[1][:2] == x_[1][:2] + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD)] + assert x == x_ + + def test_digraph_reverse(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation="reverse")) + x_ = [(1, 0, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, 0, FORWARD), (1, 0, 0, FORWARD)] # or (1, 0, 1, 1) + assert x[0] == x_[0] + assert x[1][:2] == x_[1][:2] + assert x[1][3] == x_[1][3] + + def test_multidigraph_ignore2(self): + # Loop traversed an edge while ignoring its orientation. + G = nx.MultiDiGraph([(0, 1), (1, 2), (1, 2)]) + x = list(nx.find_cycle(G, [0, 1, 2], orientation="ignore")) + x_ = [(1, 2, 0, FORWARD), (1, 2, 1, REVERSE)] + assert x == x_ + + def test_multidigraph_original(self): + # Node 2 does not need to be searched again once it has been visited + # from 4. The goal here is to cover the case where 2 would be + # re-searched from 4 when 4 is visited for the first time (we must make + # sure that 4 is not visited from 2, i.e. the edge orientation is + # respected). + G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 3), (4, 2)]) + pytest.raises( + nx.exception.NetworkXNoCycle, + nx.find_cycle, + G, + [0, 1, 2, 3, 4], + orientation="original", + ) + + def test_dag(self): + G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) + pytest.raises( + nx.exception.NetworkXNoCycle, nx.find_cycle, G, orientation="original" + ) + x = list(nx.find_cycle(G, orientation="ignore")) + assert x == [(0, 1, FORWARD), (1, 2, FORWARD), (0, 2, REVERSE)] + + def test_prev_explored(self): + # https://github.com/networkx/networkx/issues/2323 + + G = nx.DiGraph() + G.add_edges_from([(1, 0), (2, 0), (1, 2), (2, 1)]) + pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0) + x = list(nx.find_cycle(G, 1)) + x_ = [(1, 2), (2, 1)] + assert x == x_ + + x = list(nx.find_cycle(G, 2)) + x_ = [(2, 1), (1, 2)] + assert x == x_ + + x = list(nx.find_cycle(G)) + x_ = [(1, 2), (2, 1)] + assert x == x_ + + def test_no_cycle(self): + # https://github.com/networkx/networkx/issues/2439 + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 0), (3, 1), (3, 2)]) + pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0) + pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G) + + +def assert_basis_equal(a, b): + assert sorted(a) == sorted(b) + + +class TestMinimumCycleBasis: + @classmethod + def setup_class(cls): + T = nx.Graph() + nx.add_cycle(T, [1, 2, 3, 4], weight=1) + T.add_edge(2, 4, weight=5) + cls.diamond_graph = T + + def test_unweighted_diamond(self): + mcb = nx.minimum_cycle_basis(self.diamond_graph) + assert_basis_equal(mcb, [[2, 4, 1], [3, 4, 2]]) + + def test_weighted_diamond(self): + mcb = nx.minimum_cycle_basis(self.diamond_graph, weight="weight") + assert_basis_equal(mcb, [[2, 4, 1], [4, 3, 2, 1]]) + + def test_dimensionality(self): + # checks |MCB|=|E|-|V|+|NC| + ntrial = 10 + for seed in range(1234, 1234 + ntrial): + rg = nx.erdos_renyi_graph(10, 0.3, seed=seed) + nnodes = rg.number_of_nodes() + nedges = rg.number_of_edges() + ncomp = nx.number_connected_components(rg) + + mcb = nx.minimum_cycle_basis(rg) + 
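# the cycle space of a graph with n nodes, m edges and c connected + # components has dimension m - n + c, so a minimum cycle basis must + # contain exactly that many cycles +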
assert len(mcb) == nedges - nnodes + ncomp + check_independent(mcb) + + def test_complete_graph(self): + cg = nx.complete_graph(5) + mcb = nx.minimum_cycle_basis(cg) + assert all(len(cycle) == 3 for cycle in mcb) + check_independent(mcb) + + def test_tree_graph(self): + tg = nx.balanced_tree(3, 3) + assert not nx.minimum_cycle_basis(tg) + + def test_petersen_graph(self): + G = nx.petersen_graph() + mcb = list(nx.minimum_cycle_basis(G)) + expected = [ + [4, 9, 7, 5, 0], + [1, 2, 3, 4, 0], + [1, 6, 8, 5, 0], + [4, 3, 8, 5, 0], + [1, 6, 9, 4, 0], + [1, 2, 7, 5, 0], + ] + assert len(mcb) == len(expected) + assert all(c in expected for c in mcb) + + # check that order of the nodes is a path + for c in mcb: + assert all(G.has_edge(u, v) for u, v in nx.utils.pairwise(c, cyclic=True)) + # check independence of the basis + check_independent(mcb) + + def test_gh6787_variable_weighted_complete_graph(self): + N = 8 + cg = nx.complete_graph(N) + cg.add_weighted_edges_from([(u, v, 9) for u, v in cg.edges]) + cg.add_weighted_edges_from([(u, v, 1) for u, v in nx.cycle_graph(N).edges]) + mcb = nx.minimum_cycle_basis(cg, weight="weight") + check_independent(mcb) + + def test_gh6787_and_edge_attribute_names(self): + G = nx.cycle_graph(4) + G.add_weighted_edges_from([(0, 2, 10), (1, 3, 10)], weight="dist") + expected = [[1, 3, 0], [3, 2, 1, 0], [1, 2, 0]] + mcb = list(nx.minimum_cycle_basis(G, weight="dist")) + assert len(mcb) == len(expected) + assert all(c in expected for c in mcb) + + # test not using a weight with weight attributes + expected = [[1, 3, 0], [1, 2, 0], [3, 2, 0]] + mcb = list(nx.minimum_cycle_basis(G)) + assert len(mcb) == len(expected) + assert all(c in expected for c in mcb) + + +class TestGirth: + @pytest.mark.parametrize( + ("G", "expected"), + ( + (nx.chvatal_graph(), 4), + (nx.tutte_graph(), 4), + (nx.petersen_graph(), 5), + (nx.heawood_graph(), 6), + (nx.pappus_graph(), 6), + (nx.random_tree(10, seed=42), inf), + (nx.empty_graph(10), inf), + (nx.Graph(chain(cycle_edges(range(5)), cycle_edges(range(6, 10)))), 4), + ( + nx.Graph( + [ + (0, 6), + (0, 8), + (0, 9), + (1, 8), + (2, 8), + (2, 9), + (4, 9), + (5, 9), + (6, 8), + (6, 9), + (7, 8), + ] + ), + 3, + ), + ), + ) + def test_girth(self, G, expected): + assert nx.girth(G) == expected diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_efficiency.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_efficiency.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2e7d0463b3a0abeb8395df4ab870456faa64b7 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_efficiency.py @@ -0,0 +1,58 @@ +"""Unit tests for the :mod:`networkx.algorithms.efficiency` module.""" + +import networkx as nx + + +class TestEfficiency: + def setup_method(self): + # G1 is a disconnected graph + self.G1 = nx.Graph() + self.G1.add_nodes_from([1, 2, 3]) + # G2 is a cycle graph + self.G2 = nx.cycle_graph(4) + # G3 is the triangle graph with one additional edge + self.G3 = nx.lollipop_graph(3, 1) + + def test_efficiency_disconnected_nodes(self): + """ + When nodes are disconnected, efficiency is 0 + """ + assert nx.efficiency(self.G1, 1, 2) == 0 + + def test_local_efficiency_disconnected_graph(self): + """ + In a disconnected graph the efficiency is 0 + """ + assert nx.local_efficiency(self.G1) == 0 + + def test_efficiency(self): + assert nx.efficiency(self.G2, 0, 1) == 1 + assert 
nx.efficiency(self.G2, 0, 2) == 1 / 2 + + def test_global_efficiency(self): + assert nx.global_efficiency(self.G2) == 5 / 6 + + def test_global_efficiency_complete_graph(self): + """ + Tests that the average global efficiency of the complete graph is one. + """ + for n in range(2, 10): + G = nx.complete_graph(n) + assert nx.global_efficiency(G) == 1 + + def test_local_efficiency_complete_graph(self): + """ + Test that the local efficiency for a complete graph with at least 3 + nodes should be one. For a graph with only 2 nodes, the induced + subgraph has no edges. + """ + for n in range(3, 10): + G = nx.complete_graph(n) + assert nx.local_efficiency(G) == 1 + + def test_using_ego_graph(self): + """ + Test that the ego graph is used when computing local efficiency. + For more information, see GitHub issue #2710. + """ + assert nx.local_efficiency(self.G3) == 7 / 12 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py new file mode 100644 index 0000000000000000000000000000000000000000..66d75220327cb27c8b378505aea2780ea96021af --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py @@ -0,0 +1,427 @@ +from itertools import chain, combinations, product + +import pytest + +import networkx as nx + +tree_all_pairs_lca = nx.tree_all_pairs_lowest_common_ancestor +all_pairs_lca = nx.all_pairs_lowest_common_ancestor + + +def get_pair(dictionary, n1, n2): + if (n1, n2) in dictionary: + return dictionary[n1, n2] + else: + return dictionary[n2, n1] + + +class TestTreeLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + cls.DG.add_edges_from(edges) + cls.ans = dict(tree_all_pairs_lca(cls.DG, 0)) + gold = {(n, n): n for n in cls.DG} + gold.update({(0, i): 0 for i in range(1, 7)}) + gold.update( + { + (1, 2): 0, + (1, 3): 1, + (1, 4): 1, + (1, 5): 0, + (1, 6): 0, + (2, 3): 0, + (2, 4): 0, + (2, 5): 2, + (2, 6): 2, + (3, 4): 1, + (3, 5): 0, + (3, 6): 0, + (4, 5): 0, + (4, 6): 0, + (5, 6): 2, + } + ) + + cls.gold = gold + + @staticmethod + def assert_has_same_pairs(d1, d2): + for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): + assert get_pair(d1, a, b) == get_pair(d2, a, b) + + def test_tree_all_pairs_lca_default_root(self): + assert dict(tree_all_pairs_lca(self.DG)) == self.ans + + def test_tree_all_pairs_lca_return_subset(self): + test_pairs = [(0, 1), (0, 1), (1, 0)] + ans = dict(tree_all_pairs_lca(self.DG, 0, test_pairs)) + assert (0, 1) in ans and (1, 0) in ans + assert len(ans) == 2 + + def test_tree_all_pairs_lca(self): + all_pairs = chain(combinations(self.DG, 2), ((node, node) for node in self.DG)) + + ans = dict(tree_all_pairs_lca(self.DG, 0, all_pairs)) + self.assert_has_same_pairs(ans, self.ans) + + def test_tree_all_pairs_gold_example(self): + ans = dict(tree_all_pairs_lca(self.DG)) + self.assert_has_same_pairs(self.gold, ans) + + def test_tree_all_pairs_lca_invalid_input(self): + empty_digraph = tree_all_pairs_lca(nx.DiGraph()) + pytest.raises(nx.NetworkXPointlessConcept, list, empty_digraph) + + bad_pairs_digraph = tree_all_pairs_lca(self.DG, pairs=[(-1, -2)]) + pytest.raises(nx.NodeNotFound, list, bad_pairs_digraph) + + def test_tree_all_pairs_lca_subtrees(self): + ans = dict(tree_all_pairs_lca(self.DG, 1)) + gold = { + 
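# keep only the gold pairs that lie entirely within the subtree rooted at 1 +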
pair: lca + for (pair, lca) in self.gold.items() + if all(n in (1, 3, 4) for n in pair) + } + self.assert_has_same_pairs(gold, ans) + + def test_tree_all_pairs_lca_disconnected_nodes(self): + G = nx.DiGraph() + G.add_node(1) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G)) + + G.add_node(0) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G, 1)) + assert {(0, 0): 0} == dict(tree_all_pairs_lca(G, 0)) + + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_error_if_input_not_tree(self): + # Cycle + G = nx.DiGraph([(1, 2), (2, 1)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + # DAG + G = nx.DiGraph([(0, 2), (1, 2)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_generator(self): + pairs = iter([(0, 1), (0, 1), (1, 0)]) + some_pairs = dict(tree_all_pairs_lca(self.DG, 0, pairs)) + assert (0, 1) in some_pairs and (1, 0) in some_pairs + assert len(some_pairs) == 2 + + def test_tree_all_pairs_lca_nonexisting_pairs_exception(self): + lca = tree_all_pairs_lca(self.DG, 0, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + # check if node is None + lca = tree_all_pairs_lca(self.DG, None, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + + def test_tree_all_pairs_lca_routine_bails_on_DAGs(self): + G = nx.DiGraph([(3, 4), (5, 4)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_not_implemented(self): + NNI = nx.NetworkXNotImplemented + G = nx.Graph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + + def test_tree_all_pairs_lca_trees_without_LCAs(self): + G = nx.DiGraph() + G.add_node(3) + ans = list(tree_all_pairs_lca(G)) + assert ans == [((3, 3), 3)] + + +class TestMultiTreeLCA(TestTreeLCA): + @classmethod + def setup_class(cls): + cls.DG = nx.MultiDiGraph() + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + cls.DG.add_edges_from(edges) + cls.ans = dict(tree_all_pairs_lca(cls.DG, 0)) + # add multiedges + cls.DG.add_edges_from(edges) + + gold = {(n, n): n for n in cls.DG} + gold.update({(0, i): 0 for i in range(1, 7)}) + gold.update( + { + (1, 2): 0, + (1, 3): 1, + (1, 4): 1, + (1, 5): 0, + (1, 6): 0, + (2, 3): 0, + (2, 4): 0, + (2, 5): 2, + (2, 6): 2, + (3, 4): 1, + (3, 5): 0, + (3, 6): 0, + (4, 5): 0, + (4, 6): 0, + (5, 6): 2, + } + ) + + cls.gold = gold + + +class TestDAGLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + nx.add_path(cls.DG, (0, 1, 2, 3)) + nx.add_path(cls.DG, (0, 4, 3)) + nx.add_path(cls.DG, (0, 5, 6, 8, 3)) + nx.add_path(cls.DG, (5, 7, 8)) + cls.DG.add_edge(6, 2) + cls.DG.add_edge(7, 2) + + cls.root_distance = nx.shortest_path_length(cls.DG, source=0) + + cls.gold = { + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 0, + (1, 5): 0, + (1, 6): 0, + (1, 7): 0, + (1, 8): 0, + (2, 2): 2, + (2, 3): 2, + (2, 4): 0, + (2, 5): 5, + (2, 6): 6, + (2, 7): 7, + (2, 8): 7, + (3, 3): 3, + (3, 4): 4, + (3, 5): 5, + (3, 6): 6, + (3, 7): 7, + (3, 8): 8, + (4, 4): 4, + (4, 5): 0, + (4, 6): 0, + (4, 7): 0, + (4, 8): 0, + (5, 5): 5, + (5, 6): 5, + (5, 7): 5, + (5, 8): 5, + (6, 6): 6, + (6, 7): 5, + (6, 8): 6, + (7, 7): 7, + (7, 8): 7, + (8, 8): 8, + } + cls.gold.update(((0, n), 0) for n 
in cls.DG) + + def assert_lca_dicts_same(self, d1, d2, G=None): + """Checks if d1 and d2 contain the same pairs and + have a node at the same distance from root for each. + If G is None use self.DG.""" + if G is None: + G = self.DG + root_distance = self.root_distance + else: + roots = [n for n, deg in G.in_degree if deg == 0] + assert len(roots) == 1 + root_distance = nx.shortest_path_length(G, source=roots[0]) + + for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): + assert ( + root_distance[get_pair(d1, a, b)] == root_distance[get_pair(d2, a, b)] + ) + + def test_all_pairs_lca_gold_example(self): + self.assert_lca_dicts_same(dict(all_pairs_lca(self.DG)), self.gold) + + def test_all_pairs_lca_all_pairs_given(self): + all_pairs = list(product(self.DG.nodes(), self.DG.nodes())) + ans = all_pairs_lca(self.DG, pairs=all_pairs) + self.assert_lca_dicts_same(dict(ans), self.gold) + + def test_all_pairs_lca_generator(self): + all_pairs = product(self.DG.nodes(), self.DG.nodes()) + ans = all_pairs_lca(self.DG, pairs=all_pairs) + self.assert_lca_dicts_same(dict(ans), self.gold) + + def test_all_pairs_lca_input_graph_with_two_roots(self): + G = self.DG.copy() + G.add_edge(9, 10) + G.add_edge(9, 4) + gold = self.gold.copy() + gold[9, 9] = 9 + gold[9, 10] = 9 + gold[9, 4] = 9 + gold[9, 3] = 9 + gold[10, 4] = 9 + gold[10, 3] = 9 + gold[10, 10] = 10 + + testing = dict(all_pairs_lca(G)) + + G.add_edge(-1, 9) + G.add_edge(-1, 0) + self.assert_lca_dicts_same(testing, gold, G) + + def test_all_pairs_lca_nonexisting_pairs_exception(self): + pytest.raises(nx.NodeNotFound, all_pairs_lca, self.DG, [(-1, -1)]) + + def test_all_pairs_lca_pairs_without_lca(self): + G = self.DG.copy() + G.add_node(-1) + gen = all_pairs_lca(G, [(-1, -1), (-1, 0)]) + assert dict(gen) == {(-1, -1): -1} + + def test_all_pairs_lca_null_graph(self): + pytest.raises(nx.NetworkXPointlessConcept, all_pairs_lca, nx.DiGraph()) + + def test_all_pairs_lca_non_dags(self): + pytest.raises(nx.NetworkXError, all_pairs_lca, nx.DiGraph([(3, 4), (4, 3)])) + + def test_all_pairs_lca_nonempty_graph_without_lca(self): + G = nx.DiGraph() + G.add_node(3) + ans = list(all_pairs_lca(G)) + assert ans == [((3, 3), 3)] + + def test_all_pairs_lca_bug_gh4942(self): + G = nx.DiGraph([(0, 2), (1, 2), (2, 3)]) + ans = list(all_pairs_lca(G)) + assert len(ans) == 9 + + def test_all_pairs_lca_default_kwarg(self): + G = nx.DiGraph([(0, 1), (2, 1)]) + sentinel = object() + assert nx.lowest_common_ancestor(G, 0, 2, default=sentinel) is sentinel + + def test_all_pairs_lca_identity(self): + G = nx.DiGraph() + G.add_node(3) + assert nx.lowest_common_ancestor(G, 3, 3) == 3 + + def test_all_pairs_lca_issue_4574(self): + G = nx.DiGraph() + G.add_nodes_from(range(17)) + G.add_edges_from( + [ + (2, 0), + (1, 2), + (3, 2), + (5, 2), + (8, 2), + (11, 2), + (4, 5), + (6, 5), + (7, 8), + (10, 8), + (13, 11), + (14, 11), + (15, 11), + (9, 10), + (12, 13), + (16, 15), + ] + ) + + assert nx.lowest_common_ancestor(G, 7, 9) == None + + def test_all_pairs_lca_one_pair_gh4942(self): + G = nx.DiGraph() + # Note: order edge addition is critical to the test + G.add_edge(0, 1) + G.add_edge(2, 0) + G.add_edge(2, 3) + G.add_edge(4, 0) + G.add_edge(5, 2) + + assert nx.lowest_common_ancestor(G, 1, 3) == 2 + + +class TestMultiDiGraph_DAGLCA(TestDAGLCA): + @classmethod + def setup_class(cls): + cls.DG = nx.MultiDiGraph() + nx.add_path(cls.DG, (0, 1, 2, 3)) + # add multiedges + nx.add_path(cls.DG, (0, 1, 2, 3)) + nx.add_path(cls.DG, (0, 4, 3)) + nx.add_path(cls.DG, (0, 5, 6, 8, 3)) + 
nx.add_path(cls.DG, (5, 7, 8)) + cls.DG.add_edge(6, 2) + cls.DG.add_edge(7, 2) + + cls.root_distance = nx.shortest_path_length(cls.DG, source=0) + + cls.gold = { + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 0, + (1, 5): 0, + (1, 6): 0, + (1, 7): 0, + (1, 8): 0, + (2, 2): 2, + (2, 3): 2, + (2, 4): 0, + (2, 5): 5, + (2, 6): 6, + (2, 7): 7, + (2, 8): 7, + (3, 3): 3, + (3, 4): 4, + (3, 5): 5, + (3, 6): 6, + (3, 7): 7, + (3, 8): 8, + (4, 4): 4, + (4, 5): 0, + (4, 6): 0, + (4, 7): 0, + (4, 8): 0, + (5, 5): 5, + (5, 6): 5, + (5, 7): 5, + (5, 8): 5, + (6, 6): 6, + (6, 7): 5, + (6, 8): 6, + (7, 7): 7, + (7, 8): 7, + (8, 8): 8, + } + cls.gold.update(((0, n), 0) for n in cls.DG) + + +def test_all_pairs_lca_self_ancestors(): + """Self-ancestors should always be the node itself, i.e. lca of (0, 0) is 0. + See gh-4458.""" + # DAG for test - note order of node/edge addition is relevant + G = nx.DiGraph() + G.add_nodes_from(range(5)) + G.add_edges_from([(1, 0), (2, 0), (3, 2), (4, 1), (4, 3)]) + + ap_lca = nx.all_pairs_lowest_common_ancestor + assert all(u == v == a for (u, v), a in ap_lca(G) if u == v) + MG = nx.MultiDiGraph(G) + assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v) + MG.add_edges_from([(1, 0), (2, 0)]) + assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_richclub.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_richclub.py new file mode 100644 index 0000000000000000000000000000000000000000..5638ddbf007a6075c3b87a1118ced18a4aa2b7a6 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_richclub.py @@ -0,0 +1,97 @@ +import pytest + +import networkx as nx + + +def test_richclub(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rc = nx.richclub.rich_club_coefficient(G, normalized=False) + assert rc == {0: 12.0 / 30, 1: 8.0 / 12} + + # test single value + rc0 = nx.richclub.rich_club_coefficient(G, normalized=False)[0] + assert rc0 == 12.0 / 30.0 + + +def test_richclub_seed(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rcNorm = nx.richclub.rich_club_coefficient(G, Q=2, seed=1) + assert rcNorm == {0: 1.0, 1: 1.0} + + +def test_richclub_normalized(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rcNorm = nx.richclub.rich_club_coefficient(G, Q=2) + assert rcNorm == {0: 1.0, 1: 1.0} + + +def test_richclub2(): + T = nx.balanced_tree(2, 10) + rc = nx.richclub.rich_club_coefficient(T, normalized=False) + assert rc == { + 0: 4092 / (2047 * 2046.0), + 1: (2044.0 / (1023 * 1022)), + 2: (2040.0 / (1022 * 1021)), + } + + +def test_richclub3(): + # tests edgecase + G = nx.karate_club_graph() + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == { + 0: 156.0 / 1122, + 1: 154.0 / 1056, + 2: 110.0 / 462, + 3: 78.0 / 240, + 4: 44.0 / 90, + 5: 22.0 / 42, + 6: 10.0 / 20, + 7: 10.0 / 20, + 8: 10.0 / 20, + 9: 6.0 / 12, + 10: 2.0 / 6, + 11: 2.0 / 6, + 12: 0.0, + 13: 0.0, + 14: 0.0, + 15: 0.0, + } + + +def test_richclub4(): + G = nx.Graph() + G.add_edges_from( + [(0, 1), (0, 2), (0, 3), (0, 4), (4, 5), (5, 9), (6, 9), (7, 9), (8, 9)] + ) + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == {0: 18 / 90.0, 1: 6 / 12.0, 2: 0.0, 3: 0.0} + + +def test_richclub_exception(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + nx.rich_club_coefficient(G) + + +def 
test_rich_club_exception2(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + nx.rich_club_coefficient(G) + + +def test_rich_club_selfloop(): + G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + G.add_edge(1, 1) # self loop + G.add_edge(1, 2) + with pytest.raises( + Exception, + match="rich_club_coefficient is not implemented for " "graphs with self loops.", + ): + nx.rich_club_coefficient(G) + + +# def test_richclub2_normalized(): +# T = nx.balanced_tree(2,10) +# rcNorm = nx.richclub.rich_club_coefficient(T,Q=2) +# assert_true(rcNorm[0] ==1.0 and rcNorm[1] < 0.9 and rcNorm[2] < 0.9) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_smetric.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_smetric.py new file mode 100644 index 0000000000000000000000000000000000000000..29389a7587264792b8b48186eae1c229178f3330 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_smetric.py @@ -0,0 +1,36 @@ +import warnings + +import pytest + +import networkx as nx + + +def test_smetric(): + g = nx.Graph() + g.add_edge(1, 2) + g.add_edge(2, 3) + g.add_edge(2, 4) + g.add_edge(1, 4) + sm = nx.s_metric(g, normalized=False) + assert sm == 19.0 + + +# NOTE: Tests below to be deleted when deprecation of `normalized` kwarg expires + + +def test_normalized_deprecation_warning(): + """Test that a deprecation warning is raised when s_metric is called with + a `normalized` kwarg.""" + G = nx.cycle_graph(7) + # No warning raised when called without kwargs (future behavior) + with warnings.catch_warnings(): + warnings.simplefilter("error") # Fail the test if warning caught + assert nx.s_metric(G) == 28 + + # Deprecation warning + with pytest.deprecated_call(): + nx.s_metric(G, normalized=True) + + # Make sure you get standard Python behavior when unrecognized keyword provided + with pytest.raises(TypeError): + nx.s_metric(G, normalize=True) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_structuralholes.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_structuralholes.py new file mode 100644 index 0000000000000000000000000000000000000000..6f92baa4f324595b747d4250611c37b807a4cbbf --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_structuralholes.py @@ -0,0 +1,139 @@ +"""Unit tests for the :mod:`networkx.algorithms.structuralholes` module.""" +import math + +import pytest + +import networkx as nx +from networkx.classes.tests import dispatch_interface + + +class TestStructuralHoles: + """Unit tests for computing measures of structural holes. + + The expected values for these functions were originally computed using the + proprietary software `UCINET`_ and the free software `IGraph`_ , and then + computed by hand to make sure that the results are correct. + + .. _UCINET: https://sites.google.com/site/ucinetsoftware/home + .. 
_IGraph: http://igraph.org/ + + """ + + def setup_method(self): + self.D = nx.DiGraph() + self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1} + # Example from http://www.analytictech.com/connections/v20(1)/holes.htm + self.G = nx.Graph() + self.G.add_edges_from( + [ + ("A", "B"), + ("A", "F"), + ("A", "G"), + ("A", "E"), + ("E", "G"), + ("F", "G"), + ("B", "G"), + ("B", "D"), + ("D", "G"), + ("G", "C"), + ] + ) + self.G_weights = { + ("A", "B"): 2, + ("A", "F"): 3, + ("A", "G"): 5, + ("A", "E"): 2, + ("E", "G"): 8, + ("F", "G"): 3, + ("B", "G"): 4, + ("B", "D"): 1, + ("D", "G"): 3, + ("G", "C"): 10, + } + + # This additionally tests the @nx._dispatch mechanism, treating + # nx.mutual_weight as if it were a re-implementation from another package + @pytest.mark.parametrize("wrapper", [lambda x: x, dispatch_interface.convert]) + def test_constraint_directed(self, wrapper): + constraint = nx.constraint(wrapper(self.D)) + assert constraint[0] == pytest.approx(1.003, abs=1e-3) + assert constraint[1] == pytest.approx(1.003, abs=1e-3) + assert constraint[2] == pytest.approx(1.389, abs=1e-3) + + def test_effective_size_directed(self): + effective_size = nx.effective_size(self.D) + assert effective_size[0] == pytest.approx(1.167, abs=1e-3) + assert effective_size[1] == pytest.approx(1.167, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + constraint = nx.constraint(D, weight="weight") + assert constraint[0] == pytest.approx(0.840, abs=1e-3) + assert constraint[1] == pytest.approx(1.143, abs=1e-3) + assert constraint[2] == pytest.approx(1.378, abs=1e-3) + + def test_effective_size_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + effective_size = nx.effective_size(D, weight="weight") + assert effective_size[0] == pytest.approx(1.567, abs=1e-3) + assert effective_size[1] == pytest.approx(1.083, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_undirected(self): + constraint = nx.constraint(self.G) + assert constraint["G"] == pytest.approx(0.400, abs=1e-3) + assert constraint["A"] == pytest.approx(0.595, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_undirected_borgatti(self): + effective_size = nx.effective_size(self.G) + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_effective_size_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, 1, "weight") + effective_size = nx.effective_size(G, weight="weight") + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, self.G_weights, "weight") + constraint = nx.constraint(G, weight="weight") + assert constraint["G"] == pytest.approx(0.299, abs=1e-3) + assert constraint["A"] == pytest.approx(0.795, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, 
weight="weight") + assert effective_size["G"] == pytest.approx(5.47, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.47, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_isolated(self): + G = self.G.copy() + G.add_node(1) + constraint = nx.constraint(G) + assert math.isnan(constraint[1]) + + def test_effective_size_isolated(self): + G = self.G.copy() + G.add_node(1) + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, weight="weight") + assert math.isnan(effective_size[1]) + + def test_effective_size_borgatti_isolated(self): + G = self.G.copy() + G.add_node(1) + effective_size = nx.effective_size(G) + assert math.isnan(effective_size[1]) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_swap.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_swap.py new file mode 100644 index 0000000000000000000000000000000000000000..49dd5f8e8c75f95a5650a563530fab67916becc9 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_swap.py @@ -0,0 +1,156 @@ +import pytest + +import networkx as nx + + +def test_directed_edge_swap(): + graph = nx.path_graph(200, create_using=nx.DiGraph) + in_degrees = sorted((n, d) for n, d in graph.in_degree()) + out_degrees = sorted((n, d) for n, d in graph.out_degree()) + G = nx.directed_edge_swap(graph, nswap=40, max_tries=500, seed=1) + assert in_degrees == sorted((n, d) for n, d in G.in_degree()) + assert out_degrees == sorted((n, d) for n, d in G.out_degree()) + + +def test_edge_cases_directed_edge_swap(): + # Tests cases when swaps are impossible, either too few edges exist, or self loops/cycles are unavoidable + # TODO: Rewrite function to explicitly check for impossible swaps and raise error + e = ( + "Maximum number of swap attempts \\(11\\) exceeded " + "before desired swaps achieved \\(\\d\\)." 
+ ) + graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) + with pytest.raises(nx.NetworkXAlgorithmError, match=e): + nx.directed_edge_swap(graph, nswap=1, max_tries=10, seed=1) + + +def test_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_double_edge_swap_seed(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40, seed=1) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_low_window_threshold(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, _window_threshold=0, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star(): + # Testing ui==xi in connected_double_edge_swap + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star_low_window_threshold(): + # Testing ui==xi in connected_double_edge_swap with low window threshold + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, _window_threshold=0, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_directed_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.directed_edge_swap(nx.path_graph(3, create_using=nx.DiGraph)) + + +def test_directed_edge_swap_tries(): + with pytest.raises(nx.NetworkXError): + G = nx.directed_edge_swap( + nx.path_graph(3, create_using=nx.DiGraph), nswap=1, max_tries=0 + ) + + +def test_directed_exception_undirected(): + graph = nx.Graph([(0, 1), (2, 3)]) + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.directed_edge_swap(graph) + + +def test_directed_edge_max_tries(): + with pytest.raises(nx.NetworkXAlgorithmError): + G = nx.directed_edge_swap( + nx.complete_graph(4, nx.DiGraph()), nswap=1, max_tries=5 + ) + + +def test_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(3)) + + +def test_double_edge_swap_tries(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(10), nswap=1, max_tries=0) + + +def test_double_edge_directed(): + graph = nx.DiGraph([(0, 1), (2, 3)]) + with pytest.raises(nx.NetworkXError, match="not defined for directed graphs."): + G = nx.double_edge_swap(graph) + + +def test_double_edge_max_tries(): + with pytest.raises(nx.NetworkXAlgorithmError): + G = nx.double_edge_swap(nx.complete_graph(4), nswap=1, max_tries=5) + + +def test_connected_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.connected_double_edge_swap(nx.path_graph(3)) + + +def test_connected_double_edge_swap_not_connected(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(3) + nx.add_path(G, [10, 11, 12]) + G = 
nx.connected_double_edge_swap(G) + + +def test_degree_seq_c4(): + G = nx.cycle_graph(4) + degrees = sorted(d for n, d in G.degree()) + G = nx.double_edge_swap(G, 1, 100) + assert degrees == sorted(d for n, d in G.degree()) + + +def test_fewer_than_4_nodes(): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2]) + with pytest.raises(nx.NetworkXError, match=".*fewer than four nodes."): + nx.directed_edge_swap(G) + + +def test_less_than_3_edges(): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([3, 4]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 3 edges"): + nx.directed_edge_swap(G) + + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 2 edges"): + nx.double_edge_swap(G) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_time_dependent.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_time_dependent.py new file mode 100644 index 0000000000000000000000000000000000000000..1e256f4bc69389464cfa164f209bc2db713b79ee --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_time_dependent.py @@ -0,0 +1,431 @@ +"""Unit testing for time dependent algorithms.""" + +from datetime import datetime, timedelta + +import pytest + +import networkx as nx + +_delta = timedelta(days=5 * 365) + + +class TestCdIndex: + """Unit testing for the cd index function.""" + + def test_common_graph(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=_delta) == 0.17 + + def test_common_graph_with_given_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"date": datetime(1992, 1, 1)}, + 1: {"date": datetime(1992, 1, 1)}, + 2: {"date": datetime(1993, 1, 1)}, + 3: {"date": datetime(1993, 1, 1)}, + 4: {"date": datetime(1995, 1, 1)}, + 5: {"date": datetime(1997, 1, 1)}, + 6: {"date": datetime(1998, 1, 1)}, + 7: {"date": datetime(1999, 1, 1)}, + 8: {"date": datetime(1999, 1, 1)}, + 9: {"date": datetime(1998, 1, 1)}, + 10: {"date": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=_delta, time="date") == 0.17 + + def test_common_graph_with_int_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 
4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20}, + 1: {"time": 20}, + 2: {"time": 30}, + 3: {"time": 30}, + 4: {"time": 50}, + 5: {"time": 70}, + 6: {"time": 80}, + 7: {"time": 90}, + 8: {"time": 90}, + 9: {"time": 80}, + 10: {"time": 74}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=50) == 0.17 + + def test_common_graph_with_float_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20.2}, + 1: {"time": 20.2}, + 2: {"time": 30.7}, + 3: {"time": 30.7}, + 4: {"time": 50.9}, + 5: {"time": 70.1}, + 6: {"time": 80.6}, + 7: {"time": 90.7}, + 8: {"time": 90.7}, + 9: {"time": 80.6}, + 10: {"time": 74.2}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=50) == 0.17 + + def test_common_graph_with_weights(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1), "weight": 5}, + 7: {"time": datetime(1999, 1, 1), "weight": 2}, + 8: {"time": datetime(1999, 1, 1), "weight": 6}, + 9: {"time": datetime(1998, 1, 1), "weight": 3}, + 10: {"time": datetime(1997, 4, 1), "weight": 10}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta, weight="weight") == 0.04 + + def test_node_with_no_predecessors(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(2005, 1, 1)}, + 6: {"time": datetime(2010, 1, 1)}, + 7: {"time": datetime(2001, 1, 1)}, + 8: {"time": datetime(2020, 1, 1)}, + 9: {"time": datetime(2017, 1, 1)}, + 10: {"time": datetime(2004, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta) == 0.0 + + def test_node_with_no_successors(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(8, 2) + G.add_edge(6, 0) + G.add_edge(6, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": 
datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta) == 1.0 + + def test_n_equals_zero(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(2005, 1, 1)}, + 6: {"time": datetime(2010, 1, 1)}, + 7: {"time": datetime(2001, 1, 1)}, + 8: {"time": datetime(2020, 1, 1)}, + 9: {"time": datetime(2017, 1, 1)}, + 10: {"time": datetime(2004, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, match="The cd index cannot be defined." + ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_time_timedelta_compatibility(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20.2}, + 1: {"time": 20.2}, + 2: {"time": 30.7}, + 3: {"time": 30.7}, + 4: {"time": 50.9}, + 5: {"time": 70.1}, + 6: {"time": 80.6}, + 7: {"time": 90.7}, + 8: {"time": 90.7}, + 9: {"time": 80.6}, + 10: {"time": 74.2}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, + match="Addition and comparison are not supported between", + ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_node_with_no_time(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(8, 2) + G.add_edge(6, 0) + G.add_edge(6, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, match="Not all nodes have a 'time' attribute." 
+ ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_maximally_consolidating(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G.add_edge(5, 1) + G.add_edge(5, 2) + G.add_edge(5, 3) + G.add_edge(5, 4) + G.add_edge(6, 1) + G.add_edge(6, 5) + G.add_edge(7, 1) + G.add_edge(7, 5) + G.add_edge(8, 2) + G.add_edge(8, 5) + G.add_edge(9, 5) + G.add_edge(9, 3) + G.add_edge(10, 5) + G.add_edge(10, 3) + G.add_edge(10, 4) + G.add_edge(11, 5) + G.add_edge(11, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + 11: {"time": datetime(1998, 5, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 5, time_delta=_delta) == -1 + + def test_maximally_destabilizing(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G.add_edge(5, 1) + G.add_edge(5, 2) + G.add_edge(5, 3) + G.add_edge(5, 4) + G.add_edge(6, 5) + G.add_edge(7, 5) + G.add_edge(8, 5) + G.add_edge(9, 5) + G.add_edge(10, 5) + G.add_edge(11, 5) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + 11: {"time": datetime(1998, 5, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 5, time_delta=_delta) == 1 diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_voronoi.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_voronoi.py new file mode 100644 index 0000000000000000000000000000000000000000..3269ae62a023ff0cf9fdc55122cb6e7c8d2ba319 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/algorithms/tests/test_voronoi.py @@ -0,0 +1,103 @@ +import networkx as nx +from networkx.utils import pairwise + + +class TestVoronoiCells: + """Unit tests for the Voronoi cells function.""" + + def test_isolates(self): + """Tests that a graph with isolated nodes has all isolates in + one block of the partition. + + """ + G = nx.empty_graph(5) + cells = nx.voronoi_cells(G, {0, 2, 4}) + expected = {0: {0}, 2: {2}, 4: {4}, "unreachable": {1, 3}} + assert expected == cells + + def test_undirected_unweighted(self): + G = nx.cycle_graph(6) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 5}, 3: {2, 3, 4}} + assert expected == cells + + def test_directed_unweighted(self): + # This is the singly-linked directed cycle graph on six nodes. + G = nx.DiGraph(pairwise(range(6), cyclic=True)) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 2}, 3: {3, 4, 5}} + assert expected == cells + + def test_directed_inward(self): + """Tests that reversing the graph gives the "inward" Voronoi + partition. + + """ + # This is the singly-linked reverse directed cycle graph on six nodes. 
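+        # Reversing flips each edge (i, i+1) to (i+1, i), so a seed's shortest
+        # paths in the reversed graph trace paths that point *into* it in the
+        # original: seed 0 claims nodes 5 and 4, seed 3 claims nodes 2 and 1.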
+ G = nx.DiGraph(pairwise(range(6), cyclic=True)) + G = G.reverse(copy=False) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 4, 5}, 3: {1, 2, 3}} + assert expected == cells + + def test_undirected_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1)] + G = nx.Graph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_directed_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1), (3, 2, 1), (2, 1, 1)] + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multigraph_unweighted(self): + """Tests that the Voronoi cells for a multigraph are the same as + for a simple graph. + + """ + edges = [(0, 1), (1, 2), (2, 3)] + G = nx.MultiGraph(2 * edges) + H = nx.Graph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multidigraph_unweighted(self): + # This is the twice-singly-linked directed cycle graph on six nodes. + edges = list(pairwise(range(6), cyclic=True)) + G = nx.MultiDiGraph(2 * edges) + H = nx.DiGraph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multigraph_weighted(self): + edges = [(0, 1, 10), (0, 1, 10), (1, 2, 1), (1, 2, 100), (2, 3, 1), (2, 3, 100)] + G = nx.MultiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multidigraph_weighted(self): + edges = [ + (0, 1, 10), + (0, 1, 10), + (1, 2, 1), + (2, 3, 1), + (3, 2, 10), + (3, 2, 1), + (2, 1, 10), + (2, 1, 1), + ] + G = nx.MultiDiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0db16680aa58a1fdafdf16157513eab356cae57c Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fd994ace38d10110cb2b65a237de64d41cce527 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0977efc2457e1e69fecf3654a6c4ad1b271bb7d Binary files /dev/null and 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..329c0f6299d42ce579209ff6c90435bfbbaf86d1 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee8abaf0636d96591e98423fdc751ec62d9900fc Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae519619a6d1fb7794373580be862cf1433373ad Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70636efa653f17c87872b5ba33fb09fd65a03cfe Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03a92ef402c73b1d2c753072035e708508bbe3bb Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2cf06681f5c7f926067c844d49f54bd7cbccc67 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-311.pyc differ diff --git 
a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61906458f77f72ad01929a339a27a529e1665b96 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efbce695e0445e91f98d8c5a8b8f8807efe79e5f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/exceptions.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/exceptions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0abb4b1b9c19b7e3d8de285eff4b0cbe46dcfd7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/exceptions.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e589bb917e23823e25f9fff7e0849c4d6d4a62bc --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__init__.py @@ -0,0 +1,4 @@ +"""Subpackage containing all of pip's command line interface related code +""" + +# This file intentionally does not import submodules diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d7911b76f1aa5991b7a1b3ce5ba44e611d4fb63 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a769b810674d2fa89140f9430c08eb282fbf31d Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7798eb3ac06ec6094cebb15f2eb8c8a124ecf762 Binary files 
/dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79275944e8fbd4b503895a46a3cfa6854c9af26f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a937241f9ed77ba4b9c7fb88c8b268a60f468705 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0578ed13f771b5ff2aa8aa8d707cd8ee1505b157 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/base_command.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/base_command.py new file mode 100644 index 0000000000000000000000000000000000000000..bc1ab65949d8edace4e3f7938296bd32e963f06b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/base_command.py @@ -0,0 +1,231 @@ +"""Base Command class, and related routines""" + +import logging +import logging.config +import optparse +import os +import sys +import traceback +from optparse import Values +from typing import List, Optional, Tuple + +from pip._vendor.rich import reconfigure +from pip._vendor.rich import traceback as rich_traceback + +from pip._internal.cli import cmdoptions +from pip._internal.cli.command_context import CommandContextMixIn +from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter +from pip._internal.cli.status_codes import ( + ERROR, + PREVIOUS_BUILD_DIR_ERROR, + UNKNOWN_ERROR, + VIRTUALENV_NOT_FOUND, +) +from pip._internal.exceptions import ( + BadCommand, + CommandError, + DiagnosticPipError, + InstallationError, + NetworkConnectionError, + PreviousBuildDirError, +) +from pip._internal.utils.filesystem import check_path_owner +from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging +from pip._internal.utils.misc import get_prog, normalize_path +from pip._internal.utils.temp_dir import TempDirectoryTypeRegistry as TempDirRegistry +from pip._internal.utils.temp_dir import global_tempdir_manager, tempdir_registry +from pip._internal.utils.virtualenv import running_under_virtualenv + +__all__ = ["Command"] + +logger = logging.getLogger(__name__) + + +class Command(CommandContextMixIn): + usage: str = "" + 
ignore_require_venv: bool = False + + def __init__(self, name: str, summary: str, isolated: bool = False) -> None: + super().__init__() + + self.name = name + self.summary = summary + self.parser = ConfigOptionParser( + usage=self.usage, + prog=f"{get_prog()} {name}", + formatter=UpdatingDefaultsHelpFormatter(), + add_help_option=False, + name=name, + description=self.__doc__, + isolated=isolated, + ) + + self.tempdir_registry: Optional[TempDirRegistry] = None + + # Commands should add options to this option group + optgroup_name = f"{self.name.capitalize()} Options" + self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name) + + # Add the general options + gen_opts = cmdoptions.make_option_group( + cmdoptions.general_group, + self.parser, + ) + self.parser.add_option_group(gen_opts) + + self.add_options() + + def add_options(self) -> None: + pass + + def handle_pip_version_check(self, options: Values) -> None: + """ + This is a no-op so that commands by default do not do the pip version + check. + """ + # Make sure we do the pip version check if the index_group options + # are present. + assert not hasattr(options, "no_index") + + def run(self, options: Values, args: List[str]) -> int: + raise NotImplementedError + + def _run_wrapper(self, level_number: int, options: Values, args: List[str]) -> int: + def _inner_run() -> int: + try: + return self.run(options, args) + finally: + self.handle_pip_version_check(options) + + if options.debug_mode: + rich_traceback.install(show_locals=True) + return _inner_run() + + try: + status = _inner_run() + assert isinstance(status, int) + return status + except DiagnosticPipError as exc: + logger.error("%s", exc, extra={"rich": True}) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except PreviousBuildDirError as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return PREVIOUS_BUILD_DIR_ERROR + except ( + InstallationError, + BadCommand, + NetworkConnectionError, + ) as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except CommandError as exc: + logger.critical("%s", exc) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BrokenStdoutLoggingError: + # Bypass our logger and write any remaining messages to + # stderr because stdout no longer works. + print("ERROR: Pipe to stdout was broken", file=sys.stderr) + if level_number <= logging.DEBUG: + traceback.print_exc(file=sys.stderr) + + return ERROR + except KeyboardInterrupt: + logger.critical("Operation cancelled by user") + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BaseException: + logger.critical("Exception:", exc_info=True) + + return UNKNOWN_ERROR + + def parse_args(self, args: List[str]) -> Tuple[Values, List[str]]: + # factored out for testability + return self.parser.parse_args(args) + + def main(self, args: List[str]) -> int: + try: + with self.main_context(): + return self._main(args) + finally: + logging.shutdown() + + def _main(self, args: List[str]) -> int: + # We must initialize this before the tempdir manager, otherwise the + # configuration would not be accessible by the time we clean up the + # tempdir manager. + self.tempdir_registry = self.enter_context(tempdir_registry()) + # Intentionally set as early as possible so globally-managed temporary + # directories are available to the rest of the code. 
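+        # (The ExitStack unwinds in LIFO order, so this manager is torn down
+        # before the tempdir registry entered above goes away.)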
+ self.enter_context(global_tempdir_manager()) + + options, args = self.parse_args(args) + + # Set verbosity so that it can be used elsewhere. + self.verbosity = options.verbose - options.quiet + + reconfigure(no_color=options.no_color) + level_number = setup_logging( + verbosity=self.verbosity, + no_color=options.no_color, + user_log_file=options.log, + ) + + always_enabled_features = set(options.features_enabled) & set( + cmdoptions.ALWAYS_ENABLED_FEATURES + ) + if always_enabled_features: + logger.warning( + "The following features are always enabled: %s. ", + ", ".join(sorted(always_enabled_features)), + ) + + # Make sure that the --python argument isn't specified after the + # subcommand. We can tell, because if --python was specified, + # we should only reach this point if we're running in the created + # subprocess, which has the _PIP_RUNNING_IN_SUBPROCESS environment + # variable set. + if options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ: + logger.critical( + "The --python option must be placed before the pip subcommand name" + ) + sys.exit(ERROR) + + # TODO: Try to get these passing down from the command? + # without resorting to os.environ to hold these. + # This also affects isolated builds and it should. + + if options.no_input: + os.environ["PIP_NO_INPUT"] = "1" + + if options.exists_action: + os.environ["PIP_EXISTS_ACTION"] = " ".join(options.exists_action) + + if options.require_venv and not self.ignore_require_venv: + # If a venv is required check if it can really be found + if not running_under_virtualenv(): + logger.critical("Could not find an activated virtualenv (required).") + sys.exit(VIRTUALENV_NOT_FOUND) + + if options.cache_dir: + options.cache_dir = normalize_path(options.cache_dir) + if not check_path_owner(options.cache_dir): + logger.warning( + "The directory '%s' or its parent directory is not owned " + "or is not writable by the current user. The cache " + "has been disabled. Check the permissions and owner of " + "that directory. 
If executing pip with sudo, you should " + "use sudo's -H flag.", + options.cache_dir, + ) + options.cache_dir = None + + return self._run_wrapper(level_number, options, args) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/command_context.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/command_context.py new file mode 100644 index 0000000000000000000000000000000000000000..139995ac3f109a82664e4913f7ebc32ecf7617e1 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/command_context.py @@ -0,0 +1,27 @@ +from contextlib import ExitStack, contextmanager +from typing import ContextManager, Generator, TypeVar + +_T = TypeVar("_T", covariant=True) + + +class CommandContextMixIn: + def __init__(self) -> None: + super().__init__() + self._in_main_context = False + self._main_context = ExitStack() + + @contextmanager + def main_context(self) -> Generator[None, None, None]: + assert not self._in_main_context + + self._in_main_context = True + try: + with self._main_context: + yield + finally: + self._in_main_context = False + + def enter_context(self, context_provider: ContextManager[_T]) -> _T: + assert self._in_main_context + + return self._main_context.enter_context(context_provider) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/index_command.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/index_command.py new file mode 100644 index 0000000000000000000000000000000000000000..db105d0fef957a3fbd4fc919cbe8f387793f7342 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/index_command.py @@ -0,0 +1,170 @@ +""" +Contains command classes which may interact with an index / the network. + +Unlike its sister module, req_command, this module still uses lazy imports +so commands which don't always hit the network (e.g. list w/o --outdated or +--uptodate) don't need waste time importing PipSession and friends. +""" + +import logging +import os +import sys +from optparse import Values +from typing import TYPE_CHECKING, List, Optional + +from pip._vendor import certifi + +from pip._internal.cli.base_command import Command +from pip._internal.cli.command_context import CommandContextMixIn + +if TYPE_CHECKING: + from ssl import SSLContext + + from pip._internal.network.session import PipSession + +logger = logging.getLogger(__name__) + + +def _create_truststore_ssl_context() -> Optional["SSLContext"]: + if sys.version_info < (3, 10): + logger.debug("Disabling truststore because Python version isn't 3.10+") + return None + + try: + import ssl + except ImportError: + logger.warning("Disabling truststore since ssl support is missing") + return None + + try: + from pip._vendor import truststore + except ImportError: + logger.warning("Disabling truststore because platform isn't supported") + return None + + ctx = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + ctx.load_verify_locations(certifi.where()) + return ctx + + +class SessionCommandMixin(CommandContextMixIn): + """ + A class mixin for command classes needing _build_session(). 
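+
+    The session is created lazily by get_default_session() and registered
+    on the command's main_context(), so it is closed when the command exits.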
+ """ + + def __init__(self) -> None: + super().__init__() + self._session: Optional[PipSession] = None + + @classmethod + def _get_index_urls(cls, options: Values) -> Optional[List[str]]: + """Return a list of index urls from user-provided options.""" + index_urls = [] + if not getattr(options, "no_index", False): + url = getattr(options, "index_url", None) + if url: + index_urls.append(url) + urls = getattr(options, "extra_index_urls", None) + if urls: + index_urls.extend(urls) + # Return None rather than an empty list + return index_urls or None + + def get_default_session(self, options: Values) -> "PipSession": + """Get a default-managed session.""" + if self._session is None: + self._session = self.enter_context(self._build_session(options)) + # there's no type annotation on requests.Session, so it's + # automatically ContextManager[Any] and self._session becomes Any, + # then https://github.com/python/mypy/issues/7696 kicks in + assert self._session is not None + return self._session + + def _build_session( + self, + options: Values, + retries: Optional[int] = None, + timeout: Optional[int] = None, + ) -> "PipSession": + from pip._internal.network.session import PipSession + + cache_dir = options.cache_dir + assert not cache_dir or os.path.isabs(cache_dir) + + if "legacy-certs" not in options.deprecated_features_enabled: + ssl_context = _create_truststore_ssl_context() + else: + ssl_context = None + + session = PipSession( + cache=os.path.join(cache_dir, "http-v2") if cache_dir else None, + retries=retries if retries is not None else options.retries, + trusted_hosts=options.trusted_hosts, + index_urls=self._get_index_urls(options), + ssl_context=ssl_context, + ) + + # Handle custom ca-bundles from the user + if options.cert: + session.verify = options.cert + + # Handle SSL client certificate + if options.client_cert: + session.cert = options.client_cert + + # Handle timeouts + if options.timeout or timeout: + session.timeout = timeout if timeout is not None else options.timeout + + # Handle configured proxies + if options.proxy: + session.proxies = { + "http": options.proxy, + "https": options.proxy, + } + session.trust_env = False + + # Determine if we can prompt the user for authentication or not + session.auth.prompting = not options.no_input + session.auth.keyring_provider = options.keyring_provider + + return session + + +def _pip_self_version_check(session: "PipSession", options: Values) -> None: + from pip._internal.self_outdated_check import pip_self_version_check as check + + check(session, options) + + +class IndexGroupCommand(Command, SessionCommandMixin): + """ + Abstract base class for commands with the index_group options. + + This also corresponds to the commands that permit the pip version check. + """ + + def handle_pip_version_check(self, options: Values) -> None: + """ + Do the pip version check if not disabled. + + This overrides the default behavior of not doing the check. + """ + # Make sure the index_group options are present. + assert hasattr(options, "no_index") + + if options.disable_pip_version_check or options.no_index: + return + + try: + # Otherwise, check if we're using the latest version of pip available. 
+ session = self._build_session( + options, + retries=0, + timeout=min(5, options.timeout), + ) + with session: + _pip_self_version_check(session, options) + except Exception: + logger.warning("There was an error checking the latest version of pip.") + logger.debug("See below for error", exc_info=True) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/parser.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..bc4aca032d4adddbb7b9d8b0c981187aa11e40fc --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/parser.py @@ -0,0 +1,294 @@ +"""Base option parser setup""" + +import logging +import optparse +import shutil +import sys +import textwrap +from contextlib import suppress +from typing import Any, Dict, Generator, List, NoReturn, Optional, Tuple + +from pip._internal.cli.status_codes import UNKNOWN_ERROR +from pip._internal.configuration import Configuration, ConfigurationError +from pip._internal.utils.misc import redact_auth_from_url, strtobool + +logger = logging.getLogger(__name__) + + +class PrettyHelpFormatter(optparse.IndentedHelpFormatter): + """A prettier/less verbose help formatter for optparse.""" + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # help position must be aligned with __init__.parseopts.description + kwargs["max_help_position"] = 30 + kwargs["indent_increment"] = 1 + kwargs["width"] = shutil.get_terminal_size()[0] - 2 + super().__init__(*args, **kwargs) + + def format_option_strings(self, option: optparse.Option) -> str: + return self._format_option_strings(option) + + def _format_option_strings( + self, option: optparse.Option, mvarfmt: str = " <{}>", optsep: str = ", " + ) -> str: + """ + Return a comma-separated list of option strings and metavars. + + :param option: tuple of (short opt, long opt), e.g: ('-f', '--format') + :param mvarfmt: metavar format string + :param optsep: separator + """ + opts = [] + + if option._short_opts: + opts.append(option._short_opts[0]) + if option._long_opts: + opts.append(option._long_opts[0]) + if len(opts) > 1: + opts.insert(1, optsep) + + if option.takes_value(): + assert option.dest is not None + metavar = option.metavar or option.dest.lower() + opts.append(mvarfmt.format(metavar.lower())) + + return "".join(opts) + + def format_heading(self, heading: str) -> str: + if heading == "Options": + return "" + return heading + ":\n" + + def format_usage(self, usage: str) -> str: + """ + Ensure there is only one newline between usage and the first heading + if there is no description. 
+ """ + msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " ")) + return msg + + def format_description(self, description: Optional[str]) -> str: + # leave full control over description to us + if description: + if hasattr(self.parser, "main"): + label = "Commands" + else: + label = "Description" + # some doc strings have initial newlines, some don't + description = description.lstrip("\n") + # some doc strings have final newlines and spaces, some don't + description = description.rstrip() + # dedent, then reindent + description = self.indent_lines(textwrap.dedent(description), " ") + description = f"{label}:\n{description}\n" + return description + else: + return "" + + def format_epilog(self, epilog: Optional[str]) -> str: + # leave full control over epilog to us + if epilog: + return epilog + else: + return "" + + def indent_lines(self, text: str, indent: str) -> str: + new_lines = [indent + line for line in text.split("\n")] + return "\n".join(new_lines) + + +class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): + """Custom help formatter for use in ConfigOptionParser. + + This is updates the defaults before expanding them, allowing + them to show up correctly in the help listing. + + Also redact auth from url type options + """ + + def expand_default(self, option: optparse.Option) -> str: + default_values = None + if self.parser is not None: + assert isinstance(self.parser, ConfigOptionParser) + self.parser._update_defaults(self.parser.defaults) + assert option.dest is not None + default_values = self.parser.defaults.get(option.dest) + help_text = super().expand_default(option) + + if default_values and option.metavar == "URL": + if isinstance(default_values, str): + default_values = [default_values] + + # If its not a list, we should abort and just return the help text + if not isinstance(default_values, list): + default_values = [] + + for val in default_values: + help_text = help_text.replace(val, redact_auth_from_url(val)) + + return help_text + + +class CustomOptionParser(optparse.OptionParser): + def insert_option_group( + self, idx: int, *args: Any, **kwargs: Any + ) -> optparse.OptionGroup: + """Insert an OptionGroup at a given position.""" + group = self.add_option_group(*args, **kwargs) + + self.option_groups.pop() + self.option_groups.insert(idx, group) + + return group + + @property + def option_list_all(self) -> List[optparse.Option]: + """Get a list of all options, including those in option groups.""" + res = self.option_list[:] + for i in self.option_groups: + res.extend(i.option_list) + + return res + + +class ConfigOptionParser(CustomOptionParser): + """Custom option parser which updates its defaults by checking the + configuration files and environmental variables""" + + def __init__( + self, + *args: Any, + name: str, + isolated: bool = False, + **kwargs: Any, + ) -> None: + self.name = name + self.config = Configuration(isolated) + + assert self.name + super().__init__(*args, **kwargs) + + def check_default(self, option: optparse.Option, key: str, val: Any) -> Any: + try: + return option.check_value(key, val) + except optparse.OptionValueError as exc: + print(f"An error occurred during configuration: {exc}") + sys.exit(3) + + def _get_ordered_configuration_items( + self, + ) -> Generator[Tuple[str, Any], None, None]: + # Configuration gives keys in an unordered manner. Order them. 
+ override_order = ["global", self.name, ":env:"] + + # Pool the options into different groups + section_items: Dict[str, List[Tuple[str, Any]]] = { + name: [] for name in override_order + } + for section_key, val in self.config.items(): + # ignore empty values + if not val: + logger.debug( + "Ignoring configuration key '%s' as it's value is empty.", + section_key, + ) + continue + + section, key = section_key.split(".", 1) + if section in override_order: + section_items[section].append((key, val)) + + # Yield each group in their override order + for section in override_order: + for key, val in section_items[section]: + yield key, val + + def _update_defaults(self, defaults: Dict[str, Any]) -> Dict[str, Any]: + """Updates the given defaults with values from the config files and + the environ. Does a little special handling for certain types of + options (lists).""" + + # Accumulate complex default state. + self.values = optparse.Values(self.defaults) + late_eval = set() + # Then set the options with those values + for key, val in self._get_ordered_configuration_items(): + # '--' because configuration supports only long names + option = self.get_option("--" + key) + + # Ignore options not present in this parser. E.g. non-globals put + # in [global] by users that want them to apply to all applicable + # commands. + if option is None: + continue + + assert option.dest is not None + + if option.action in ("store_true", "store_false"): + try: + val = strtobool(val) + except ValueError: + self.error( + f"{val} is not a valid value for {key} option, " + "please specify a boolean value like yes/no, " + "true/false or 1/0 instead." + ) + elif option.action == "count": + with suppress(ValueError): + val = strtobool(val) + with suppress(ValueError): + val = int(val) + if not isinstance(val, int) or val < 0: + self.error( + f"{val} is not a valid value for {key} option, " + "please instead specify either a non-negative integer " + "or a boolean value like yes/no or false/true " + "which is equivalent to 1/0." + ) + elif option.action == "append": + val = val.split() + val = [self.check_default(option, key, v) for v in val] + elif option.action == "callback": + assert option.callback is not None + late_eval.add(option.dest) + opt_str = option.get_opt_string() + val = option.convert_value(opt_str, val) + # From take_action + args = option.callback_args or () + kwargs = option.callback_kwargs or {} + option.callback(option, opt_str, val, self, *args, **kwargs) + else: + val = self.check_default(option, key, val) + + defaults[option.dest] = val + + for key in late_eval: + defaults[key] = getattr(self.values, key) + self.values = None + return defaults + + def get_default_values(self) -> optparse.Values: + """Overriding to make updating the defaults after instantiation of + the option parser possible, _update_defaults() does the dirty work.""" + if not self.process_default_values: + # Old, pre-Optik 1.5 behaviour. 
+ return optparse.Values(self.defaults) + + # Load the configuration, or error out in case of an error + try: + self.config.load() + except ConfigurationError as err: + self.exit(UNKNOWN_ERROR, str(err)) + + defaults = self._update_defaults(self.defaults.copy()) # ours + for option in self._get_all_options(): + assert option.dest is not None + default = defaults.get(option.dest) + if isinstance(default, str): + opt_str = option.get_opt_string() + defaults[option.dest] = option.check_value(opt_str, default) + return optparse.Values(defaults) + + def error(self, msg: str) -> NoReturn: + self.print_usage(sys.stderr) + self.exit(UNKNOWN_ERROR, f"{msg}\n") diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/progress_bars.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/progress_bars.py new file mode 100644 index 0000000000000000000000000000000000000000..1236180c08691f2bd6050f1cb0893359ce58397f --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/progress_bars.py @@ -0,0 +1,94 @@ +import functools +import sys +from typing import Callable, Generator, Iterable, Iterator, Optional, Tuple + +from pip._vendor.rich.progress import ( + BarColumn, + DownloadColumn, + FileSizeColumn, + Progress, + ProgressColumn, + SpinnerColumn, + TextColumn, + TimeElapsedColumn, + TimeRemainingColumn, + TransferSpeedColumn, +) + +from pip._internal.cli.spinners import RateLimiter +from pip._internal.utils.logging import get_indentation + +DownloadProgressRenderer = Callable[[Iterable[bytes]], Iterator[bytes]] + + +def _rich_progress_bar( + iterable: Iterable[bytes], + *, + bar_type: str, + size: Optional[int], +) -> Generator[bytes, None, None]: + assert bar_type == "on", "This should only be used in the default mode." + + if not size: + total = float("inf") + columns: Tuple[ProgressColumn, ...] = ( + TextColumn("[progress.description]{task.description}"), + SpinnerColumn("line", speed=1.5), + FileSizeColumn(), + TransferSpeedColumn(), + TimeElapsedColumn(), + ) + else: + total = size + columns = ( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + DownloadColumn(), + TransferSpeedColumn(), + TextColumn("eta"), + TimeRemainingColumn(), + ) + + progress = Progress(*columns, refresh_per_second=5) + task_id = progress.add_task(" " * (get_indentation() + 2), total=total) + with progress: + for chunk in iterable: + yield chunk + progress.update(task_id, advance=len(chunk)) + + +def _raw_progress_bar( + iterable: Iterable[bytes], + *, + size: Optional[int], +) -> Generator[bytes, None, None]: + def write_progress(current: int, total: int) -> None: + sys.stdout.write("Progress %d of %d\n" % (current, total)) + sys.stdout.flush() + + current = 0 + total = size or 0 + rate_limiter = RateLimiter(0.25) + + write_progress(current, total) + for chunk in iterable: + current += len(chunk) + if rate_limiter.ready() or current == total: + write_progress(current, total) + rate_limiter.reset() + yield chunk + + +def get_download_progress_renderer( + *, bar_type: str, size: Optional[int] = None +) -> DownloadProgressRenderer: + """Get an object that can be used to render the download progress. + + Returns a callable, that takes an iterable to "wrap". 
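+
+    A sketch of typical usage (the byte chunks are illustrative)::
+
+        renderer = get_download_progress_renderer(bar_type="raw", size=6)
+        for chunk in renderer(iter([b"abc", b"def"])):
+            ...  # write the chunk somewhere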
+ """ + if bar_type == "on": + return functools.partial(_rich_progress_bar, bar_type=bar_type, size=size) + elif bar_type == "raw": + return functools.partial(_raw_progress_bar, size=size) + else: + return iter # no-op, when passed an iterator diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/req_command.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/req_command.py new file mode 100644 index 0000000000000000000000000000000000000000..92900f94ff4cb4b43126ca92964655fcb9016f50 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/req_command.py @@ -0,0 +1,329 @@ +"""Contains the RequirementCommand base class. + +This class is in a separate module so the commands that do not always +need PackageFinder capability don't unnecessarily import the +PackageFinder machinery and all its vendored dependencies, etc. +""" + +import logging +from functools import partial +from optparse import Values +from typing import Any, List, Optional, Tuple + +from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.index_command import IndexGroupCommand +from pip._internal.cli.index_command import SessionCommandMixin as SessionCommandMixin +from pip._internal.exceptions import CommandError, PreviousBuildDirError +from pip._internal.index.collector import LinkCollector +from pip._internal.index.package_finder import PackageFinder +from pip._internal.models.selection_prefs import SelectionPreferences +from pip._internal.models.target_python import TargetPython +from pip._internal.network.session import PipSession +from pip._internal.operations.build.build_tracker import BuildTracker +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req.constructors import ( + install_req_from_editable, + install_req_from_line, + install_req_from_parsed_requirement, + install_req_from_req_string, +) +from pip._internal.req.req_file import parse_requirements +from pip._internal.req.req_install import InstallRequirement +from pip._internal.resolution.base import BaseResolver +from pip._internal.utils.temp_dir import ( + TempDirectory, + TempDirectoryTypeRegistry, + tempdir_kinds, +) + +logger = logging.getLogger(__name__) + + +KEEPABLE_TEMPDIR_TYPES = [ + tempdir_kinds.BUILD_ENV, + tempdir_kinds.EPHEM_WHEEL_CACHE, + tempdir_kinds.REQ_BUILD, +] + + +def with_cleanup(func: Any) -> Any: + """Decorator for common logic related to managing temporary + directories. + """ + + def configure_tempdir_registry(registry: TempDirectoryTypeRegistry) -> None: + for t in KEEPABLE_TEMPDIR_TYPES: + registry.set_delete(t, False) + + def wrapper( + self: RequirementCommand, options: Values, args: List[Any] + ) -> Optional[int]: + assert self.tempdir_registry is not None + if options.no_clean: + configure_tempdir_registry(self.tempdir_registry) + + try: + return func(self, options, args) + except PreviousBuildDirError: + # This kind of conflict can occur when the user passes an explicit + # build directory with a pre-existing folder. In that case we do + # not want to accidentally remove it. 
+ configure_tempdir_registry(self.tempdir_registry) + raise + + return wrapper + + +class RequirementCommand(IndexGroupCommand): + def __init__(self, *args: Any, **kw: Any) -> None: + super().__init__(*args, **kw) + + self.cmd_opts.add_option(cmdoptions.no_clean()) + + @staticmethod + def determine_resolver_variant(options: Values) -> str: + """Determines which resolver should be used, based on the given options.""" + if "legacy-resolver" in options.deprecated_features_enabled: + return "legacy" + + return "resolvelib" + + @classmethod + def make_requirement_preparer( + cls, + temp_build_dir: TempDirectory, + options: Values, + build_tracker: BuildTracker, + session: PipSession, + finder: PackageFinder, + use_user_site: bool, + download_dir: Optional[str] = None, + verbosity: int = 0, + ) -> RequirementPreparer: + """ + Create a RequirementPreparer instance for the given parameters. + """ + temp_build_dir_path = temp_build_dir.path + assert temp_build_dir_path is not None + legacy_resolver = False + + resolver_variant = cls.determine_resolver_variant(options) + if resolver_variant == "resolvelib": + lazy_wheel = "fast-deps" in options.features_enabled + if lazy_wheel: + logger.warning( + "pip is using lazily downloaded wheels using HTTP " + "range requests to obtain dependency information. " + "This experimental feature is enabled through " + "--use-feature=fast-deps and it is not ready for " + "production." + ) + else: + legacy_resolver = True + lazy_wheel = False + if "fast-deps" in options.features_enabled: + logger.warning( + "fast-deps has no effect when used with the legacy resolver." + ) + + return RequirementPreparer( + build_dir=temp_build_dir_path, + src_dir=options.src_dir, + download_dir=download_dir, + build_isolation=options.build_isolation, + check_build_deps=options.check_build_deps, + build_tracker=build_tracker, + session=session, + progress_bar=options.progress_bar, + finder=finder, + require_hashes=options.require_hashes, + use_user_site=use_user_site, + lazy_wheel=lazy_wheel, + verbosity=verbosity, + legacy_resolver=legacy_resolver, + ) + + @classmethod + def make_resolver( + cls, + preparer: RequirementPreparer, + finder: PackageFinder, + options: Values, + wheel_cache: Optional[WheelCache] = None, + use_user_site: bool = False, + ignore_installed: bool = True, + ignore_requires_python: bool = False, + force_reinstall: bool = False, + upgrade_strategy: str = "to-satisfy-only", + use_pep517: Optional[bool] = None, + py_version_info: Optional[Tuple[int, ...]] = None, + ) -> BaseResolver: + """ + Create a Resolver instance for the given parameters. + """ + make_install_req = partial( + install_req_from_req_string, + isolated=options.isolated_mode, + use_pep517=use_pep517, + ) + resolver_variant = cls.determine_resolver_variant(options) + # The long import name and duplicated invocation is needed to convince + # Mypy into correctly typechecking. Otherwise it would complain the + # "Resolver" class being redefined. 
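+        # Both branches construct their Resolver with identical keyword
+        # arguments; only the implementing module differs.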
+ if resolver_variant == "resolvelib": + import pip._internal.resolution.resolvelib.resolver + + return pip._internal.resolution.resolvelib.resolver.Resolver( + preparer=preparer, + finder=finder, + wheel_cache=wheel_cache, + make_install_req=make_install_req, + use_user_site=use_user_site, + ignore_dependencies=options.ignore_dependencies, + ignore_installed=ignore_installed, + ignore_requires_python=ignore_requires_python, + force_reinstall=force_reinstall, + upgrade_strategy=upgrade_strategy, + py_version_info=py_version_info, + ) + import pip._internal.resolution.legacy.resolver + + return pip._internal.resolution.legacy.resolver.Resolver( + preparer=preparer, + finder=finder, + wheel_cache=wheel_cache, + make_install_req=make_install_req, + use_user_site=use_user_site, + ignore_dependencies=options.ignore_dependencies, + ignore_installed=ignore_installed, + ignore_requires_python=ignore_requires_python, + force_reinstall=force_reinstall, + upgrade_strategy=upgrade_strategy, + py_version_info=py_version_info, + ) + + def get_requirements( + self, + args: List[str], + options: Values, + finder: PackageFinder, + session: PipSession, + ) -> List[InstallRequirement]: + """ + Parse command-line arguments into the corresponding requirements. + """ + requirements: List[InstallRequirement] = [] + for filename in options.constraints: + for parsed_req in parse_requirements( + filename, + constraint=True, + finder=finder, + options=options, + session=session, + ): + req_to_add = install_req_from_parsed_requirement( + parsed_req, + isolated=options.isolated_mode, + user_supplied=False, + ) + requirements.append(req_to_add) + + for req in args: + req_to_add = install_req_from_line( + req, + comes_from=None, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + user_supplied=True, + config_settings=getattr(options, "config_settings", None), + ) + requirements.append(req_to_add) + + for req in options.editables: + req_to_add = install_req_from_editable( + req, + user_supplied=True, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + config_settings=getattr(options, "config_settings", None), + ) + requirements.append(req_to_add) + + # NOTE: options.require_hashes may be set if --require-hashes is True + for filename in options.requirements: + for parsed_req in parse_requirements( + filename, finder=finder, options=options, session=session + ): + req_to_add = install_req_from_parsed_requirement( + parsed_req, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + user_supplied=True, + config_settings=( + parsed_req.options.get("config_settings") + if parsed_req.options + else None + ), + ) + requirements.append(req_to_add) + + # If any requirement has hash options, enable hash checking. + if any(req.has_hash_options for req in requirements): + options.require_hashes = True + + if not (args or options.editables or options.requirements): + opts = {"name": self.name} + if options.find_links: + raise CommandError( + "You must give at least one requirement to {name} " + '(maybe you meant "pip {name} {links}"?)'.format( + **dict(opts, links=" ".join(options.find_links)) + ) + ) + else: + raise CommandError( + "You must give at least one requirement to {name} " + '(see "pip help {name}")'.format(**opts) + ) + + return requirements + + @staticmethod + def trace_basic_info(finder: PackageFinder) -> None: + """ + Trace basic information about the provided objects. 
+ """ + # Display where finder is looking for packages + search_scope = finder.search_scope + locations = search_scope.get_formatted_locations() + if locations: + logger.info(locations) + + def _build_package_finder( + self, + options: Values, + session: PipSession, + target_python: Optional[TargetPython] = None, + ignore_requires_python: Optional[bool] = None, + ) -> PackageFinder: + """ + Create a package finder appropriate to this requirement command. + + :param ignore_requires_python: Whether to ignore incompatible + "Requires-Python" values in links. Defaults to False. + """ + link_collector = LinkCollector.create(session, options=options) + selection_prefs = SelectionPreferences( + allow_yanked=True, + format_control=options.format_control, + allow_all_prereleases=options.pre, + prefer_binary=options.prefer_binary, + ignore_requires_python=ignore_requires_python, + ) + + return PackageFinder.create( + link_collector=link_collector, + selection_prefs=selection_prefs, + target_python=target_python, + ) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/spinners.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/spinners.py new file mode 100644 index 0000000000000000000000000000000000000000..cf2b976f377c2656afb3d84add8d30b0fc280c03 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/spinners.py @@ -0,0 +1,159 @@ +import contextlib +import itertools +import logging +import sys +import time +from typing import IO, Generator, Optional + +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.logging import get_indentation + +logger = logging.getLogger(__name__) + + +class SpinnerInterface: + def spin(self) -> None: + raise NotImplementedError() + + def finish(self, final_status: str) -> None: + raise NotImplementedError() + + +class InteractiveSpinner(SpinnerInterface): + def __init__( + self, + message: str, + file: Optional[IO[str]] = None, + spin_chars: str = "-\\|/", + # Empirically, 8 updates/second looks nice + min_update_interval_seconds: float = 0.125, + ): + self._message = message + if file is None: + file = sys.stdout + self._file = file + self._rate_limiter = RateLimiter(min_update_interval_seconds) + self._finished = False + + self._spin_cycle = itertools.cycle(spin_chars) + + self._file.write(" " * get_indentation() + self._message + " ... ") + self._width = 0 + + def _write(self, status: str) -> None: + assert not self._finished + # Erase what we wrote before by backspacing to the beginning, writing + # spaces to overwrite the old text, and then backspacing again + backup = "\b" * self._width + self._file.write(backup + " " * self._width + backup) + # Now we have a blank slate to add our status + self._file.write(status) + self._width = len(status) + self._file.flush() + self._rate_limiter.reset() + + def spin(self) -> None: + if self._finished: + return + if not self._rate_limiter.ready(): + return + self._write(next(self._spin_cycle)) + + def finish(self, final_status: str) -> None: + if self._finished: + return + self._write(final_status) + self._file.write("\n") + self._file.flush() + self._finished = True + + +# Used for dumb terminals, non-interactive installs (no tty), etc. +# We still print updates occasionally (once every 60 seconds by default) to +# act as a keep-alive for systems like Travis-CI that take lack-of-output as +# an indication that a task has frozen. 
+class NonInteractiveSpinner(SpinnerInterface): + def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None: + self._message = message + self._finished = False + self._rate_limiter = RateLimiter(min_update_interval_seconds) + self._update("started") + + def _update(self, status: str) -> None: + assert not self._finished + self._rate_limiter.reset() + logger.info("%s: %s", self._message, status) + + def spin(self) -> None: + if self._finished: + return + if not self._rate_limiter.ready(): + return + self._update("still running...") + + def finish(self, final_status: str) -> None: + if self._finished: + return + self._update(f"finished with status '{final_status}'") + self._finished = True + + +class RateLimiter: + def __init__(self, min_update_interval_seconds: float) -> None: + self._min_update_interval_seconds = min_update_interval_seconds + self._last_update: float = 0 + + def ready(self) -> bool: + now = time.time() + delta = now - self._last_update + return delta >= self._min_update_interval_seconds + + def reset(self) -> None: + self._last_update = time.time() + + +@contextlib.contextmanager +def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]: + # Interactive spinner goes directly to sys.stdout rather than being routed + # through the logging system, but it acts like it has level INFO, + # i.e. it's only displayed if we're at level INFO or better. + # Non-interactive spinner goes through the logging system, so it is always + # in sync with logging configuration. + if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: + spinner: SpinnerInterface = InteractiveSpinner(message) + else: + spinner = NonInteractiveSpinner(message) + try: + with hidden_cursor(sys.stdout): + yield spinner + except KeyboardInterrupt: + spinner.finish("canceled") + raise + except Exception: + spinner.finish("error") + raise + else: + spinner.finish("done") + + +HIDE_CURSOR = "\x1b[?25l" +SHOW_CURSOR = "\x1b[?25h" + + +@contextlib.contextmanager +def hidden_cursor(file: IO[str]) -> Generator[None, None, None]: + # The Windows terminal does not support the hide/show cursor ANSI codes, + # even via colorama. So don't even try. + if WINDOWS: + yield + # We don't want to clutter the output with control characters if we're + # writing to a file, or if the user is running with --quiet. 
+ # See https://github.com/pypa/pip/issues/3418 + elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: + yield + else: + file.write(HIDE_CURSOR) + try: + yield + finally: + file.write(SHOW_CURSOR) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f23386ebd92fb3c7965c940f54f7afded680aab Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/candidate.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/candidate.py new file mode 100644 index 0000000000000000000000000000000000000000..f27f283154ac5aa55d52ccac754138b36341ff6b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/candidate.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass + +from pip._vendor.packaging.version import Version +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.models.link import Link + + +@dataclass(frozen=True) +class InstallationCandidate: + """Represents a potential "candidate" for installation.""" + + __slots__ = ["name", "version", "link"] + + name: str + version: Version + link: Link + + def __init__(self, name: str, version: str, link: Link) -> None: + object.__setattr__(self, "name", name) + object.__setattr__(self, "version", parse_version(version)) + object.__setattr__(self, "link", link) + + def __str__(self) -> str: + return f"{self.name!r} candidate (version {self.version} at {self.link})" diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/direct_url.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/direct_url.py new file mode 100644 index 0000000000000000000000000000000000000000..fc5ec8d4aa9b02b7264f7a5a0222e7e1fe215ad0 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/direct_url.py @@ -0,0 +1,224 @@ +""" PEP 610 """ + +import json +import re +import urllib.parse +from dataclasses import dataclass +from typing import Any, ClassVar, Dict, Iterable, Optional, Type, TypeVar, Union + +__all__ = [ + "DirectUrl", + "DirectUrlValidationError", + "DirInfo", + "ArchiveInfo", + "VcsInfo", +] + +T = TypeVar("T") + +DIRECT_URL_METADATA_NAME = "direct_url.json" +ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$") + + +class DirectUrlValidationError(Exception): + pass + + +def _get( + d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None +) -> Optional[T]: + """Get value from dictionary and verify expected type.""" + if key not in d: + return default + value = d[key] + if not isinstance(value, expected_type): + raise DirectUrlValidationError( + f"{value!r} has unexpected type for {key} (expected {expected_type})" + ) + return value + + +def _get_required( + d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None +) -> T: + value = _get(d, expected_type, key, default) + if value is None: + raise DirectUrlValidationError(f"{key} must have a value") + return value + + +def 
_exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType": + infos = [info for info in infos if info is not None] + if not infos: + raise DirectUrlValidationError( + "missing one of archive_info, dir_info, vcs_info" + ) + if len(infos) > 1: + raise DirectUrlValidationError( + "more than one of archive_info, dir_info, vcs_info" + ) + assert infos[0] is not None + return infos[0] + + +def _filter_none(**kwargs: Any) -> Dict[str, Any]: + """Make dict excluding None values.""" + return {k: v for k, v in kwargs.items() if v is not None} + + +@dataclass +class VcsInfo: + name: ClassVar = "vcs_info" + + vcs: str + commit_id: str + requested_revision: Optional[str] = None + + @classmethod + def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]: + if d is None: + return None + return cls( + vcs=_get_required(d, str, "vcs"), + commit_id=_get_required(d, str, "commit_id"), + requested_revision=_get(d, str, "requested_revision"), + ) + + def _to_dict(self) -> Dict[str, Any]: + return _filter_none( + vcs=self.vcs, + requested_revision=self.requested_revision, + commit_id=self.commit_id, + ) + + +class ArchiveInfo: + name = "archive_info" + + def __init__( + self, + hash: Optional[str] = None, + hashes: Optional[Dict[str, str]] = None, + ) -> None: + # set hashes before hash, since the hash setter will further populate hashes + self.hashes = hashes + self.hash = hash + + @property + def hash(self) -> Optional[str]: + return self._hash + + @hash.setter + def hash(self, value: Optional[str]) -> None: + if value is not None: + # Auto-populate the hashes key to upgrade to the new format automatically. + # We don't back-populate the legacy hash key from hashes. + try: + hash_name, hash_value = value.split("=", 1) + except ValueError: + raise DirectUrlValidationError( + f"invalid archive_info.hash format: {value!r}" + ) + if self.hashes is None: + self.hashes = {hash_name: hash_value} + elif hash_name not in self.hashes: + self.hashes = self.hashes.copy() + self.hashes[hash_name] = hash_value + self._hash = value + + @classmethod + def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]: + if d is None: + return None + return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes")) + + def _to_dict(self) -> Dict[str, Any]: + return _filter_none(hash=self.hash, hashes=self.hashes) + + +@dataclass +class DirInfo: + name: ClassVar = "dir_info" + + editable: bool = False + + @classmethod + def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]: + if d is None: + return None + return cls(editable=_get_required(d, bool, "editable", default=False)) + + def _to_dict(self) -> Dict[str, Any]: + return _filter_none(editable=self.editable or None) + + +InfoType = Union[ArchiveInfo, DirInfo, VcsInfo] + + +@dataclass +class DirectUrl: + url: str + info: InfoType + subdirectory: Optional[str] = None + + def _remove_auth_from_netloc(self, netloc: str) -> str: + if "@" not in netloc: + return netloc + user_pass, netloc_no_user_pass = netloc.split("@", 1) + if ( + isinstance(self.info, VcsInfo) + and self.info.vcs == "git" + and user_pass == "git" + ): + return netloc + if ENV_VAR_RE.match(user_pass): + return netloc + return netloc_no_user_pass + + @property + def redacted_url(self) -> str: + """url with user:password part removed unless it is formed with + environment variables as specified in PEP 610, or it is ``git`` + in the case of a git URL. 
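+
+        Illustrative example (hypothetical URL)::
+
+            DirectUrl(
+                url="https://user:secret@example.com/pkg.tar.gz",
+                info=ArchiveInfo(),
+            ).redacted_url
+            # -> 'https://example.com/pkg.tar.gz'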
+ """ + purl = urllib.parse.urlsplit(self.url) + netloc = self._remove_auth_from_netloc(purl.netloc) + surl = urllib.parse.urlunsplit( + (purl.scheme, netloc, purl.path, purl.query, purl.fragment) + ) + return surl + + def validate(self) -> None: + self.from_dict(self.to_dict()) + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl": + return DirectUrl( + url=_get_required(d, str, "url"), + subdirectory=_get(d, str, "subdirectory"), + info=_exactly_one_of( + [ + ArchiveInfo._from_dict(_get(d, dict, "archive_info")), + DirInfo._from_dict(_get(d, dict, "dir_info")), + VcsInfo._from_dict(_get(d, dict, "vcs_info")), + ] + ), + ) + + def to_dict(self) -> Dict[str, Any]: + res = _filter_none( + url=self.redacted_url, + subdirectory=self.subdirectory, + ) + res[self.info.name] = self.info._to_dict() + return res + + @classmethod + def from_json(cls, s: str) -> "DirectUrl": + return cls.from_dict(json.loads(s)) + + def to_json(self) -> str: + return json.dumps(self.to_dict(), sort_keys=True) + + def is_local_editable(self) -> bool: + return isinstance(self.info, DirInfo) and self.info.editable diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/installation_report.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/installation_report.py new file mode 100644 index 0000000000000000000000000000000000000000..b9c6330df32bd2b57c885156cb7f8c0c8c3e3741 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/installation_report.py @@ -0,0 +1,56 @@ +from typing import Any, Dict, Sequence + +from pip._vendor.packaging.markers import default_environment + +from pip import __version__ +from pip._internal.req.req_install import InstallRequirement + + +class InstallationReport: + def __init__(self, install_requirements: Sequence[InstallRequirement]): + self._install_requirements = install_requirements + + @classmethod + def _install_req_to_dict(cls, ireq: InstallRequirement) -> Dict[str, Any]: + assert ireq.download_info, f"No download_info for {ireq}" + res = { + # PEP 610 json for the download URL. download_info.archive_info.hashes may + # be absent when the requirement was installed from the wheel cache + # and the cache entry was populated by an older pip version that did not + # record origin.json. + "download_info": ireq.download_info.to_dict(), + # is_direct is true if the requirement was a direct URL reference (which + # includes editable requirements), and false if the requirement was + # downloaded from a PEP 503 index or --find-links. + "is_direct": ireq.is_direct, + # is_yanked is true if the requirement was yanked from the index, but + # was still selected by pip to conform to PEP 592. + "is_yanked": ireq.link.is_yanked if ireq.link else False, + # requested is true if the requirement was specified by the user (aka + # top level requirement), and false if it was installed as a dependency of a + # requirement. https://peps.python.org/pep-0376/#requested + "requested": ireq.user_supplied, + # PEP 566 json encoding for metadata + # https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata + "metadata": ireq.get_dist().metadata_dict, + } + if ireq.user_supplied and ireq.extras: + # For top level requirements, the list of requested extras, if any. 
+ res["requested_extras"] = sorted(ireq.extras) + return res + + def to_dict(self) -> Dict[str, Any]: + return { + "version": "1", + "pip_version": __version__, + "install": [ + self._install_req_to_dict(ireq) for ireq in self._install_requirements + ], + # https://peps.python.org/pep-0508/#environment-markers + # TODO: currently, the resolver uses the default environment to evaluate + # environment markers, so that is what we report here. In the future, it + # should also take into account options such as --python-version or + # --platform, perhaps under the form of an environment_override field? + # https://github.com/pypa/pip/issues/11198 + "environment": default_environment(), + } diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/scheme.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/scheme.py new file mode 100644 index 0000000000000000000000000000000000000000..06a9a550e34389c27ad3ee0bcef73d581cd4b448 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/scheme.py @@ -0,0 +1,25 @@ +""" +For types associated with installation schemes. + +For a general overview of available schemes and their context, see +https://docs.python.org/3/install/index.html#alternate-installation. +""" + +from dataclasses import dataclass + +SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"] + + +@dataclass(frozen=True) +class Scheme: + """A Scheme holds paths which are used as the base directories for + artifacts associated with a Python package. + """ + + __slots__ = SCHEME_KEYS + + platlib: str + purelib: str + headers: str + scripts: str + data: str diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/search_scope.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/search_scope.py new file mode 100644 index 0000000000000000000000000000000000000000..ee7bc86229acda0378707431e5b4e9f054305d85 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/search_scope.py @@ -0,0 +1,127 @@ +import itertools +import logging +import os +import posixpath +import urllib.parse +from dataclasses import dataclass +from typing import List + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.models.index import PyPI +from pip._internal.utils.compat import has_tls +from pip._internal.utils.misc import normalize_path, redact_auth_from_url + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class SearchScope: + """ + Encapsulates the locations that pip is configured to search. + """ + + __slots__ = ["find_links", "index_urls", "no_index"] + + find_links: List[str] + index_urls: List[str] + no_index: bool + + @classmethod + def create( + cls, + find_links: List[str], + index_urls: List[str], + no_index: bool, + ) -> "SearchScope": + """ + Create a SearchScope object after normalizing the `find_links`. + """ + # Build find_links. If an argument starts with ~, it may be + # a local file relative to a home directory. So try normalizing + # it and if it exists, use the normalized version. + # This is deliberately conservative - it might be fine just to + # blindly normalize anything starting with a ~... 
+ built_find_links: List[str] = [] + for link in find_links: + if link.startswith("~"): + new_link = normalize_path(link) + if os.path.exists(new_link): + link = new_link + built_find_links.append(link) + + # If we don't have TLS enabled, then WARN if anyplace we're looking + # relies on TLS. + if not has_tls(): + for link in itertools.chain(index_urls, built_find_links): + parsed = urllib.parse.urlparse(link) + if parsed.scheme == "https": + logger.warning( + "pip is configured with locations that require " + "TLS/SSL, however the ssl module in Python is not " + "available." + ) + break + + return cls( + find_links=built_find_links, + index_urls=index_urls, + no_index=no_index, + ) + + def get_formatted_locations(self) -> str: + lines = [] + redacted_index_urls = [] + if self.index_urls and self.index_urls != [PyPI.simple_url]: + for url in self.index_urls: + redacted_index_url = redact_auth_from_url(url) + + # Parse the URL + purl = urllib.parse.urlsplit(redacted_index_url) + + # URL is generally invalid if scheme and netloc is missing + # there are issues with Python and URL parsing, so this test + # is a bit crude. See bpo-20271, bpo-23505. Python doesn't + # always parse invalid URLs correctly - it should raise + # exceptions for malformed URLs + if not purl.scheme and not purl.netloc: + logger.warning( + 'The index url "%s" seems invalid, please provide a scheme.', + redacted_index_url, + ) + + redacted_index_urls.append(redacted_index_url) + + lines.append( + "Looking in indexes: {}".format(", ".join(redacted_index_urls)) + ) + + if self.find_links: + lines.append( + "Looking in links: {}".format( + ", ".join(redact_auth_from_url(url) for url in self.find_links) + ) + ) + return "\n".join(lines) + + def get_index_urls_locations(self, project_name: str) -> List[str]: + """Returns the locations found via self.index_urls + + Checks the url_name on the main (first in the list) index and + use this url_name to produce all locations + """ + + def mkurl_pypi_url(url: str) -> str: + loc = posixpath.join( + url, urllib.parse.quote(canonicalize_name(project_name)) + ) + # For maximum compatibility with easy_install, ensure the path + # ends in a trailing slash. Although this isn't in the spec + # (and PyPI can handle it without the slash) some other index + # implementations might break if they relied on easy_install's + # behavior. + if not loc.endswith("/"): + loc = loc + "/" + return loc + + return [mkurl_pypi_url(url) for url in self.index_urls] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/selection_prefs.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/selection_prefs.py new file mode 100644 index 0000000000000000000000000000000000000000..e9b50aa51756719d751ed0338aa7ca0a33d45f5a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/selection_prefs.py @@ -0,0 +1,53 @@ +from typing import Optional + +from pip._internal.models.format_control import FormatControl + + +# TODO: This needs Python 3.10's improved slots support for dataclasses +# to be converted into a dataclass. +class SelectionPreferences: + """ + Encapsulates the candidate selection preferences for downloading + and installing files. 
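+
+    A hedged construction sketch (illustrative values)::
+
+        prefs = SelectionPreferences(allow_yanked=False, prefer_binary=True)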
+ """ + + __slots__ = [ + "allow_yanked", + "allow_all_prereleases", + "format_control", + "prefer_binary", + "ignore_requires_python", + ] + + # Don't include an allow_yanked default value to make sure each call + # site considers whether yanked releases are allowed. This also causes + # that decision to be made explicit in the calling code, which helps + # people when reading the code. + def __init__( + self, + allow_yanked: bool, + allow_all_prereleases: bool = False, + format_control: Optional[FormatControl] = None, + prefer_binary: bool = False, + ignore_requires_python: Optional[bool] = None, + ) -> None: + """Create a SelectionPreferences object. + + :param allow_yanked: Whether files marked as yanked (in the sense + of PEP 592) are permitted to be candidates for install. + :param format_control: A FormatControl object or None. Used to control + the selection of source packages / binary packages when consulting + the index and links. + :param prefer_binary: Whether to prefer an old, but valid, binary + dist over a new source dist. + :param ignore_requires_python: Whether to ignore incompatible + "Requires-Python" values in links. Defaults to False. + """ + if ignore_requires_python is None: + ignore_requires_python = False + + self.allow_yanked = allow_yanked + self.allow_all_prereleases = allow_all_prereleases + self.format_control = format_control + self.prefer_binary = prefer_binary + self.ignore_requires_python = ignore_requires_python diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/target_python.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/target_python.py new file mode 100644 index 0000000000000000000000000000000000000000..88925a9fd01a440e6de970bc234c3503b7f09cc1 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/target_python.py @@ -0,0 +1,121 @@ +import sys +from typing import List, Optional, Set, Tuple + +from pip._vendor.packaging.tags import Tag + +from pip._internal.utils.compatibility_tags import get_supported, version_info_to_nodot +from pip._internal.utils.misc import normalize_version_info + + +class TargetPython: + """ + Encapsulates the properties of a Python interpreter one is targeting + for a package install, download, etc. + """ + + __slots__ = [ + "_given_py_version_info", + "abis", + "implementation", + "platforms", + "py_version", + "py_version_info", + "_valid_tags", + "_valid_tags_set", + ] + + def __init__( + self, + platforms: Optional[List[str]] = None, + py_version_info: Optional[Tuple[int, ...]] = None, + abis: Optional[List[str]] = None, + implementation: Optional[str] = None, + ) -> None: + """ + :param platforms: A list of strings or None. If None, searches for + packages that are supported by the current system. Otherwise, will + find packages that can be built on the platforms passed in. These + packages will only be downloaded for distribution: they will + not be built locally. + :param py_version_info: An optional tuple of ints representing the + Python version information to use (e.g. `sys.version_info[:3]`). + This can have length 1, 2, or 3 when provided. + :param abis: A list of strings or None. This is passed to + compatibility_tags.py's get_supported() function as is. + :param implementation: A string or None. This is passed to + compatibility_tags.py's get_supported() function as is. + """ + # Store the given py_version_info for when we call get_supported(). 
+ self._given_py_version_info = py_version_info + + if py_version_info is None: + py_version_info = sys.version_info[:3] + else: + py_version_info = normalize_version_info(py_version_info) + + py_version = ".".join(map(str, py_version_info[:2])) + + self.abis = abis + self.implementation = implementation + self.platforms = platforms + self.py_version = py_version + self.py_version_info = py_version_info + + # This is used to cache the return value of get_(un)sorted_tags. + self._valid_tags: Optional[List[Tag]] = None + self._valid_tags_set: Optional[Set[Tag]] = None + + def format_given(self) -> str: + """ + Format the given, non-None attributes for display. + """ + display_version = None + if self._given_py_version_info is not None: + display_version = ".".join( + str(part) for part in self._given_py_version_info + ) + + key_values = [ + ("platforms", self.platforms), + ("version_info", display_version), + ("abis", self.abis), + ("implementation", self.implementation), + ] + return " ".join( + f"{key}={value!r}" for key, value in key_values if value is not None + ) + + def get_sorted_tags(self) -> List[Tag]: + """ + Return the supported PEP 425 tags to check wheel candidates against. + + The tags are returned in order of preference (most preferred first). + """ + if self._valid_tags is None: + # Pass versions=None if no py_version_info was given since + # versions=None uses special default logic. + py_version_info = self._given_py_version_info + if py_version_info is None: + version = None + else: + version = version_info_to_nodot(py_version_info) + + tags = get_supported( + version=version, + platforms=self.platforms, + abis=self.abis, + impl=self.implementation, + ) + self._valid_tags = tags + + return self._valid_tags + + def get_unsorted_tags(self) -> Set[Tag]: + """Exactly the same as get_sorted_tags, but returns a set. + + This is important for performance. 
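+
+        Illustrative use (hypothetical ``tag``)::
+
+            if tag in target_python.get_unsorted_tags():  # O(1) membership
+                ...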
+ """ + if self._valid_tags_set is None: + self._valid_tags_set = set(self.get_sorted_tags()) + + return self._valid_tags_set diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94e9d9588df31efeb8c15ab5c12b04765155047a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/base.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/base.py new file mode 100644 index 0000000000000000000000000000000000000000..42dade18c1ec2b825f756dad4aaa89f2d9e6ce21 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/base.py @@ -0,0 +1,20 @@ +from typing import Callable, List, Optional + +from pip._internal.req.req_install import InstallRequirement +from pip._internal.req.req_set import RequirementSet + +InstallRequirementProvider = Callable[ + [str, Optional[InstallRequirement]], InstallRequirement +] + + +class BaseResolver: + def resolve( + self, root_reqs: List[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: + raise NotImplementedError() + + def get_installation_order( + self, req_set: RequirementSet + ) -> List[InstallRequirement]: + raise NotImplementedError() diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e2c4bfac59d860a8b91740e7cfb71b25a2f7648 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a36935438718ab96971fabc4053774c24cf062fe Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/resolver.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/resolver.py new file mode 100644 index 0000000000000000000000000000000000000000..1dd0d7041bb7a0a32bdf22f825c52f87276e5e07 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/resolver.py @@ -0,0 +1,597 @@ +"""Dependency Resolution + +The dependency resolution in pip is performed as follows: + +for top-level requirements: + a. only one spec allowed per project, regardless of conflicts or not. + otherwise a "double requirement" exception is raised + b. 
they override sub-dependency requirements. +for sub-dependencies + a. "first found, wins" (where the order is breadth first) +""" + +import logging +import sys +from collections import defaultdict +from itertools import chain +from typing import DefaultDict, Iterable, List, Optional, Set, Tuple + +from pip._vendor.packaging import specifiers +from pip._vendor.packaging.requirements import Requirement + +from pip._internal.cache import WheelCache +from pip._internal.exceptions import ( + BestVersionAlreadyInstalled, + DistributionNotFound, + HashError, + HashErrors, + InstallationError, + NoneMetadataError, + UnsupportedPythonVersion, +) +from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution +from pip._internal.models.link import Link +from pip._internal.models.wheel import Wheel +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req.req_install import ( + InstallRequirement, + check_invalid_constraint_type, +) +from pip._internal.req.req_set import RequirementSet +from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider +from pip._internal.utils import compatibility_tags +from pip._internal.utils.compatibility_tags import get_supported +from pip._internal.utils.direct_url_helpers import direct_url_from_link +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import normalize_version_info +from pip._internal.utils.packaging import check_requires_python + +logger = logging.getLogger(__name__) + +DiscoveredDependencies = DefaultDict[Optional[str], List[InstallRequirement]] + + +def _check_dist_requires_python( + dist: BaseDistribution, + version_info: Tuple[int, int, int], + ignore_requires_python: bool = False, +) -> None: + """ + Check whether the given Python version is compatible with a distribution's + "Requires-Python" value. + + :param version_info: A 3-tuple of ints representing the Python + major-minor-micro version to check. + :param ignore_requires_python: Whether to ignore the "Requires-Python" + value if the given Python version isn't compatible. + + :raises UnsupportedPythonVersion: When the given Python version isn't + compatible. + """ + # This idiosyncratically converts the SpecifierSet to str and let + # check_requires_python then parse it again into SpecifierSet. But this + # is the legacy resolver so I'm just not going to bother refactoring. + try: + requires_python = str(dist.requires_python) + except FileNotFoundError as e: + raise NoneMetadataError(dist, str(e)) + try: + is_compatible = check_requires_python( + requires_python, + version_info=version_info, + ) + except specifiers.InvalidSpecifier as exc: + logger.warning( + "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc + ) + return + + if is_compatible: + return + + version = ".".join(map(str, version_info)) + if ignore_requires_python: + logger.debug( + "Ignoring failed Requires-Python check for package %r: %s not in %r", + dist.raw_name, + version, + requires_python, + ) + return + + raise UnsupportedPythonVersion( + f"Package {dist.raw_name!r} requires a different Python: " + f"{version} not in {requires_python!r}" + ) + + +class Resolver(BaseResolver): + """Resolves which packages need to be installed/uninstalled to perform \ + the requested operation without breaking the requirements of any package. 
+ """ + + _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} + + def __init__( + self, + preparer: RequirementPreparer, + finder: PackageFinder, + wheel_cache: Optional[WheelCache], + make_install_req: InstallRequirementProvider, + use_user_site: bool, + ignore_dependencies: bool, + ignore_installed: bool, + ignore_requires_python: bool, + force_reinstall: bool, + upgrade_strategy: str, + py_version_info: Optional[Tuple[int, ...]] = None, + ) -> None: + super().__init__() + assert upgrade_strategy in self._allowed_strategies + + if py_version_info is None: + py_version_info = sys.version_info[:3] + else: + py_version_info = normalize_version_info(py_version_info) + + self._py_version_info = py_version_info + + self.preparer = preparer + self.finder = finder + self.wheel_cache = wheel_cache + + self.upgrade_strategy = upgrade_strategy + self.force_reinstall = force_reinstall + self.ignore_dependencies = ignore_dependencies + self.ignore_installed = ignore_installed + self.ignore_requires_python = ignore_requires_python + self.use_user_site = use_user_site + self._make_install_req = make_install_req + + self._discovered_dependencies: DiscoveredDependencies = defaultdict(list) + + def resolve( + self, root_reqs: List[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: + """Resolve what operations need to be done + + As a side-effect of this method, the packages (and their dependencies) + are downloaded, unpacked and prepared for installation. This + preparation is done by ``pip.operations.prepare``. + + Once PyPI has static dependency metadata available, it would be + possible to move the preparation to become a step separated from + dependency resolution. + """ + requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels) + for req in root_reqs: + if req.constraint: + check_invalid_constraint_type(req) + self._add_requirement_to_set(requirement_set, req) + + # Actually prepare the files, and collect any exceptions. Most hash + # exceptions cannot be checked ahead of time, because + # _populate_link() needs to be called before we can make decisions + # based on link type. + discovered_reqs: List[InstallRequirement] = [] + hash_errors = HashErrors() + for req in chain(requirement_set.all_requirements, discovered_reqs): + try: + discovered_reqs.extend(self._resolve_one(requirement_set, req)) + except HashError as exc: + exc.req = req + hash_errors.append(exc) + + if hash_errors: + raise hash_errors + + return requirement_set + + def _add_requirement_to_set( + self, + requirement_set: RequirementSet, + install_req: InstallRequirement, + parent_req_name: Optional[str] = None, + extras_requested: Optional[Iterable[str]] = None, + ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]: + """Add install_req as a requirement to install. + + :param parent_req_name: The name of the requirement that needed this + added. The name is used because when multiple unnamed requirements + resolve to the same name, we could otherwise end up with dependency + links that point outside the Requirements set. parent_req must + already be added. Note that None implies that this is a user + supplied requirement, vs an inferred one. + :param extras_requested: an iterable of extras used to evaluate the + environment markers. + :return: Additional requirements to scan. That is either [] if + the requirement is not applicable, or [install_req] if the + requirement is applicable and has just been added. 
+ """ + # If the markers do not match, ignore this requirement. + if not install_req.match_markers(extras_requested): + logger.info( + "Ignoring %s: markers '%s' don't match your environment", + install_req.name, + install_req.markers, + ) + return [], None + + # If the wheel is not supported, raise an error. + # Should check this after filtering out based on environment markers to + # allow specifying different wheels based on the environment/OS, in a + # single requirements file. + if install_req.link and install_req.link.is_wheel: + wheel = Wheel(install_req.link.filename) + tags = compatibility_tags.get_supported() + if requirement_set.check_supported_wheels and not wheel.supported(tags): + raise InstallationError( + f"{wheel.filename} is not a supported wheel on this platform." + ) + + # This next bit is really a sanity check. + assert ( + not install_req.user_supplied or parent_req_name is None + ), "a user supplied req shouldn't have a parent" + + # Unnamed requirements are scanned again and the requirement won't be + # added as a dependency until after scanning. + if not install_req.name: + requirement_set.add_unnamed_requirement(install_req) + return [install_req], None + + try: + existing_req: Optional[InstallRequirement] = ( + requirement_set.get_requirement(install_req.name) + ) + except KeyError: + existing_req = None + + has_conflicting_requirement = ( + parent_req_name is None + and existing_req + and not existing_req.constraint + and existing_req.extras == install_req.extras + and existing_req.req + and install_req.req + and existing_req.req.specifier != install_req.req.specifier + ) + if has_conflicting_requirement: + raise InstallationError( + f"Double requirement given: {install_req} " + f"(already in {existing_req}, name={install_req.name!r})" + ) + + # When no existing requirement exists, add the requirement as a + # dependency and it will be scanned again after. + if not existing_req: + requirement_set.add_named_requirement(install_req) + # We'd want to rescan this requirement later + return [install_req], install_req + + # Assume there's no need to scan, and that we've already + # encountered this for scanning. + if install_req.constraint or not existing_req.constraint: + return [], existing_req + + does_not_satisfy_constraint = install_req.link and not ( + existing_req.link and install_req.link.path == existing_req.link.path + ) + if does_not_satisfy_constraint: + raise InstallationError( + f"Could not satisfy constraints for '{install_req.name}': " + "installation from path or url cannot be " + "constrained to a version" + ) + # If we're now installing a constraint, mark the existing + # object for real installation. + existing_req.constraint = False + # If we're now installing a user supplied requirement, + # mark the existing object as such. + if install_req.user_supplied: + existing_req.user_supplied = True + existing_req.extras = tuple( + sorted(set(existing_req.extras) | set(install_req.extras)) + ) + logger.debug( + "Setting %s extras to: %s", + existing_req, + existing_req.extras, + ) + # Return the existing requirement for addition to the parent and + # scanning again. 
+ return [existing_req], existing_req + + def _is_upgrade_allowed(self, req: InstallRequirement) -> bool: + if self.upgrade_strategy == "to-satisfy-only": + return False + elif self.upgrade_strategy == "eager": + return True + else: + assert self.upgrade_strategy == "only-if-needed" + return req.user_supplied or req.constraint + + def _set_req_to_reinstall(self, req: InstallRequirement) -> None: + """ + Set a requirement to be installed. + """ + # Don't uninstall the conflict if doing a user install and the + # conflict is not a user install. + assert req.satisfied_by is not None + if not self.use_user_site or req.satisfied_by.in_usersite: + req.should_reinstall = True + req.satisfied_by = None + + def _check_skip_installed( + self, req_to_install: InstallRequirement + ) -> Optional[str]: + """Check if req_to_install should be skipped. + + This will check if the req is installed, and whether we should upgrade + or reinstall it, taking into account all the relevant user options. + + After calling this req_to_install will only have satisfied_by set to + None if the req_to_install is to be upgraded/reinstalled etc. Any + other value will be a dist recording the current thing installed that + satisfies the requirement. + + Note that for vcs urls and the like we can't assess skipping in this + routine - we simply identify that we need to pull the thing down, + then later on it is pulled down and introspected to assess upgrade/ + reinstalls etc. + + :return: A text reason for why it was skipped, or None. + """ + if self.ignore_installed: + return None + + req_to_install.check_if_exists(self.use_user_site) + if not req_to_install.satisfied_by: + return None + + if self.force_reinstall: + self._set_req_to_reinstall(req_to_install) + return None + + if not self._is_upgrade_allowed(req_to_install): + if self.upgrade_strategy == "only-if-needed": + return "already satisfied, skipping upgrade" + return "already satisfied" + + # Check for the possibility of an upgrade. For link-based + # requirements we have to pull the tree down and inspect to assess + # the version #, so it's handled way down. + if not req_to_install.link: + try: + self.finder.find_requirement(req_to_install, upgrade=True) + except BestVersionAlreadyInstalled: + # Then the best version is installed. + return "already up-to-date" + except DistributionNotFound: + # No distribution found, so we squash the error. It will + # be raised later when we re-try later to do the install. + # Why don't we just raise here? + pass + + self._set_req_to_reinstall(req_to_install) + return None + + def _find_requirement_link(self, req: InstallRequirement) -> Optional[Link]: + upgrade = self._is_upgrade_allowed(req) + best_candidate = self.finder.find_requirement(req, upgrade) + if not best_candidate: + return None + + # Log a warning per PEP 592 if necessary before returning. + link = best_candidate.link + if link.is_yanked: + reason = link.yanked_reason or "" + msg = ( + # Mark this as a unicode string to prevent + # "UnicodeEncodeError: 'ascii' codec can't encode character" + # in Python 2 when the reason contains non-ascii characters. + "The candidate selected for download or install is a " + f"yanked version: {best_candidate}\n" + f"Reason for being yanked: {reason}" + ) + logger.warning(msg) + + return link + + def _populate_link(self, req: InstallRequirement) -> None: + """Ensure that if a link can be found for this, that it is found. 
+ + Note that req.link may still be None - if the requirement is already + installed and not needed to be upgraded based on the return value of + _is_upgrade_allowed(). + + If preparer.require_hashes is True, don't use the wheel cache, because + cached wheels, always built locally, have different hashes than the + files downloaded from the index server and thus throw false hash + mismatches. Furthermore, cached wheels at present have undeterministic + contents due to file modification times. + """ + if req.link is None: + req.link = self._find_requirement_link(req) + + if self.wheel_cache is None or self.preparer.require_hashes: + return + + assert req.link is not None, "_find_requirement_link unexpectedly returned None" + cache_entry = self.wheel_cache.get_cache_entry( + link=req.link, + package_name=req.name, + supported_tags=get_supported(), + ) + if cache_entry is not None: + logger.debug("Using cached wheel link: %s", cache_entry.link) + if req.link is req.original_link and cache_entry.persistent: + req.cached_wheel_source_link = req.link + if cache_entry.origin is not None: + req.download_info = cache_entry.origin + else: + # Legacy cache entry that does not have origin.json. + # download_info may miss the archive_info.hashes field. + req.download_info = direct_url_from_link( + req.link, link_is_in_wheel_cache=cache_entry.persistent + ) + req.link = cache_entry.link + + def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution: + """Takes a InstallRequirement and returns a single AbstractDist \ + representing a prepared variant of the same. + """ + if req.editable: + return self.preparer.prepare_editable_requirement(req) + + # satisfied_by is only evaluated by calling _check_skip_installed, + # so it must be None here. + assert req.satisfied_by is None + skip_reason = self._check_skip_installed(req) + + if req.satisfied_by: + return self.preparer.prepare_installed_requirement(req, skip_reason) + + # We eagerly populate the link, since that's our "legacy" behavior. + self._populate_link(req) + dist = self.preparer.prepare_linked_requirement(req) + + # NOTE + # The following portion is for determining if a certain package is + # going to be re-installed/upgraded or not and reporting to the user. + # This should probably get cleaned up in a future refactor. + + # req.req is only avail after unpack for URL + # pkgs repeat check_if_exists to uninstall-on-upgrade + # (#14) + if not self.ignore_installed: + req.check_if_exists(self.use_user_site) + + if req.satisfied_by: + should_modify = ( + self.upgrade_strategy != "to-satisfy-only" + or self.force_reinstall + or self.ignore_installed + or req.link.scheme == "file" + ) + if should_modify: + self._set_req_to_reinstall(req) + else: + logger.info( + "Requirement already satisfied (use --upgrade to upgrade): %s", + req, + ) + return dist + + def _resolve_one( + self, + requirement_set: RequirementSet, + req_to_install: InstallRequirement, + ) -> List[InstallRequirement]: + """Prepare a single requirements file. + + :return: A list of additional InstallRequirements to also install. 
+ """ + # Tell user what we are doing for this requirement: + # obtain (editable), skipping, processing (local url), collecting + # (remote url or package name) + if req_to_install.constraint or req_to_install.prepared: + return [] + + req_to_install.prepared = True + + # Parse and return dependencies + dist = self._get_dist_for(req_to_install) + # This will raise UnsupportedPythonVersion if the given Python + # version isn't compatible with the distribution's Requires-Python. + _check_dist_requires_python( + dist, + version_info=self._py_version_info, + ignore_requires_python=self.ignore_requires_python, + ) + + more_reqs: List[InstallRequirement] = [] + + def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None: + # This idiosyncratically converts the Requirement to str and let + # make_install_req then parse it again into Requirement. But this is + # the legacy resolver so I'm just not going to bother refactoring. + sub_install_req = self._make_install_req(str(subreq), req_to_install) + parent_req_name = req_to_install.name + to_scan_again, add_to_parent = self._add_requirement_to_set( + requirement_set, + sub_install_req, + parent_req_name=parent_req_name, + extras_requested=extras_requested, + ) + if parent_req_name and add_to_parent: + self._discovered_dependencies[parent_req_name].append(add_to_parent) + more_reqs.extend(to_scan_again) + + with indent_log(): + # We add req_to_install before its dependencies, so that we + # can refer to it when adding dependencies. + assert req_to_install.name is not None + if not requirement_set.has_requirement(req_to_install.name): + # 'unnamed' requirements will get added here + # 'unnamed' requirements can only come from being directly + # provided by the user. + assert req_to_install.user_supplied + self._add_requirement_to_set( + requirement_set, req_to_install, parent_req_name=None + ) + + if not self.ignore_dependencies: + if req_to_install.extras: + logger.debug( + "Installing extra requirements: %r", + ",".join(req_to_install.extras), + ) + missing_requested = sorted( + set(req_to_install.extras) - set(dist.iter_provided_extras()) + ) + for missing in missing_requested: + logger.warning( + "%s %s does not provide the extra '%s'", + dist.raw_name, + dist.version, + missing, + ) + + available_requested = sorted( + set(dist.iter_provided_extras()) & set(req_to_install.extras) + ) + for subreq in dist.iter_dependencies(available_requested): + add_req(subreq, extras_requested=available_requested) + + return more_reqs + + def get_installation_order( + self, req_set: RequirementSet + ) -> List[InstallRequirement]: + """Create the installation order. + + The installation order is topological - requirements are installed + before the requiring thing. We break cycles at an arbitrary point, + and make no other guarantees. + """ + # The current implementation, which we may change at any point + # installs the user specified things in the order given, except when + # dependencies must come earlier to achieve topological order. 
+ order = [] + ordered_reqs: Set[InstallRequirement] = set() + + def schedule(req: InstallRequirement) -> None: + if req.satisfied_by or req in ordered_reqs: + return + if req.constraint: + return + ordered_reqs.add(req) + for dep in self._discovered_dependencies[req.name]: + schedule(dep) + order.append(req) + + for install_req in req_set.requirements.values(): + schedule(install_req) + return order diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/candidates.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/candidates.py new file mode 100644 index 0000000000000000000000000000000000000000..6617644fe5316cb739050cc2266a5c015f9279e3 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/candidates.py @@ -0,0 +1,574 @@ +import logging +import sys +from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast + +from pip._vendor.packaging.requirements import InvalidRequirement +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import Version + +from pip._internal.exceptions import ( + HashError, + InstallationSubprocessError, + InvalidInstalledPackage, + MetadataInconsistent, + MetadataInvalid, +) +from pip._internal.metadata import BaseDistribution +from pip._internal.models.link import Link, links_equivalent +from pip._internal.models.wheel import Wheel +from pip._internal.req.constructors import ( + install_req_from_editable, + install_req_from_line, +) +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.direct_url_helpers import direct_url_from_link +from pip._internal.utils.misc import normalize_version_info + +from .base import Candidate, Requirement, format_name + +if TYPE_CHECKING: + from .factory import Factory + +logger = logging.getLogger(__name__) + +BaseCandidate = Union[ + "AlreadyInstalledCandidate", + "EditableCandidate", + "LinkCandidate", +] + +# Avoid conflicting with the PyPI package "Python". 
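+# The empty string below can never collide with a real project, since any
+# actual distribution has a non-empty normalized name; it identifies the
+# synthetic requirement generated for a candidate's Requires-Python value.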
+REQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, "") + + +def as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]: + """The runtime version of BaseCandidate.""" + base_candidate_classes = ( + AlreadyInstalledCandidate, + EditableCandidate, + LinkCandidate, + ) + if isinstance(candidate, base_candidate_classes): + return candidate + return None + + +def make_install_req_from_link( + link: Link, template: InstallRequirement +) -> InstallRequirement: + assert not template.editable, "template is editable" + if template.req: + line = str(template.req) + else: + line = link.url + ireq = install_req_from_line( + line, + user_supplied=template.user_supplied, + comes_from=template.comes_from, + use_pep517=template.use_pep517, + isolated=template.isolated, + constraint=template.constraint, + global_options=template.global_options, + hash_options=template.hash_options, + config_settings=template.config_settings, + ) + ireq.original_link = template.original_link + ireq.link = link + ireq.extras = template.extras + return ireq + + +def make_install_req_from_editable( + link: Link, template: InstallRequirement +) -> InstallRequirement: + assert template.editable, "template not editable" + ireq = install_req_from_editable( + link.url, + user_supplied=template.user_supplied, + comes_from=template.comes_from, + use_pep517=template.use_pep517, + isolated=template.isolated, + constraint=template.constraint, + permit_editable_wheels=template.permit_editable_wheels, + global_options=template.global_options, + hash_options=template.hash_options, + config_settings=template.config_settings, + ) + ireq.extras = template.extras + return ireq + + +def _make_install_req_from_dist( + dist: BaseDistribution, template: InstallRequirement +) -> InstallRequirement: + if template.req: + line = str(template.req) + elif template.link: + line = f"{dist.canonical_name} @ {template.link.url}" + else: + line = f"{dist.canonical_name}=={dist.version}" + ireq = install_req_from_line( + line, + user_supplied=template.user_supplied, + comes_from=template.comes_from, + use_pep517=template.use_pep517, + isolated=template.isolated, + constraint=template.constraint, + global_options=template.global_options, + hash_options=template.hash_options, + config_settings=template.config_settings, + ) + ireq.satisfied_by = dist + return ireq + + +class _InstallRequirementBackedCandidate(Candidate): + """A candidate backed by an ``InstallRequirement``. + + This represents a package request with the target not being already + in the environment, and needs to be fetched and installed. The backing + ``InstallRequirement`` is responsible for most of the leg work; this + class exposes appropriate information to the resolver. + + :param link: The link passed to the ``InstallRequirement``. The backing + ``InstallRequirement`` will use this link to fetch the distribution. + :param source_link: The link this candidate "originates" from. This is + different from ``link`` when the link is found in the wheel cache. + ``link`` would point to the wheel cache, while this points to the + found remote link (e.g. from pypi.org). 
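+
+    For example (illustrative): an sdist found at a remote index URL for
+    which a locally built wheel is already cached will keep that remote URL
+    as ``source_link``, while ``link`` points into pip's wheel cache.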
+ """ + + dist: BaseDistribution + is_installed = False + + def __init__( + self, + link: Link, + source_link: Link, + ireq: InstallRequirement, + factory: "Factory", + name: Optional[NormalizedName] = None, + version: Optional[Version] = None, + ) -> None: + self._link = link + self._source_link = source_link + self._factory = factory + self._ireq = ireq + self._name = name + self._version = version + self.dist = self._prepare() + self._hash: Optional[int] = None + + def __str__(self) -> str: + return f"{self.name} {self.version}" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self._link)!r})" + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash((self.__class__, self._link)) + return self._hash + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return links_equivalent(self._link, other._link) + return False + + @property + def source_link(self) -> Optional[Link]: + return self._source_link + + @property + def project_name(self) -> NormalizedName: + """The normalised name of the project the candidate refers to""" + if self._name is None: + self._name = self.dist.canonical_name + return self._name + + @property + def name(self) -> str: + return self.project_name + + @property + def version(self) -> Version: + if self._version is None: + self._version = self.dist.version + return self._version + + def format_for_error(self) -> str: + return ( + f"{self.name} {self.version} " + f"(from {self._link.file_path if self._link.is_file else self._link})" + ) + + def _prepare_distribution(self) -> BaseDistribution: + raise NotImplementedError("Override in subclass") + + def _check_metadata_consistency(self, dist: BaseDistribution) -> None: + """Check for consistency of project name and version of dist.""" + if self._name is not None and self._name != dist.canonical_name: + raise MetadataInconsistent( + self._ireq, + "name", + self._name, + dist.canonical_name, + ) + if self._version is not None and self._version != dist.version: + raise MetadataInconsistent( + self._ireq, + "version", + str(self._version), + str(dist.version), + ) + # check dependencies are valid + # TODO performance: this means we iterate the dependencies at least twice, + # we may want to cache parsed Requires-Dist + try: + list(dist.iter_dependencies(list(dist.iter_provided_extras()))) + except InvalidRequirement as e: + raise MetadataInvalid(self._ireq, str(e)) + + def _prepare(self) -> BaseDistribution: + try: + dist = self._prepare_distribution() + except HashError as e: + # Provide HashError the underlying ireq that caused it. This + # provides context for the resulting error message to show the + # offending line to the user. + e.req = self._ireq + raise + except InstallationSubprocessError as exc: + # The output has been presented already, so don't duplicate it. + exc.context = "See above for output." 
+ raise + + self._check_metadata_consistency(dist) + return dist + + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: + requires = self.dist.iter_dependencies() if with_requires else () + for r in requires: + yield from self._factory.make_requirements_from_spec(str(r), self._ireq) + yield self._factory.make_requires_python_requirement(self.dist.requires_python) + + def get_install_requirement(self) -> Optional[InstallRequirement]: + return self._ireq + + +class LinkCandidate(_InstallRequirementBackedCandidate): + is_editable = False + + def __init__( + self, + link: Link, + template: InstallRequirement, + factory: "Factory", + name: Optional[NormalizedName] = None, + version: Optional[Version] = None, + ) -> None: + source_link = link + cache_entry = factory.get_wheel_cache_entry(source_link, name) + if cache_entry is not None: + logger.debug("Using cached wheel link: %s", cache_entry.link) + link = cache_entry.link + ireq = make_install_req_from_link(link, template) + assert ireq.link == link + if ireq.link.is_wheel and not ireq.link.is_file: + wheel = Wheel(ireq.link.filename) + wheel_name = canonicalize_name(wheel.name) + assert name == wheel_name, f"{name!r} != {wheel_name!r} for wheel" + # Version may not be present for PEP 508 direct URLs + if version is not None: + wheel_version = Version(wheel.version) + assert ( + version == wheel_version + ), f"{version!r} != {wheel_version!r} for wheel {name}" + + if cache_entry is not None: + assert ireq.link.is_wheel + assert ireq.link.is_file + if cache_entry.persistent and template.link is template.original_link: + ireq.cached_wheel_source_link = source_link + if cache_entry.origin is not None: + ireq.download_info = cache_entry.origin + else: + # Legacy cache entry that does not have origin.json. + # download_info may miss the archive_info.hashes field. + ireq.download_info = direct_url_from_link( + source_link, link_is_in_wheel_cache=cache_entry.persistent + ) + + super().__init__( + link=link, + source_link=source_link, + ireq=ireq, + factory=factory, + name=name, + version=version, + ) + + def _prepare_distribution(self) -> BaseDistribution: + preparer = self._factory.preparer + return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True) + + +class EditableCandidate(_InstallRequirementBackedCandidate): + is_editable = True + + def __init__( + self, + link: Link, + template: InstallRequirement, + factory: "Factory", + name: Optional[NormalizedName] = None, + version: Optional[Version] = None, + ) -> None: + super().__init__( + link=link, + source_link=link, + ireq=make_install_req_from_editable(link, template), + factory=factory, + name=name, + version=version, + ) + + def _prepare_distribution(self) -> BaseDistribution: + return self._factory.preparer.prepare_editable_requirement(self._ireq) + + +class AlreadyInstalledCandidate(Candidate): + is_installed = True + source_link = None + + def __init__( + self, + dist: BaseDistribution, + template: InstallRequirement, + factory: "Factory", + ) -> None: + self.dist = dist + self._ireq = _make_install_req_from_dist(dist, template) + self._factory = factory + self._version = None + + # This is just logging some messages, so we can do it eagerly. + # The returned dist would be exactly the same as self.dist because we + # set satisfied_by in _make_install_req_from_dist. + # TODO: Supply reason based on force_reinstall and upgrade_strategy. 
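+        # For a distribution that is already present, this eager preparation
+        # is what produces the familiar log output, roughly of the shape
+        # "Requirement already satisfied: <req>" (the exact wording is the
+        # preparer's concern, not this class's).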
+ skip_reason = "already satisfied" + factory.preparer.prepare_installed_requirement(self._ireq, skip_reason) + + def __str__(self) -> str: + return str(self.dist) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.dist!r})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AlreadyInstalledCandidate): + return NotImplemented + return self.name == other.name and self.version == other.version + + def __hash__(self) -> int: + return hash((self.name, self.version)) + + @property + def project_name(self) -> NormalizedName: + return self.dist.canonical_name + + @property + def name(self) -> str: + return self.project_name + + @property + def version(self) -> Version: + if self._version is None: + self._version = self.dist.version + return self._version + + @property + def is_editable(self) -> bool: + return self.dist.editable + + def format_for_error(self) -> str: + return f"{self.name} {self.version} (Installed)" + + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: + if not with_requires: + return + + try: + for r in self.dist.iter_dependencies(): + yield from self._factory.make_requirements_from_spec(str(r), self._ireq) + except InvalidRequirement as exc: + raise InvalidInstalledPackage(dist=self.dist, invalid_exc=exc) from None + + def get_install_requirement(self) -> Optional[InstallRequirement]: + return None + + +class ExtrasCandidate(Candidate): + """A candidate that has 'extras', indicating additional dependencies. + + Requirements can be for a project with dependencies, something like + foo[extra]. The extras don't affect the project/version being installed + directly, but indicate that we need additional dependencies. We model that + by having an artificial ExtrasCandidate that wraps the "base" candidate. + + The ExtrasCandidate differs from the base in the following ways: + + 1. It has a unique name, of the form foo[extra]. This causes the resolver + to treat it as a separate node in the dependency graph. + 2. When we're getting the candidate's dependencies, + a) We specify that we want the extra dependencies as well. + b) We add a dependency on the base candidate. + See below for why this is needed. + 3. We return None for the underlying InstallRequirement, as the base + candidate will provide it, and we don't want to end up with duplicates. + + The dependency on the base candidate is needed so that the resolver can't + decide that it should recommend foo[extra1] version 1.0 and foo[extra2] + version 2.0. Having those candidates depend on foo=1.0 and foo=2.0 + respectively forces the resolver to recognise that this is a conflict. + """ + + def __init__( + self, + base: BaseCandidate, + extras: FrozenSet[str], + *, + comes_from: Optional[InstallRequirement] = None, + ) -> None: + """ + :param comes_from: the InstallRequirement that led to this candidate if it + differs from the base's InstallRequirement. This will often be the + case in the sense that this candidate's requirement has the extras + while the base's does not. Unlike the InstallRequirement backed + candidates, this requirement is used solely for reporting purposes, + it does not do any leg work. 
+ """ + self.base = base + self.extras = frozenset(canonicalize_name(e) for e in extras) + self._comes_from = comes_from if comes_from is not None else self.base._ireq + + def __str__(self) -> str: + name, rest = str(self.base).split(" ", 1) + return "{}[{}] {}".format(name, ",".join(self.extras), rest) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(base={self.base!r}, extras={self.extras!r})" + + def __hash__(self) -> int: + return hash((self.base, self.extras)) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self.base == other.base and self.extras == other.extras + return False + + @property + def project_name(self) -> NormalizedName: + return self.base.project_name + + @property + def name(self) -> str: + """The normalised name of the project the candidate refers to""" + return format_name(self.base.project_name, self.extras) + + @property + def version(self) -> Version: + return self.base.version + + def format_for_error(self) -> str: + return "{} [{}]".format( + self.base.format_for_error(), ", ".join(sorted(self.extras)) + ) + + @property + def is_installed(self) -> bool: + return self.base.is_installed + + @property + def is_editable(self) -> bool: + return self.base.is_editable + + @property + def source_link(self) -> Optional[Link]: + return self.base.source_link + + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: + factory = self.base._factory + + # Add a dependency on the exact base + # (See note 2b in the class docstring) + yield factory.make_requirement_from_candidate(self.base) + if not with_requires: + return + + # The user may have specified extras that the candidate doesn't + # support. We ignore any unsupported extras here. + valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras()) + invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras()) + for extra in sorted(invalid_extras): + logger.warning( + "%s %s does not provide the extra '%s'", + self.base.name, + self.version, + extra, + ) + + for r in self.base.dist.iter_dependencies(valid_extras): + yield from factory.make_requirements_from_spec( + str(r), + self._comes_from, + valid_extras, + ) + + def get_install_requirement(self) -> Optional[InstallRequirement]: + # We don't return anything here, because we always + # depend on the base candidate, and we'll get the + # install requirement from that. + return None + + +class RequiresPythonCandidate(Candidate): + is_installed = False + source_link = None + + def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None: + if py_version_info is not None: + version_info = normalize_version_info(py_version_info) + else: + version_info = sys.version_info[:3] + self._version = Version(".".join(str(c) for c in version_info)) + + # We don't need to implement __eq__() and __ne__() since there is always + # only one RequiresPythonCandidate in a resolution, i.e. the host Python. + # The built-in object.__eq__() and object.__ne__() do exactly what we want. 
+ + def __str__(self) -> str: + return f"Python {self._version}" + + @property + def project_name(self) -> NormalizedName: + return REQUIRES_PYTHON_IDENTIFIER + + @property + def name(self) -> str: + return REQUIRES_PYTHON_IDENTIFIER + + @property + def version(self) -> Version: + return self._version + + def format_for_error(self) -> str: + return f"Python {self.version}" + + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: + return () + + def get_install_requirement(self) -> Optional[InstallRequirement]: + return None diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/provider.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/provider.py new file mode 100644 index 0000000000000000000000000000000000000000..fb0dd85f1124bed20da9402ada714af3f3584e07 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/provider.py @@ -0,0 +1,258 @@ +import collections +import math +from functools import lru_cache +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + Iterator, + Mapping, + Sequence, + TypeVar, + Union, +) + +from pip._vendor.resolvelib.providers import AbstractProvider + +from .base import Candidate, Constraint, Requirement +from .candidates import REQUIRES_PYTHON_IDENTIFIER +from .factory import Factory + +if TYPE_CHECKING: + from pip._vendor.resolvelib.providers import Preference + from pip._vendor.resolvelib.resolvers import RequirementInformation + + PreferenceInformation = RequirementInformation[Requirement, Candidate] + + _ProviderBase = AbstractProvider[Requirement, Candidate, str] +else: + _ProviderBase = AbstractProvider + +# Notes on the relationship between the provider, the factory, and the +# candidate and requirement classes. +# +# The provider is a direct implementation of the resolvelib class. Its role +# is to deliver the API that resolvelib expects. +# +# Rather than work with completely abstract "requirement" and "candidate" +# concepts as resolvelib does, pip has concrete classes implementing these two +# ideas. The API of Requirement and Candidate objects are defined in the base +# classes, but essentially map fairly directly to the equivalent provider +# methods. In particular, `find_matches` and `is_satisfied_by` are +# requirement methods, and `get_dependencies` is a candidate method. +# +# The factory is the interface to pip's internal mechanisms. It is stateless, +# and is created by the resolver and held as a property of the provider. It is +# responsible for creating Requirement and Candidate objects, and provides +# services to those objects (access to pip's finder and preparer). + + +D = TypeVar("D") +V = TypeVar("V") + + +def _get_with_identifier( + mapping: Mapping[str, V], + identifier: str, + default: D, +) -> Union[D, V]: + """Get item from a package name lookup mapping with a resolver identifier. + + This extra logic is needed when the target mapping is keyed by package + name, which cannot be directly looked up with an identifier (which may + contain requested extras). Additional logic is added to also look up a value + by "cleaning up" the extras from the identifier. + """ + if identifier in mapping: + return mapping[identifier] + # HACK: Theoretically we should check whether this identifier is a valid + # "NAME[EXTRAS]" format, and parse out the name part with packaging or + # some regular expression. 
But since pip's resolver only spits out three
+    # kinds of identifiers: normalized PEP 503 names, normalized names plus
+    # extras, and Requires-Python, we can cheat a bit here.
+    name, open_bracket, _ = identifier.partition("[")
+    if open_bracket and name in mapping:
+        return mapping[name]
+    return default
+
+
+class PipProvider(_ProviderBase):
+    """Pip's provider implementation for resolvelib.
+
+    :params constraints: A mapping of constraints specified by the user. Keys
+        are canonicalized project names.
+    :params ignore_dependencies: Whether the user specified ``--no-deps``.
+    :params upgrade_strategy: The user-specified upgrade strategy.
+    :params user_requested: A set of canonicalized package names that the user
+        supplied for pip to install/upgrade.
+    """
+
+    def __init__(
+        self,
+        factory: Factory,
+        constraints: Dict[str, Constraint],
+        ignore_dependencies: bool,
+        upgrade_strategy: str,
+        user_requested: Dict[str, int],
+    ) -> None:
+        self._factory = factory
+        self._constraints = constraints
+        self._ignore_dependencies = ignore_dependencies
+        self._upgrade_strategy = upgrade_strategy
+        self._user_requested = user_requested
+        self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf)
+
+    def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str:
+        return requirement_or_candidate.name
+
+    def get_preference(
+        self,
+        identifier: str,
+        resolutions: Mapping[str, Candidate],
+        candidates: Mapping[str, Iterator[Candidate]],
+        information: Mapping[str, Iterable["PreferenceInformation"]],
+        backtrack_causes: Sequence["PreferenceInformation"],
+    ) -> "Preference":
+        """Produce a sort key for given requirement based on preference.
+
+        The lower the return value is, the more preferred this group of
+        arguments is.
+
+        Currently pip considers the following in order:
+
+        * Prefer if any of the known requirements is "direct", e.g. points to an
+          explicit URL.
+        * If equal, prefer if any requirement is "pinned", i.e. contains
+          operator ``===`` or ``==``.
+        * If equal, calculate an approximate "depth" and resolve requirements
+          closer to the user-specified requirements first. If the depth cannot
+          be determined (e.g. due to no matching parents), it is considered
+          infinite.
+        * Order user-specified requirements by the order they are specified.
+        * If equal, prefer "non-free" requirements, i.e. those containing at
+          least one operator, such as ``>=`` or ``<``.
+        * If equal, order alphabetically for consistency (helps debuggability).
+        """
+        try:
+            next(iter(information[identifier]))
+        except StopIteration:
+            # There is no information for this identifier, so there are no
+            # known candidates.
+ has_information = False + else: + has_information = True + + if has_information: + lookups = (r.get_candidate_lookup() for r, _ in information[identifier]) + candidate, ireqs = zip(*lookups) + else: + candidate, ireqs = None, () + + operators = [ + specifier.operator + for specifier_set in (ireq.specifier for ireq in ireqs if ireq) + for specifier in specifier_set + ] + + direct = candidate is not None + pinned = any(op[:2] == "==" for op in operators) + unfree = bool(operators) + + try: + requested_order: Union[int, float] = self._user_requested[identifier] + except KeyError: + requested_order = math.inf + if has_information: + parent_depths = ( + self._known_depths[parent.name] if parent is not None else 0.0 + for _, parent in information[identifier] + ) + inferred_depth = min(d for d in parent_depths) + 1.0 + else: + inferred_depth = math.inf + else: + inferred_depth = 1.0 + self._known_depths[identifier] = inferred_depth + + requested_order = self._user_requested.get(identifier, math.inf) + + # Requires-Python has only one candidate and the check is basically + # free, so we always do it first to avoid needless work if it fails. + requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER + + # Prefer the causes of backtracking on the assumption that the problem + # resolving the dependency tree is related to the failures that caused + # the backtracking + backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes) + + return ( + not requires_python, + not direct, + not pinned, + not backtrack_cause, + inferred_depth, + requested_order, + not unfree, + identifier, + ) + + def find_matches( + self, + identifier: str, + requirements: Mapping[str, Iterator[Requirement]], + incompatibilities: Mapping[str, Iterator[Candidate]], + ) -> Iterable[Candidate]: + def _eligible_for_upgrade(identifier: str) -> bool: + """Are upgrades allowed for this project? + + This checks the upgrade strategy, and whether the project was one + that the user specified in the command line, in order to decide + whether we should upgrade if there's a newer version available. + + (Note that we don't need access to the `--upgrade` flag, because + an upgrade strategy of "to-satisfy-only" means that `--upgrade` + was not specified). 
+ """ + if self._upgrade_strategy == "eager": + return True + elif self._upgrade_strategy == "only-if-needed": + user_order = _get_with_identifier( + self._user_requested, + identifier, + default=None, + ) + return user_order is not None + return False + + constraint = _get_with_identifier( + self._constraints, + identifier, + default=Constraint.empty(), + ) + return self._factory.find_candidates( + identifier=identifier, + requirements=requirements, + constraint=constraint, + prefers_installed=(not _eligible_for_upgrade(identifier)), + incompatibilities=incompatibilities, + is_satisfied_by=self.is_satisfied_by, + ) + + @lru_cache(maxsize=None) + def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool: + return requirement.is_satisfied_by(candidate) + + def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]: + with_requires = not self._ignore_dependencies + return [r for r in candidate.iter_dependencies(with_requires) if r is not None] + + @staticmethod + def is_backtrack_cause( + identifier: str, backtrack_causes: Sequence["PreferenceInformation"] + ) -> bool: + for backtrack_cause in backtrack_causes: + if identifier == backtrack_cause.requirement.name: + return True + if backtrack_cause.parent and identifier == backtrack_cause.parent.name: + return True + return False diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/reporter.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..0594569d85010d23cc03b178c56cb472908ed59c --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/reporter.py @@ -0,0 +1,81 @@ +from collections import defaultdict +from logging import getLogger +from typing import Any, DefaultDict + +from pip._vendor.resolvelib.reporters import BaseReporter + +from .base import Candidate, Requirement + +logger = getLogger(__name__) + + +class PipReporter(BaseReporter): + def __init__(self) -> None: + self.reject_count_by_package: DefaultDict[str, int] = defaultdict(int) + + self._messages_at_reject_count = { + 1: ( + "pip is looking at multiple versions of {package_name} to " + "determine which version is compatible with other " + "requirements. This could take a while." + ), + 8: ( + "pip is still looking at multiple versions of {package_name} to " + "determine which version is compatible with other " + "requirements. This could take a while." + ), + 13: ( + "This is taking longer than usual. You might need to provide " + "the dependency resolver with stricter constraints to reduce " + "runtime. See https://pip.pypa.io/warnings/backtracking for " + "guidance. If you want to abort this run, press Ctrl + C." 
+ ), + } + + def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None: + self.reject_count_by_package[candidate.name] += 1 + + count = self.reject_count_by_package[candidate.name] + if count not in self._messages_at_reject_count: + return + + message = self._messages_at_reject_count[count] + logger.info("INFO: %s", message.format(package_name=candidate.name)) + + msg = "Will try a different candidate, due to conflict:" + for req_info in criterion.information: + req, parent = req_info.requirement, req_info.parent + # Inspired by Factory.get_installation_error + msg += "\n " + if parent: + msg += f"{parent.name} {parent.version} depends on " + else: + msg += "The user requested " + msg += req.format_for_error() + logger.debug(msg) + + +class PipDebuggingReporter(BaseReporter): + """A reporter that does an info log for every event it sees.""" + + def starting(self) -> None: + logger.info("Reporter.starting()") + + def starting_round(self, index: int) -> None: + logger.info("Reporter.starting_round(%r)", index) + + def ending_round(self, index: int, state: Any) -> None: + logger.info("Reporter.ending_round(%r, state)", index) + logger.debug("Reporter.ending_round(%r, %r)", index, state) + + def ending(self, state: Any) -> None: + logger.info("Reporter.ending(%r)", state) + + def adding_requirement(self, requirement: Requirement, parent: Candidate) -> None: + logger.info("Reporter.adding_requirement(%r, %r)", requirement, parent) + + def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None: + logger.info("Reporter.rejecting_candidate(%r, %r)", criterion, candidate) + + def pinning(self, candidate: Candidate) -> None: + logger.info("Reporter.pinning(%r)", candidate) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/requirements.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..b04f41b2191840865307be5dd9cec4271f19bcd2 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/requirements.py @@ -0,0 +1,245 @@ +from typing import Any, Optional + +from pip._vendor.packaging.specifiers import SpecifierSet +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name + +from pip._internal.req.constructors import install_req_drop_extras +from pip._internal.req.req_install import InstallRequirement + +from .base import Candidate, CandidateLookup, Requirement, format_name + + +class ExplicitRequirement(Requirement): + def __init__(self, candidate: Candidate) -> None: + self.candidate = candidate + + def __str__(self) -> str: + return str(self.candidate) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.candidate!r})" + + def __hash__(self) -> int: + return hash(self.candidate) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, ExplicitRequirement): + return False + return self.candidate == other.candidate + + @property + def project_name(self) -> NormalizedName: + # No need to canonicalize - the candidate did this + return self.candidate.project_name + + @property + def name(self) -> str: + # No need to canonicalize - the candidate did this + return self.candidate.name + + def format_for_error(self) -> str: + return self.candidate.format_for_error() + + def get_candidate_lookup(self) -> CandidateLookup: + return self.candidate, None + + def 
is_satisfied_by(self, candidate: Candidate) -> bool: + return candidate == self.candidate + + +class SpecifierRequirement(Requirement): + def __init__(self, ireq: InstallRequirement) -> None: + assert ireq.link is None, "This is a link, not a specifier" + self._ireq = ireq + self._equal_cache: Optional[str] = None + self._hash: Optional[int] = None + self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + + @property + def _equal(self) -> str: + if self._equal_cache is not None: + return self._equal_cache + + self._equal_cache = str(self._ireq) + return self._equal_cache + + def __str__(self) -> str: + return str(self._ireq.req) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self._ireq.req)!r})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SpecifierRequirement): + return NotImplemented + return self._equal == other._equal + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash(self._equal) + return self._hash + + @property + def project_name(self) -> NormalizedName: + assert self._ireq.req, "Specifier-backed ireq is always PEP 508" + return canonicalize_name(self._ireq.req.name) + + @property + def name(self) -> str: + return format_name(self.project_name, self._extras) + + def format_for_error(self) -> str: + # Convert comma-separated specifiers into "A, B, ..., F and G" + # This makes the specifier a bit more "human readable", without + # risking a change in meaning. (Hopefully! Not all edge cases have + # been checked) + parts = [s.strip() for s in str(self).split(",")] + if len(parts) == 0: + return "" + elif len(parts) == 1: + return parts[0] + + return ", ".join(parts[:-1]) + " and " + parts[-1] + + def get_candidate_lookup(self) -> CandidateLookup: + return None, self._ireq + + def is_satisfied_by(self, candidate: Candidate) -> bool: + assert candidate.name == self.name, ( + f"Internal issue: Candidate is not for this requirement " + f"{candidate.name} vs {self.name}" + ) + # We can safely always allow prereleases here since PackageFinder + # already implements the prerelease logic, and would have filtered out + # prerelease candidates if the user does not expect them. + assert self._ireq.req, "Specifier-backed ireq is always PEP 508" + spec = self._ireq.req.specifier + return spec.contains(candidate.version, prereleases=True) + + +class SpecifierWithoutExtrasRequirement(SpecifierRequirement): + """ + Requirement backed by an install requirement on a base package. + Trims extras from its install requirement if there are any. 
+ """ + + def __init__(self, ireq: InstallRequirement) -> None: + assert ireq.link is None, "This is a link, not a specifier" + self._ireq = install_req_drop_extras(ireq) + self._equal_cache: Optional[str] = None + self._hash: Optional[int] = None + self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras) + + @property + def _equal(self) -> str: + if self._equal_cache is not None: + return self._equal_cache + + self._equal_cache = str(self._ireq) + return self._equal_cache + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SpecifierWithoutExtrasRequirement): + return NotImplemented + return self._equal == other._equal + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash(self._equal) + return self._hash + + +class RequiresPythonRequirement(Requirement): + """A requirement representing Requires-Python metadata.""" + + def __init__(self, specifier: SpecifierSet, match: Candidate) -> None: + self.specifier = specifier + self._specifier_string = str(specifier) # for faster __eq__ + self._hash: Optional[int] = None + self._candidate = match + + def __str__(self) -> str: + return f"Python {self.specifier}" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self.specifier)!r})" + + def __hash__(self) -> int: + if self._hash is not None: + return self._hash + + self._hash = hash((self._specifier_string, self._candidate)) + return self._hash + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, RequiresPythonRequirement): + return False + return ( + self._specifier_string == other._specifier_string + and self._candidate == other._candidate + ) + + @property + def project_name(self) -> NormalizedName: + return self._candidate.project_name + + @property + def name(self) -> str: + return self._candidate.name + + def format_for_error(self) -> str: + return str(self) + + def get_candidate_lookup(self) -> CandidateLookup: + if self.specifier.contains(self._candidate.version, prereleases=True): + return self._candidate, None + return None, None + + def is_satisfied_by(self, candidate: Candidate) -> bool: + assert candidate.name == self._candidate.name, "Not Python candidate" + # We can safely always allow prereleases here since PackageFinder + # already implements the prerelease logic, and would have filtered out + # prerelease candidates if the user does not expect them. 
+ return self.specifier.contains(candidate.version, prereleases=True) + + +class UnsatisfiableRequirement(Requirement): + """A requirement that cannot be satisfied.""" + + def __init__(self, name: NormalizedName) -> None: + self._name = name + + def __str__(self) -> str: + return f"{self._name} (unavailable)" + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self._name)!r})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, UnsatisfiableRequirement): + return NotImplemented + return self._name == other._name + + def __hash__(self) -> int: + return hash(self._name) + + @property + def project_name(self) -> NormalizedName: + return self._name + + @property + def name(self) -> str: + return self._name + + def format_for_error(self) -> str: + return str(self) + + def get_candidate_lookup(self) -> CandidateLookup: + return None, None + + def is_satisfied_by(self, candidate: Candidate) -> bool: + return False diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py new file mode 100644 index 0000000000000000000000000000000000000000..c12beef0b2a4344a4e0daca2540bbfd0c58ce777 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py @@ -0,0 +1,317 @@ +import contextlib +import functools +import logging +import os +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast + +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible +from pip._vendor.resolvelib import Resolver as RLResolver +from pip._vendor.resolvelib.structs import DirectedGraph + +from pip._internal.cache import WheelCache +from pip._internal.index.package_finder import PackageFinder +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req.constructors import install_req_extend_extras +from pip._internal.req.req_install import InstallRequirement +from pip._internal.req.req_set import RequirementSet +from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider +from pip._internal.resolution.resolvelib.provider import PipProvider +from pip._internal.resolution.resolvelib.reporter import ( + PipDebuggingReporter, + PipReporter, +) +from pip._internal.utils.packaging import get_requirement + +from .base import Candidate, Requirement +from .factory import Factory + +if TYPE_CHECKING: + from pip._vendor.resolvelib.resolvers import Result as RLResult + + Result = RLResult[Requirement, Candidate, str] + + +logger = logging.getLogger(__name__) + + +class Resolver(BaseResolver): + _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} + + def __init__( + self, + preparer: RequirementPreparer, + finder: PackageFinder, + wheel_cache: Optional[WheelCache], + make_install_req: InstallRequirementProvider, + use_user_site: bool, + ignore_dependencies: bool, + ignore_installed: bool, + ignore_requires_python: bool, + force_reinstall: bool, + upgrade_strategy: str, + py_version_info: Optional[Tuple[int, ...]] = None, + ): + super().__init__() + assert upgrade_strategy in self._allowed_strategies + + self.factory = Factory( + finder=finder, + preparer=preparer, + make_install_req=make_install_req, + wheel_cache=wheel_cache, + use_user_site=use_user_site, + force_reinstall=force_reinstall, + 
ignore_installed=ignore_installed,
+            ignore_requires_python=ignore_requires_python,
+            py_version_info=py_version_info,
+        )
+        self.ignore_dependencies = ignore_dependencies
+        self.upgrade_strategy = upgrade_strategy
+        self._result: Optional[Result] = None
+
+    def resolve(
+        self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
+    ) -> RequirementSet:
+        collected = self.factory.collect_root_requirements(root_reqs)
+        provider = PipProvider(
+            factory=self.factory,
+            constraints=collected.constraints,
+            ignore_dependencies=self.ignore_dependencies,
+            upgrade_strategy=self.upgrade_strategy,
+            user_requested=collected.user_requested,
+        )
+        if "PIP_RESOLVER_DEBUG" in os.environ:
+            reporter: BaseReporter = PipDebuggingReporter()
+        else:
+            reporter = PipReporter()
+        resolver: RLResolver[Requirement, Candidate, str] = RLResolver(
+            provider,
+            reporter,
+        )
+
+        try:
+            limit_how_complex_resolution_can_be = 200000
+            result = self._result = resolver.resolve(
+                collected.requirements, max_rounds=limit_how_complex_resolution_can_be
+            )
+
+        except ResolutionImpossible as e:
+            error = self.factory.get_installation_error(
+                cast("ResolutionImpossible[Requirement, Candidate]", e),
+                collected.constraints,
+            )
+            raise error from e
+
+        req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
+        # Process candidates with extras last, to ensure their base equivalent
+        # is already in the req_set if appropriate.
+        # Python's sort is stable, so using a binary key function keeps
+        # relative order within both subsets.
+        for candidate in sorted(
+            result.mapping.values(), key=lambda c: c.name != c.project_name
+        ):
+            ireq = candidate.get_install_requirement()
+            if ireq is None:
+                if candidate.name != candidate.project_name:
+                    # extend existing req's extras
+                    with contextlib.suppress(KeyError):
+                        req = req_set.get_requirement(candidate.project_name)
+                        req_set.add_named_requirement(
+                            install_req_extend_extras(
+                                req, get_requirement(candidate.name).extras
+                            )
+                        )
+                continue
+
+            # Check if there is already an installation under the same name,
+            # and set a flag for later stages to uninstall it, if needed.
+            installed_dist = self.factory.get_dist_to_uninstall(candidate)
+            if installed_dist is None:
+                # There is no existing installation -- nothing to uninstall.
+                ireq.should_reinstall = False
+            elif self.factory.force_reinstall:
+                # The --force-reinstall flag is set -- reinstall.
+                ireq.should_reinstall = True
+            elif installed_dist.version != candidate.version:
+                # The installation is different in version -- reinstall.
+                ireq.should_reinstall = True
+            elif candidate.is_editable or installed_dist.editable:
+                # The incoming distribution is editable, or differs in
+                # editable-ness from the installation -- reinstall.
+                ireq.should_reinstall = True
+            elif candidate.source_link and candidate.source_link.is_file:
+                # The incoming distribution is under file://
+                if candidate.source_link.is_wheel:
+                    # is a local wheel -- do nothing.
+                    logger.info(
+                        "%s is already installed with the same version as the "
+                        "provided wheel. Use --force-reinstall to force an "
+                        "installation of the wheel.",
+                        ireq.name,
+                    )
+                    continue
+
+                # is a local sdist or path -- reinstall
+                ireq.should_reinstall = True
+            else:
+                continue
+
+            link = candidate.source_link
+            if link and link.is_yanked:
+                # The reason for being yanked can contain non-ASCII characters.
+                msg = (
+                    "The candidate selected for download or install is a "
+                    "yanked version: {name!r} candidate (version {version} "
+                    "at {link})\nReason for being yanked: {reason}"
+                ).format(
+                    name=candidate.name,
+                    version=candidate.version,
+                    link=link,
+                    reason=link.yanked_reason or "<none given>",
+                )
+                logger.warning(msg)
+
+            req_set.add_named_requirement(ireq)
+
+        reqs = req_set.all_requirements
+        self.factory.preparer.prepare_linked_requirements_more(reqs)
+        for req in reqs:
+            req.prepared = True
+            req.needs_more_preparation = False
+        return req_set
+
+    def get_installation_order(
+        self, req_set: RequirementSet
+    ) -> List[InstallRequirement]:
+        """Get order for installation of requirements in RequirementSet.
+
+        The returned list contains a requirement before another that depends on
+        it. This helps ensure that the environment is kept consistent as they
+        get installed one-by-one.
+
+        The current implementation creates a topological ordering of the
+        dependency graph, giving more weight to packages with fewer
+        or no dependencies, while breaking any cycles in the graph at
+        arbitrary points. We make no guarantees about where the cycle
+        would be broken, other than it *would* be broken.
+        """
+        assert self._result is not None, "must call resolve() first"
+
+        if not req_set.requirements:
+            # Nothing is left to install, so we do not need an order.
+            return []
+
+        graph = self._result.graph
+        weights = get_topological_weights(graph, set(req_set.requirements.keys()))
+
+        sorted_items = sorted(
+            req_set.requirements.items(),
+            key=functools.partial(_req_set_item_sorter, weights=weights),
+            reverse=True,
+        )
+        return [ireq for _, ireq in sorted_items]
+
+
+def get_topological_weights(
+    graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]
+) -> Dict[Optional[str], int]:
+    """Assign weights to each node based on how "deep" they are.
+
+    This implementation may change at any point in the future without prior
+    notice.
+
+    We first simplify the dependency graph by pruning any leaves and giving them
+    the highest weight: a package without any dependencies should be installed
+    first. This is done again and again in the same way, giving ever less weight
+    to the newly found leaves. The loop stops when no leaves are left: all
+    remaining packages have at least one dependency left in the graph.
+
+    Then we continue with the remaining graph, taking the length of the
+    longest path from the root to each node, ignoring any paths that visit a
+    single node twice (i.e. cycles). This is done with a depth-first search
+    through the graph, while keeping track of the path to each node.
+
+    Cycles in the graph would result in a node being revisited while also
+    being on its own path. In this case, we take no action. This helps ensure
+    we don't get stuck in a cycle.
+
+    When assigning weight, the longer path (i.e. larger length) is preferred.
+
+    We are only interested in the weights of packages that are in the
+    requirement_keys.
+    """
+    path: Set[Optional[str]] = set()
+    weights: Dict[Optional[str], int] = {}
+
+    def visit(node: Optional[str]) -> None:
+        if node in path:
+            # We hit a cycle, so we'll break it here.
+            return
+
+        # Time to visit the children!
+        path.add(node)
+        for child in graph.iter_children(node):
+            visit(child)
+        path.remove(node)
+
+        if node not in requirement_keys:
+            return
+
+        last_known_parent_count = weights.get(node, 0)
+        weights[node] = max(last_known_parent_count, len(path))
+
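+    # A worked example of the cycle handling above, with hypothetical
+    # packages A and B (both in requirement_keys) and edges
+    # None -> A, A -> B, B -> A: visiting B finds its child A already on
+    # the path, so the back-edge is skipped; A gets weight 1 and B gets
+    # weight 2, and the cycle is broken instead of looping forever.
+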
+    # Simplify the graph, pruning leaves that have no dependencies.
+    # This is needed for large graphs (say over 200 packages) because the
+    # `visit` function is exponentially slower in that case, taking minutes.
+    # See https://github.com/pypa/pip/issues/10557
+    # We will loop until we explicitly break the loop.
+    while True:
+        leaves = set()
+        for key in graph:
+            if key is None:
+                continue
+            for _child in graph.iter_children(key):
+                # This means we have at least one child
+                break
+            else:
+                # No child.
+                leaves.add(key)
+        if not leaves:
+            # We are done simplifying.
+            break
+        # Calculate the weight for the leaves.
+        weight = len(graph) - 1
+        for leaf in leaves:
+            if leaf not in requirement_keys:
+                continue
+            weights[leaf] = weight
+        # Remove the leaves from the graph, making it simpler.
+        for leaf in leaves:
+            graph.remove(leaf)
+
+    # Visit the remaining graph.
+    # `None` is guaranteed to be the root node by resolvelib.
+    visit(None)
+
+    # Sanity check: all requirement keys should be in the weights,
+    # and no other keys should be in the weights.
+    difference = set(weights.keys()).difference(requirement_keys)
+    assert not difference, difference
+
+    return weights
+
+
+def _req_set_item_sorter(
+    item: Tuple[str, InstallRequirement],
+    weights: Dict[Optional[str], int],
+) -> Tuple[int, str]:
+    """Key function used to sort install requirements for installation.
+
+    Based on the "weight" mapping calculated in ``get_installation_order()``.
+    The canonical package name is returned as the second member as a tie-
+    breaker to ensure the result is predictable, which is useful in tests.
+    """
+    name = canonicalize_name(item[0])
+    return weights[name], name
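+
+
+# A worked example of the pruning pass in get_topological_weights, with
+# hypothetical packages A and B (both in requirement_keys) and edges
+# None -> A and A -> B: B is the only leaf, so it receives weight
+# len(graph) - 1 == 2 and is removed; A then becomes a leaf and receives
+# weight 1. Sorting with reverse=True in get_installation_order thus puts
+# B before A, i.e. dependencies are installed first.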