diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugging.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugging.py new file mode 100644 index 0000000000000000000000000000000000000000..edb3f4e8ca582e4f0bc938c761b1fdf1f9d56f6b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugging.py @@ -0,0 +1,20 @@ +############################################### +# +# Odds and ends for debugging +# +############################################### + +def print_call_chain(*args): + import sys + print(" ".join(map(str, args))) + f = sys._getframe(1) + while f: + name = f.f_code.co_name + s = f.f_locals.get('self', None) + if s: + c = getattr(s, "__class__", None) + if c: + name = "%s.%s" % (c.__name__, name) + print("Called from: %s %s" % (name, f.f_lineno)) + f = f.f_back + print("-" * 70) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/atomic.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/atomic.pxd new file mode 100644 index 0000000000000000000000000000000000000000..89a8e6ffc2eb2be0c24370b8af2d1f390e47e8f7 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/atomic.pxd @@ -0,0 +1,59 @@ + +cdef extern from "<atomic>" namespace "std" nogil: + + cdef enum memory_order: + memory_order_relaxed + memory_order_consume + memory_order_acquire + memory_order_release + memory_order_acq_rel + memory_order_seq_cst + + cdef cppclass atomic[T]: + atomic() + atomic(T) + + bint is_lock_free() + void store(T) + void store(T, memory_order) + T load() + T load(memory_order) + T exchange(T) + T exchange(T, memory_order) + + bint compare_exchange_weak(T&, T, memory_order, memory_order) + bint compare_exchange_weak(T&, T, memory_order) + bint compare_exchange_weak(T&, T) + bint compare_exchange_strong(T&, T, memory_order, memory_order) + bint compare_exchange_strong(T&, T, memory_order) + bint compare_exchange_strong(T&, T) + + T fetch_add(T, memory_order) + T fetch_add(T) + T fetch_sub(T, memory_order) + T fetch_sub(T) + T fetch_and(T, memory_order) + T fetch_and(T) + T fetch_or(T, memory_order) + T fetch_or(T) + T fetch_xor(T, memory_order) + T fetch_xor(T) + + T operator++() + T operator++(int) + T operator--() + T operator--(int) + + # modify-in-place operators not yet supported by Cython: + # T operator+=(T) + # T operator-=(T) + # T operator&=(T) + # T operator|=(T) + # T operator^=(T) + + bint operator==(atomic[T]&, atomic[T]&) + bint operator==(atomic[T]&, T&) + bint operator==(T&, atomic[T]&) + bint operator!=(atomic[T]&, atomic[T]&) + bint operator!=(atomic[T]&, T&) + bint operator!=(T&, atomic[T]&) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/bit.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/bit.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3b6ffed0553a1b81d1aaa230046cbaead9f5fd42 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/bit.pxd @@ -0,0 +1,29 @@ +cdef extern from "<bit>" namespace "std" nogil: + # bit_cast (gcc >= 11.0, clang >= 14.0) + cdef To bit_cast[To, From](From&) + + # byteswap (C++23) + #cdef T byteswap[T](T) + + # integral powers of 2 (gcc >= 10.0, clang >= 12.0) + cdef bint has_single_bit[T](T) + cdef T bit_ceil[T](T) + cdef T bit_floor[T](T) + cdef int bit_width[T](T) + + # 
rotating (gcc >= 9.0, clang >= 9.0) + cdef T rotl[T](T, int shift) + cdef T rotr[T](T, int shift) + + # counting (gcc >= 9.0, clang >= 9.0) + cdef int countl_zero[T](T) + cdef int countl_one[T](T) + cdef int countr_zero[T](T) + cdef int countr_one[T](T) + cdef int popcount[T](T) + + # endian + cpdef enum class endian(int): + little, + big, + native diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/cmath.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/cmath.pxd new file mode 100644 index 0000000000000000000000000000000000000000..edc1983830752984da04cdd1b5874370d56399b4 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/cmath.pxd @@ -0,0 +1,518 @@ + +cdef extern from "<cmath>" namespace "std" nogil: + # all C99 functions + float acos(float x) except + + double acos(double x) except + + long double acos(long double x) except + + float acosf(float x) except + + long double acosl(long double x) except + + + float asin(float x) except + + double asin(double x) except + + long double asin(long double x) except + + float asinf(float x) except + + long double asinl(long double x) except + + + float atan(float x) except + + double atan(double x) except + + long double atan(long double x) except + + float atanf(float x) except + + long double atanl(long double x) except + + + float atan2(float y, float x) except + + double atan2(double y, double x) except + + long double atan2(long double y, long double x) except + + float atan2f(float y, float x) except + + long double atan2l(long double y, long double x) except + + + float cos(float x) except + + double cos(double x) except + + long double cos(long double x) except + + float cosf(float x) except + + long double cosl(long double x) except + + + float sin(float x) except + + double sin(double x) except + + long double sin(long double x) except + + float sinf(float x) except + + long double sinl(long double x) except + + + float tan(float x) except + + double tan(double x) except + + long double tan(long double x) except + + float tanf(float x) except + + long double tanl(long double x) except + + + float acosh(float x) except + + double acosh(double x) except + + long double acosh(long double x) except + + float acoshf(float x) except + + long double acoshl(long double x) except + + + float asinh(float x) except + + double asinh(double x) except + + long double asinh(long double x) except + + float asinhf(float x) except + + long double asinhl(long double x) except + + + float atanh(float x) except + + double atanh(double x) except + + long double atanh(long double x) except + + float atanhf(float x) except + + long double atanhl(long double x) except + + + float cosh(float x) except + + double cosh(double x) except + + long double cosh(long double x) except + + float coshf(float x) except + + long double coshl(long double x) except + + + float sinh(float x) except + + double sinh(double x) except + + long double sinh(long double x) except + + float sinhf(float x) except + + long double sinhl(long double x) except + + + float tanh(float x) except + + double tanh(double x) except + + long double tanh(long double x) except + + float tanhf(float x) except + + long double tanhl(long double x) except + + + float exp(float x) except + + double exp(double x) except + + long double exp(long double x) except + + float expf(float x) except + + long double expl(long double x) except + + + float exp2(float x) except + + 
double exp2(double x) except + + long double exp2(long double x) except + + float exp2f(float x) except + + long double exp2l(long double x) except + + + float expm1(float x) except + + double expm1(double x) except + + long double expm1(long double x) except + + float expm1f(float x) except + + long double expm1l(long double x) except + + + float frexp(float value, int* exp) except + + double frexp(double value, int* exp) except + + long double frexp(long double value, int* exp) except + + float frexpf(float value, int* exp) except + + long double frexpl(long double value, int* exp) except + + + int ilogb(float x) except + + int ilogb(double x) except + + int ilogb(long double x) except + + int ilogbf(float x) except + + int ilogbl(long double x) except + + + float ldexp(float x, int exp) except + + double ldexp(double x, int exp) except + + long double ldexp(long double x, int exp) except + + float ldexpf(float x, int exp) except + + long double ldexpl(long double x, int exp) except + + + float log(float x) except + + double log(double x) except + + long double log(long double x) except + + float logf(float x) except + + long double logl(long double x) except + + + float log10(float x) except + + double log10(double x) except + + long double log10(long double x) except + + float log10f(float x) except + + long double log10l(long double x) except + + + float log1p(float x) except + + double log1p(double x) except + + long double log1p(long double x) except + + float log1pf(float x) except + + long double log1pl(long double x) except + + + float log2(float x) except + + double log2(double x) except + + long double log2(long double x) except + + float log2f(float x) except + + long double log2l(long double x) except + + + float logb(float x) except + + double logb(double x) except + + long double logb(long double x) except + + float logbf(float x) except + + long double logbl(long double x) except + + + float modf(float value, float* iptr) except + + double modf(double value, double* iptr) except + + long double modf(long double value, long double* iptr) except + + float modff(float value, float* iptr) except + + long double modfl(long double value, long double* iptr) except + + + float scalbn(float x, int n) except + + double scalbn(double x, int n) except + + long double scalbn(long double x, int n) except + + float scalbnf(float x, int n) except + + long double scalbnl(long double x, int n) except + + + float scalbln(float x, long int n) except + + double scalbln(double x, long int n) except + + long double scalbln(long double x, long int n) except + + float scalblnf(float x, long int n) except + + long double scalblnl(long double x, long int n) except + + + float cbrt(float x) except + + double cbrt(double x) except + + long double cbrt(long double x) except + + float cbrtf(float x) except + + long double cbrtl(long double x) except + + + # absolute values + int abs(int j) except + + long int abs(long int j) except + + long long int abs(long long int j) except + + float abs(float j) except + + double abs(double j) except + + long double abs(long double j) except + + + float fabs(float x) except + + double fabs(double x) except + + long double fabs(long double x) except + + float fabsf(float x) except + + long double fabsl(long double x) except + + + float hypot(float x, float y) except + + double hypot(double x, double y) except + + long double hypot(long double x, long double y) except + + float hypotf(float x, float y) except + + long double hypotl(long double x, long double y) except 
+ + + # C++17 three-dimensional hypotenuse + float hypot(float x, float y, float z) except + + double hypot(double x, double y, double z) except + + long double hypot(long double x, long double y, long double z) except + + + float pow(float x, float y) except + + double pow(double x, double y) except + + long double pow(long double x, long double y) except + + float powf(float x, float y) except + + long double powl(long double x, long double y) except + + + float sqrt(float x) except + + double sqrt(double x) except + + long double sqrt(long double x) except + + float sqrtf(float x) except + + long double sqrtl(long double x) except + + + float erf(float x) except + + double erf(double x) except + + long double erf(long double x) except + + float erff(float x) except + + long double erfl(long double x) except + + + float erfc(float x) except + + double erfc(double x) except + + long double erfc(long double x) except + + float erfcf(float x) except + + long double erfcl(long double x) except + + + float lgamma(float x) except + + double lgamma(double x) except + + long double lgamma(long double x) except + + float lgammaf(float x) except + + long double lgammal(long double x) except + + + float tgamma(float x) except + + double tgamma(double x) except + + long double tgamma(long double x) except + + float tgammaf(float x) except + + long double tgammal(long double x) except + + + float ceil(float x) except + + double ceil(double x) except + + long double ceil(long double x) except + + float ceilf(float x) except + + long double ceill(long double x) except + + + float floor(float x) except + + double floor(double x) except + + long double floor(long double x) except + + float floorf(float x) except + + long double floorl(long double x) except + + + float nearbyint(float x) except + + double nearbyint(double x) except + + long double nearbyint(long double x) except + + float nearbyintf(float x) except + + long double nearbyintl(long double x) except + + + float rint(float x) except + + double rint(double x) except + + long double rint(long double x) except + + float rintf(float x) except + + long double rintl(long double x) except + + + long int lrint(float x) except + + long int lrint(double x) except + + long int lrint(long double x) except + + long int lrintf(float x) except + + long int lrintl(long double x) except + + + long long int llrint(float x) except + + long long int llrint(double x) except + + long long int llrint(long double x) except + + long long int llrintf(float x) except + + long long int llrintl(long double x) except + + + float round(float x) except + + double round(double x) except + + long double round(long double x) except + + float roundf(float x) except + + long double roundl(long double x) except + + + long int lround(float x) except + + long int lround(double x) except + + long int lround(long double x) except + + long int lroundf(float x) except + + long int lroundl(long double x) except + + + long long int llround(float x) except + + long long int llround(double x) except + + long long int llround(long double x) except + + long long int llroundf(float x) except + + long long int llroundl(long double x) except + + + float trunc(float x) except + + double trunc(double x) except + + long double trunc(long double x) except + + float truncf(float x) except + + long double truncl(long double x) except + + + float fmod(float x, float y) except + + double fmod(double x, double y) except + + long double fmod(long double x, long double y) except + + float fmodf(float x, 
float y) except + + long double fmodl(long double x, long double y) except + + + float remainder(float x, float y) except + + double remainder(double x, double y) except + + long double remainder(long double x, long double y) except + + float remainderf(float x, float y) except + + long double remainderl(long double x, long double y) except + + + float remquo(float x, float y, int* quo) except + + double remquo(double x, double y, int* quo) except + + long double remquo(long double x, long double y, int* quo) except + + float remquof(float x, float y, int* quo) except + + long double remquol(long double x, long double y, int* quo) except + + + float copysign(float x, float y) except + + double copysign(double x, double y) except + + long double copysign(long double x, long double y) except + + float copysignf(float x, float y) except + + long double copysignl(long double x, long double y) except + + + double nan(const char* tagp) except + + float nanf(const char* tagp) except + + long double nanl(const char* tagp) except + + + float nextafter(float x, float y) except + + double nextafter(double x, double y) except + + long double nextafter(long double x, long double y) except + + float nextafterf(float x, float y) except + + long double nextafterl(long double x, long double y) except + + + float nexttoward(float x, long double y) except + + double nexttoward(double x, long double y) except + + long double nexttoward(long double x, long double y) except + + float nexttowardf(float x, long double y) except + + long double nexttowardl(long double x, long double y) except + + + float fdim(float x, float y) except + + double fdim(double x, double y) except + + long double fdim(long double x, long double y) except + + float fdimf(float x, float y) except + + long double fdiml(long double x, long double y) except + + + float fmax(float x, float y) except + + double fmax(double x, double y) except + + long double fmax(long double x, long double y) except + + float fmaxf(float x, float y) except + + long double fmaxl(long double x, long double y) except + + + float fmin(float x, float y) except + + double fmin(double x, double y) except + + long double fmin(long double x, long double y) except + + float fminf(float x, float y) except + + long double fminl(long double x, long double y) except + + + float fma(float x, float y, float z) except + + double fma(double x, double y, double z) except + + long double fma(long double x, long double y, long double z) except + + float fmaf(float x, float y, float z) except + + long double fmal(long double x, long double y, long double z) except + + + # C++20 linear interpolation + float lerp(float a, float b, float t) + double lerp(double a, double b, double t) + long double lerp(long double a, long double b, long double t) + + # classification / comparison functions + int fpclassify(float x) except + + int fpclassify(double x) except + + int fpclassify(long double x) except + + + bint isfinite(float x) except + + bint isfinite(double x) except + + bint isfinite(long double x) except + + + bint isinf(float x) except + + bint isinf(double x) except + + bint isinf(long double x) except + + + bint isnan(float x) except + + bint isnan(double x) except + + bint isnan(long double x) except + + + bint isnormal(float x) except + + bint isnormal(double x) except + + bint isnormal(long double x) except + + + bint signbit(float x) except + + bint signbit(double x) except + + bint signbit(long double x) except + + + bint isgreater(float x, float y) except + + bint 
isgreater(double x, double y) except + + bint isgreater(long double x, long double y) except + + + bint isgreaterequal(float x, float y) except + + bint isgreaterequal(double x, double y) except + + bint isgreaterequal(long double x, long double y) except + + + bint isless(float x, float y) except + + bint isless(double x, double y) except + + bint isless(long double x, long double y) except + + + bint islessequal(float x, float y) except + + bint islessequal(double x, double y) except + + bint islessequal(long double x, long double y) except + + + bint islessgreater(float x, float y) except + + bint islessgreater(double x, double y) except + + bint islessgreater(long double x, long double y) except + + + bint isunordered(float x, float y) except + + bint isunordered(double x, double y) except + + bint isunordered(long double x, long double y) except + + + # C++17 mathematical special functions + + # associated Laguerre polynomials + double assoc_laguerre(unsigned int n, unsigned int m, double x) except + + float assoc_laguerref(unsigned int n, unsigned int m, float x) except + + long double assoc_laguerrel(unsigned int n, unsigned int m, long double x) except + + + # associated Legendre functions + double assoc_legendre(unsigned int l, unsigned int m, double x) except + + float assoc_legendref(unsigned int l, unsigned int m, float x) except + + long double assoc_legendrel(unsigned int l, unsigned int m, long double x) except + + + # beta function + double beta(double x, double y) except + + float betaf(float x, float y) except + + long double betal(long double x, long double y) except + + + # complete elliptic integral of the first kind + double comp_ellint_1(double k) except + + float comp_ellint_1f(float k) except + + long double comp_ellint_1l(long double k) except + + + # complete elliptic integral of the second kind + double comp_ellint_2(double k) except + + float comp_ellint_2f(float k) except + + long double comp_ellint_2l(long double k) except + + + # complete elliptic integral of the third kind + double comp_ellint_3(double k, double nu) except + + float comp_ellint_3f(float k, float nu) except + + long double comp_ellint_3l(long double k, long double nu) except + + + # regular modified cylindrical Bessel functions + double cyl_bessel_i(double nu, double x) except + + float cyl_bessel_if(float nu, float x) except + + long double cyl_bessel_il(long double nu, long double x) except + + + # cylindrical Bessel functions of the first kind + double cyl_bessel_j(double nu, double x) except + + float cyl_bessel_jf(float nu, float x) except + + long double cyl_bessel_jl(long double nu, long double x) except + + + # irregular modified cylindrical Bessel functions + double cyl_bessel_k(double nu, double x) except + + float cyl_bessel_kf(float nu, float x) except + + long double cyl_bessel_kl(long double nu, long double x) except + + + # cylindrical Neumann functions + # cylindrical Bessel functions of the second kind + double cyl_neumann(double nu, double x) except + + float cyl_neumannf(float nu, float x) except + + long double cyl_neumannl(long double nu, long double x) except + + + # incomplete elliptic integral of the first kind + double ellint_1(double k, double phi) except + + float ellint_1f(float k, float phi) except + + long double ellint_1l(long double k, long double phi) except + + + # incomplete elliptic integral of the second kind + double ellint_2(double k, double phi) except + + float ellint_2f(float k, float phi) except + + long double ellint_2l(long double k, long double 
phi) except + + + # incomplete elliptic integral of the third kind + double ellint_3(double k, double nu, double phi) except + + float ellint_3f(float k, float nu, float phi) except + + long double ellint_3l(long double k, long double nu, long double phi) except + + + # exponential integral + double expint(double x) except + + float expintf(float x) except + + long double expintl(long double x) except + + + # Hermite polynomials + double hermite(unsigned int n, double x) except + + float hermitef(unsigned int n, float x) except + + long double hermitel(unsigned int n, long double x) except + + + # Laguerre polynomials + double laguerre(unsigned int n, double x) except + + float laguerref(unsigned int n, float x) except + + long double laguerrel(unsigned int n, long double x) except + + + # Legendre polynomials + double legendre(unsigned int l, double x) except + + float legendref(unsigned int l, float x) except + + long double legendrel(unsigned int l, long double x) except + + + # Riemann zeta function + double riemann_zeta(double x) except + + float riemann_zetaf(float x) except + + long double riemann_zetal(long double x) except + + + # spherical Bessel functions of the first kind + double sph_bessel(unsigned int n, double x) except + + float sph_besself(unsigned int n, float x) except + + long double sph_bessell(unsigned int n, long double x) except + + + # spherical associated Legendre functions + double sph_legendre(unsigned int l, unsigned int m, double theta) except + + float sph_legendref(unsigned int l, unsigned int m, float theta) except + + long double sph_legendrel(unsigned int l, unsigned int m, long double theta) except + + + # spherical Neumann functions + # spherical Bessel functions of the second kind + double sph_neumann(unsigned int n, double x) except + + float sph_neumannf(unsigned int n, float x) except + + long double sph_neumannl(unsigned int n, long double x) except + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/list.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/list.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b69cd573e53a430b31c0d278fa15c63f8f7016ed --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/list.pxd @@ -0,0 +1,117 @@ +cdef extern from "<list>" namespace "std" nogil: + cdef cppclass list[T,ALLOCATOR=*]: + ctypedef T value_type + ctypedef ALLOCATOR allocator_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + value_type& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + const_iterator(const_iterator&) except + + operator=(iterator&) except + + const value_type& operator*() + const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint 
operator!=(const_iterator) + + cppclass const_reverse_iterator + cppclass reverse_iterator: + reverse_iterator() except + + reverse_iterator(reverse_iterator&) except + + value_type& operator*() + reverse_iterator operator++() + reverse_iterator operator--() + reverse_iterator operator++(int) + reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + cppclass const_reverse_iterator: + const_reverse_iterator() except + + const_reverse_iterator(reverse_iterator&) except + + operator=(reverse_iterator&) except + + const value_type& operator*() + const_reverse_iterator operator++() + const_reverse_iterator operator--() + const_reverse_iterator operator++(int) + const_reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + + list() except + + list(list&) except + + list(size_t, T&) except + + #list operator=(list&) + bint operator==(list&, list&) + bint operator!=(list&, list&) + bint operator<(list&, list&) + bint operator>(list&, list&) + bint operator<=(list&, list&) + bint operator>=(list&, list&) + void assign(size_t, T&) except + + T& back() + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + void clear() + bint empty() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + iterator erase(iterator) + iterator erase(iterator, iterator) + T& front() + iterator insert(iterator, T&) + void insert(iterator, size_t, T&) + size_t max_size() + void merge(list&) except + + #void merge(list&, BinPred) + void pop_back() + void pop_front() + void push_back(T&) except + + void push_front(T&) except + + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator crbegin() + void remove(T&) except + + #void remove_if(UnPred) + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const_reverse_iterator crend() + void resize(size_t, T&) except + + void reverse() + size_t size() + void sort() except + + #void sort(BinPred) + void splice(iterator, list&) + void splice(iterator, list&, iterator) + void splice(iterator, list&, iterator, iterator) + void swap(list&) + void unique() + #void unique(BinPred) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/map.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/map.pxd new file mode 100644 index 0000000000000000000000000000000000000000..eb739509ac1f5ad0506a23f56ca6c957073f4e60 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/map.pxd @@ -0,0 +1,252 @@ +from .utility cimport pair + +cdef extern from "<map>" namespace "std" nogil: + cdef cppclass map[T, U, COMPARE=*, ALLOCATOR=*]: + ctypedef T key_type + ctypedef U mapped_type + ctypedef pair[const T, U] value_type + ctypedef COMPARE key_compare + ctypedef ALLOCATOR allocator_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + # correct would be value_type& but this does not work + # well 
with cython's code gen + pair[T, U]& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + const_iterator(const_iterator&) except + + operator=(iterator&) except + + # correct would be const value_type& but this does not work + # well with cython's code gen + const pair[T, U]& operator*() + const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + + cppclass const_reverse_iterator + cppclass reverse_iterator: + reverse_iterator() except + + reverse_iterator(reverse_iterator&) except + + # correct would be value_type& but this does not work + # well with cython's code gen + pair[T, U]& operator*() + reverse_iterator operator++() + reverse_iterator operator--() + reverse_iterator operator++(int) + reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + cppclass const_reverse_iterator: + const_reverse_iterator() except + + const_reverse_iterator(reverse_iterator&) except + + operator=(reverse_iterator&) except + + # correct would be const value_type& but this does not work + # well with cython's code gen + const pair[T, U]& operator*() + const_reverse_iterator operator++() + const_reverse_iterator operator--() + const_reverse_iterator operator++(int) + const_reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + + map() except + + map(map&) except + + #map(key_compare&) + U& operator[](const T&) + #map& operator=(map&) + bint operator==(map&, map&) + bint operator!=(map&, map&) + bint operator<(map&, map&) + bint operator>(map&, map&) + bint operator<=(map&, map&) + bint operator>=(map&, map&) + U& at(const T&) except + + const U& const_at "at"(const T&) except + + iterator begin() + const_iterator const_begin "begin" () + const_iterator cbegin() + void clear() + size_t count(const T&) + bint empty() + iterator end() + const_iterator const_end "end" () + const_iterator cend() + pair[iterator, iterator] equal_range(const T&) + pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) + iterator erase(iterator) + iterator const_erase "erase"(const_iterator) + iterator erase(const_iterator, const_iterator) + size_t erase(const T&) + iterator find(const T&) + const_iterator const_find "find" (const T&) + pair[iterator, bint] insert(const pair[T, U]&) except + + iterator insert(const_iterator, const pair[T, U]&) except + + void insert[InputIt](InputIt, InputIt) except + + #key_compare key_comp() + iterator lower_bound(const T&) + const_iterator const_lower_bound "lower_bound"(const T&) + size_t max_size() + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator crbegin() + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const_reverse_iterator crend() + size_t size() + void swap(map&) + iterator upper_bound(const T&) + const_iterator const_upper_bound "upper_bound"(const T&) + 
#value_compare value_comp() + # C++20 + bint contains(const T&) + + cdef cppclass multimap[T, U, COMPARE=*, ALLOCATOR=*]: + ctypedef T key_type + ctypedef U mapped_type + ctypedef pair[const T, U] value_type + ctypedef COMPARE key_compare + ctypedef ALLOCATOR allocator_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + # correct would be value_type& but this does not work + # well with cython's code gen + pair[T, U]& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + const_iterator(const_iterator&) except + + operator=(iterator&) except + + # correct would be const value_type& but this does not work + # well with cython's code gen + const pair[T, U]& operator*() + const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + + cppclass const_reverse_iterator + cppclass reverse_iterator: + reverse_iterator() except + + reverse_iterator(reverse_iterator&) except + + # correct would be value_type& but this does not work + # well with cython's code gen + pair[T, U]& operator*() + reverse_iterator operator++() + reverse_iterator operator--() + reverse_iterator operator++(int) + reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + cppclass const_reverse_iterator: + const_reverse_iterator() except + + const_reverse_iterator(reverse_iterator&) except + + operator=(reverse_iterator&) except + + # correct would be const value_type& but this does not work + # well with cython's code gen + const pair[T, U]& operator*() + const_reverse_iterator operator++() + const_reverse_iterator operator--() + const_reverse_iterator operator++(int) + const_reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + + multimap() except + + multimap(const multimap&) except + + #multimap(key_compare&) + #multimap& operator=(multimap&) + bint operator==(const multimap&, const multimap&) + bint operator!=(const multimap&, const multimap&) + bint operator<(const multimap&, const multimap&) + bint operator>(const multimap&, const multimap&) + bint operator<=(const multimap&, const multimap&) + bint operator>=(const multimap&, const multimap&) + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + void clear() + size_t count(const T&) + bint empty() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + pair[iterator, iterator] equal_range(const T&) + pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) + iterator erase(iterator) + iterator const_erase "erase"(const_iterator) + iterator erase(const_iterator, 
const_iterator) + size_t erase(const T&) + iterator find(const T&) + const_iterator const_find "find"(const T&) + iterator insert(const pair[T, U]&) except + + iterator insert(const_iterator, const pair[T, U]&) except + + void insert[InputIt](InputIt, InputIt) except + + #key_compare key_comp() + iterator lower_bound(const T&) + const_iterator const_lower_bound "lower_bound"(const T&) + size_t max_size() + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator crbegin() + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const_reverse_iterator crend() + size_t size() + void swap(multimap&) + iterator upper_bound(const T&) + const_iterator const_upper_bound "upper_bound"(const T&) + #value_compare value_comp() + bint contains(const T&) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/numeric.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/numeric.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a9fb37205a56ef05090b0b5438eb94032c897b02 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/numeric.pxd @@ -0,0 +1,131 @@ +cdef extern from "<numeric>" namespace "std" nogil: + T inner_product[InputIt1, InputIt2, T](InputIt1 first1, InputIt1 last1, InputIt2 first2, T init) + + T inner_product[InputIt1, InputIt2, T, BinaryOperation1, BinaryOperation2](InputIt1 first1, InputIt1 last1, + InputIt2 first2, T init, + BinaryOperation1 op1, + BinaryOperation2 op2) + + void iota[ForwardIt, T](ForwardIt first, ForwardIt last, T value) + + T accumulate[InputIt, T](InputIt first, InputIt last, T init) + + T accumulate[InputIt, T, BinaryOperation](InputIt first, InputIt last, T init, BinaryOperation op) + + void adjacent_difference[InputIt, OutputIt](InputIt in_first, InputIt in_last, OutputIt out_first) + + void adjacent_difference[InputIt, OutputIt, BinaryOperation](InputIt in_first, InputIt in_last, OutputIt out_first, + BinaryOperation op) + + void partial_sum[InputIt, OutputIt](InputIt in_first, OutputIt in_last, OutputIt out_first) + + void partial_sum[InputIt, OutputIt, BinaryOperation](InputIt in_first, InputIt in_last, OutputIt out_first, + BinaryOperation op) + + + T reduce[InputIt, T](InputIt first, InputIt last, T init) + + # ambiguous with next overload + #T reduce[ExecutionPolicy, ForwardIt, T](ExecutionPolicy&& policy, + # ForwardIt first, ForwardIt last, T init) + + T reduce[InputIt, T, BinaryOp](InputIt first, InputIt last, T init, BinaryOp binary_op) + + T reduce[ExecutionPolicy, ForwardIt, T, BinaryOp](ExecutionPolicy&& policy, + ForwardIt first, ForwardIt last, T init, BinaryOp binary_op) + + T transform_reduce[InputIt1, InputIt2, T](InputIt1 first1, InputIt1 last1, + InputIt2 first2, T init) + + T transform_reduce[InputIt1, InputIt2, T, BinaryReductionOp, BinaryTransformOp]( + InputIt1 first1, InputIt1 last1, InputIt2 first2, T init, + BinaryReductionOp reduce, BinaryTransformOp transform) + + T transform_reduce[InputIt, T, BinaryReductionOp, UnaryTransformOp]( + InputIt first, InputIt last, T init, BinaryReductionOp reduce, + UnaryTransformOp transform) + + # ambiguous with previous overload + #T transform_reduce[ExecutionPolicy, ForwardIt1, ForwardIt2, T]( + # ExecutionPolicy&& policy, ForwardIt1 first1, ForwardIt1 last1, + # ForwardIt2 first2, T init) + + T transform_reduce[ExecutionPolicy, ForwardIt1, ForwardIt2, T, BinaryReductionOp, BinaryTransformOp]( + 
ExecutionPolicy&& policy, ForwardIt1 first1, ForwardIt1 last1, ForwardIt2 first2, T init, + BinaryReductionOp reduce, BinaryTransformOp transform) + + # ambiguous with second overload + #T transform_reduce[ExecutionPolicy, ForwardIt, T, BinaryReductionOp, UnaryTransformOp]( + # ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, T init, BinaryReductionOp reduce, + # UnaryTransformOp transform) + + OutputIt inclusive_scan[InputIt, OutputIt](InputIt first, InputIt last, OutputIt d_first) + + # ambiguous with next overload + # ForwardIt2 inclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2]( + # ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, + # ForwardIt2 d_first) + + OutputIt inclusive_scan[InputIt, OutputIt, BinaryOperation]( + InputIt first, InputIt last, OutputIt d_first, BinaryOperation binary_op) + + # ambiguous with next overload + # ForwardIt2 inclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2, BinaryOperation]( + # ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, ForwardIt2 d_first, + # BinaryOperation binary_op) + + OutputIt inclusive_scan[InputIt, OutputIt, BinaryOperation, T]( + InputIt first, InputIt last, OutputIt d_first, BinaryOperation binary_op, + T init) + + # + # ForwardIt2 inclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2, BinaryOperation, T]( + # ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, ForwardIt2 d_first, + # BinaryOperation binary_op, T init) + + OutputIt exclusive_scan[InputIt, OutputIt, T](InputIt first, InputIt last, + OutputIt d_first, T init) + + # ambiguous with next overload + #ForwardIt2 exclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2, T]( + # ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, + # ForwardIt2 d_first, T init) + + OutputIt exclusive_scan[InputIt, OutputIt, T, BinaryOperation]( + InputIt first, InputIt last, OutputIt d_first, T init, BinaryOperation binary_op) + + ForwardIt2 exclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2, T, BinaryOperation]( + ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, ForwardIt2 d_first, + T init, BinaryOperation binary_op) + + OutputIt transform_inclusive_scan[InputIt, OutputIt, BinaryOperation, UnaryOperation]( + InputIt first, InputIt last, OutputIt d_first, BinaryOperation binary_op, + UnaryOperation unary_op) + + # ambiguous with next overload + # ForwardIt2 transform_inclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2, BinaryOperation, UnaryOperation]( + # ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, ForwardIt2 d_first, + # BinaryOperation binary_op, UnaryOperation unary_op) + + OutputIt transform_inclusive_scan[InputIt, OutputIt, BinaryOperation, UnaryOperation, T]( + InputIt first, InputIt last, OutputIt d_first, BinaryOperation binary_op, + UnaryOperation unary_op, T init) + + ForwardIt2 transform_inclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2, BinaryOperation, UnaryOperation, T]( + ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, ForwardIt2 d_first, + BinaryOperation binary_op, UnaryOperation unary_op, T init) + + OutputIt transform_exclusive_scan[InputIt, OutputIt, T, BinaryOperation, UnaryOperation]( + InputIt first, InputIt last, OutputIt d_first, T init, BinaryOperation binary_op, + UnaryOperation unary_op) + + ForwardIt2 transform_exclusive_scan[ExecutionPolicy, ForwardIt1, ForwardIt2, T, BinaryOperation, UnaryOperation]( + ExecutionPolicy&& policy, ForwardIt1 first, ForwardIt1 last, ForwardIt2 d_first, + T init, BinaryOperation binary_op, UnaryOperation unary_op) + + # 
C++17 + T gcd[T](T a, T b) + T lcm[T](T a, T b) + + # C++20 + T midpoint[T](T a, T b) except + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/pair.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/pair.pxd new file mode 100644 index 0000000000000000000000000000000000000000..869fe6674d21736340a9907af809666c7d87249e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/pair.pxd @@ -0,0 +1 @@ +from .utility cimport pair diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/set.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/set.pxd new file mode 100644 index 0000000000000000000000000000000000000000..444b1ce9f38e259ec6847052c914f31c2e958c95 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/set.pxd @@ -0,0 +1,228 @@ +from .utility cimport pair + +cdef extern from "<set>" namespace "std" nogil: + cdef cppclass set[T]: + ctypedef T value_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + value_type& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + const_iterator(const_iterator&) except + + operator=(iterator&) except + + const value_type& operator*() + const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + + cppclass const_reverse_iterator + cppclass reverse_iterator: + reverse_iterator() except + + reverse_iterator(reverse_iterator&) except + + value_type& operator*() + reverse_iterator operator++() + reverse_iterator operator--() + reverse_iterator operator++(int) + reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + cppclass const_reverse_iterator: + const_reverse_iterator() except + + const_reverse_iterator(reverse_iterator&) except + + operator=(reverse_iterator&) except + + const value_type& operator*() + const_reverse_iterator operator++() + const_reverse_iterator operator--() + const_reverse_iterator operator++(int) + const_reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + + set() except + + set(set&) except + + #set(key_compare&) + #set& operator=(set&) + bint operator==(set&, set&) + bint operator!=(set&, set&) + bint operator<(set&, set&) + bint operator>(set&, set&) + bint operator<=(set&, set&) + bint operator>=(set&, set&) + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + void clear() + size_t count(const T&) + 
bint empty() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + pair[iterator, iterator] equal_range(const T&) + pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) + iterator erase(iterator) + iterator const_erase "erase"(const_iterator) + iterator erase(const_iterator, const_iterator) + size_t erase(const T&) + iterator find(const T&) + const_iterator const_find "find"(const T&) + pair[iterator, bint] insert(const T&) except + + iterator insert(iterator, const T&) except + + iterator insert(const_iterator, const T&) except + + iterator const_insert "insert"(const_iterator, const T&) except + + void insert[InputIt](InputIt, InputIt) except + + #key_compare key_comp() + iterator lower_bound(const T&) + const_iterator const_lower_bound "lower_bound"(const T&) + size_t max_size() + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator crbegin() + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const_reverse_iterator crend() + size_t size() + void swap(set&) + iterator upper_bound(const T&) + const_iterator const_upper_bound "upper_bound"(const T&) + #value_compare value_comp() + # C++20 + bint contains(const T&) + + cdef cppclass multiset[T]: + ctypedef T value_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + value_type& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + const_iterator(const_iterator&) except + + operator=(iterator&) except + + const value_type& operator*() + const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + + cppclass const_reverse_iterator + cppclass reverse_iterator: + reverse_iterator() except + + reverse_iterator(reverse_iterator&) except + + value_type& operator*() + reverse_iterator operator++() + reverse_iterator operator--() + reverse_iterator operator++(int) + reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + cppclass const_reverse_iterator: + const_reverse_iterator() except + + const_reverse_iterator(reverse_iterator&) except + + operator=(reverse_iterator&) except + + const value_type& operator*() + const_reverse_iterator operator++() + const_reverse_iterator operator--() + const_reverse_iterator operator++(int) + const_reverse_iterator operator--(int) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + + multiset() except + + multiset(multiset&) except + + #multiset(key_compare&) + #multiset& operator=(multiset&) + bint operator==(multiset&, multiset&) + bint operator!=(multiset&, multiset&) + bint 
operator<(multiset&, multiset&) + bint operator>(multiset&, multiset&) + bint operator<=(multiset&, multiset&) + bint operator>=(multiset&, multiset&) + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + void clear() + size_t count(const T&) + bint empty() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + pair[iterator, iterator] equal_range(const T&) + pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) + iterator erase(iterator) + iterator const_erase "erase"(const_iterator) + iterator erase(const_iterator, const_iterator) + size_t erase(const T&) + iterator find(const T&) + const_iterator const_find "find"(const T&) + iterator insert(const T&) except + + iterator insert(iterator, const T&) except + + iterator const_insert "insert"(const_iterator, const T&) except + + void insert[InputIt](InputIt, InputIt) except + + #key_compare key_comp() + iterator lower_bound(const T&) + const_iterator const_lower_bound "lower_bound"(const T&) + size_t max_size() + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator crbegin() + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const_reverse_iterator crend() + size_t size() + void swap(multiset&) + iterator upper_bound(const T&) + const_iterator const_upper_bound "upper_bound"(const T&) + # C++20 + bint contains(const T&) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/typeinfo.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/typeinfo.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9118e0064982f26070eede94620fb0d8873dca2d --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/typeinfo.pxd @@ -0,0 +1,10 @@ +from libcpp cimport bool + +cdef extern from "<typeinfo>" namespace "std" nogil: + cdef cppclass type_info: + const char* name() + int before(const type_info&) + bool operator==(const type_info&) + bool operator!=(const type_info&) + # C++11-only + size_t hash_code() diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/utility.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/utility.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e0df69b1661d77b0cd6bbf40b11db240704ba295 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/utility.pxd @@ -0,0 +1,30 @@ +cdef extern from "<utility>" namespace "std" nogil: + cdef cppclass pair[T, U]: + ctypedef T first_type + ctypedef U second_type + T first + U second + pair() except + + pair(pair&) except + + pair(T&, U&) except + + bint operator==(pair&, pair&) + bint operator!=(pair&, pair&) + bint operator<(pair&, pair&) + bint operator>(pair&, pair&) + bint operator<=(pair&, pair&) + bint operator>=(pair&, pair&) + +cdef extern from * namespace "cython_std" nogil: + """ + #if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600) + // move should be defined for these versions of MSVC, but __cplusplus isn't set usefully + #include <type_traits> + + namespace cython_std { + template <typename T> typename std::remove_reference<T>::type&& move(T& t) noexcept { return std::move(t); } + template <typename T> typename std::remove_reference<T>::type&& move(T&& t) noexcept { return std::move(t); } + } + + #endif + """ + cdef T move[T](T) diff --git 
a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/vector.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/vector.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3def8a568b14ec9603ed02dc931371b6ca466ee8 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/libcpp/vector.pxd @@ -0,0 +1,167 @@ +cdef extern from "<vector>" namespace "std" nogil: + cdef cppclass vector[T,ALLOCATOR=*]: + ctypedef T value_type + ctypedef ALLOCATOR allocator_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass const_iterator + cppclass iterator: + iterator() except + + iterator(iterator&) except + + T& operator*() + iterator operator++() + iterator operator--() + iterator operator++(int) + iterator operator--(int) + iterator operator+(size_type) + iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + bint operator<(iterator) + bint operator<(const_iterator) + bint operator>(iterator) + bint operator>(const_iterator) + bint operator<=(iterator) + bint operator<=(const_iterator) + bint operator>=(iterator) + bint operator>=(const_iterator) + cppclass const_iterator: + const_iterator() except + + const_iterator(iterator&) except + + const_iterator(const_iterator&) except + + operator=(iterator&) except + + const T& operator*() + const_iterator operator++() + const_iterator operator--() + const_iterator operator++(int) + const_iterator operator--(int) + const_iterator operator+(size_type) + const_iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(iterator) + bint operator==(const_iterator) + bint operator!=(iterator) + bint operator!=(const_iterator) + bint operator<(iterator) + bint operator<(const_iterator) + bint operator>(iterator) + bint operator>(const_iterator) + bint operator<=(iterator) + bint operator<=(const_iterator) + bint operator>=(iterator) + bint operator>=(const_iterator) + + cppclass const_reverse_iterator + cppclass reverse_iterator: + reverse_iterator() except + + reverse_iterator(reverse_iterator&) except + + T& operator*() + reverse_iterator operator++() + reverse_iterator operator--() + reverse_iterator operator++(int) + reverse_iterator operator--(int) + reverse_iterator operator+(size_type) + reverse_iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + bint operator<(reverse_iterator) + bint operator<(const_reverse_iterator) + bint operator>(reverse_iterator) + bint operator>(const_reverse_iterator) + bint operator<=(reverse_iterator) + bint operator<=(const_reverse_iterator) + bint operator>=(reverse_iterator) + bint operator>=(const_reverse_iterator) + cppclass const_reverse_iterator: + const_reverse_iterator() except + + const_reverse_iterator(reverse_iterator&) except + + operator=(reverse_iterator&) except + + const T& operator*() + const_reverse_iterator 
operator++() + const_reverse_iterator operator--() + const_reverse_iterator operator++(int) + const_reverse_iterator operator--(int) + const_reverse_iterator operator+(size_type) + const_reverse_iterator operator-(size_type) + difference_type operator-(iterator) + difference_type operator-(const_iterator) + bint operator==(reverse_iterator) + bint operator==(const_reverse_iterator) + bint operator!=(reverse_iterator) + bint operator!=(const_reverse_iterator) + bint operator<(reverse_iterator) + bint operator<(const_reverse_iterator) + bint operator>(reverse_iterator) + bint operator>(const_reverse_iterator) + bint operator<=(reverse_iterator) + bint operator<=(const_reverse_iterator) + bint operator>=(reverse_iterator) + bint operator>=(const_reverse_iterator) + + vector() except + + vector(vector&) except + + vector(size_type) except + + vector(size_type, T&) except + + #vector[InputIt](InputIt, InputIt) + T& operator[](size_type) + #vector& operator=(vector&) + bint operator==(vector&, vector&) + bint operator!=(vector&, vector&) + bint operator<(vector&, vector&) + bint operator>(vector&, vector&) + bint operator<=(vector&, vector&) + bint operator>=(vector&, vector&) + void assign(size_type, const T&) + void assign[InputIt](InputIt, InputIt) except + + T& at(size_type) except + + T& back() + iterator begin() + const_iterator const_begin "begin"() + const_iterator cbegin() + size_type capacity() + void clear() + bint empty() + iterator end() + const_iterator const_end "end"() + const_iterator cend() + iterator erase(iterator) + iterator erase(iterator, iterator) + T& front() + iterator insert(iterator, const T&) except + + iterator insert(iterator, size_type, const T&) except + + iterator insert[InputIt](iterator, InputIt, InputIt) except + + size_type max_size() + void pop_back() + void push_back(T&) except + + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator crbegin() + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const_reverse_iterator crend() + void reserve(size_type) except + + void resize(size_type) except + + void resize(size_type, T&) except + + size_type size() + void swap(vector&) + + # C++11 methods + T* data() + const T* const_data "data"() + void shrink_to_fit() except + + iterator emplace(const_iterator, ...) except + + T& emplace_back(...) 
except + diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/openmp.pxd b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/openmp.pxd new file mode 100644 index 0000000000000000000000000000000000000000..40f2f17a3c8f5f5b8711ac9b883b156036bda3c1 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Includes/openmp.pxd @@ -0,0 +1,50 @@ +cdef extern from "": + ctypedef struct omp_lock_t: + pass + ctypedef struct omp_nest_lock_t: + pass + + ctypedef enum omp_sched_t: + omp_sched_static = 1, + omp_sched_dynamic = 2, + omp_sched_guided = 3, + omp_sched_auto = 4 + + extern void omp_set_num_threads(int) nogil + extern int omp_get_num_threads() nogil + extern int omp_get_max_threads() nogil + extern int omp_get_thread_num() nogil + extern int omp_get_num_procs() nogil + + extern int omp_in_parallel() nogil + + extern void omp_set_dynamic(int) nogil + extern int omp_get_dynamic() nogil + + extern void omp_set_nested(int) nogil + extern int omp_get_nested() nogil + + extern void omp_init_lock(omp_lock_t *) nogil + extern void omp_destroy_lock(omp_lock_t *) nogil + extern void omp_set_lock(omp_lock_t *) nogil + extern void omp_unset_lock(omp_lock_t *) nogil + extern int omp_test_lock(omp_lock_t *) nogil + + extern void omp_init_nest_lock(omp_nest_lock_t *) nogil + extern void omp_destroy_nest_lock(omp_nest_lock_t *) nogil + extern void omp_set_nest_lock(omp_nest_lock_t *) nogil + extern void omp_unset_nest_lock(omp_nest_lock_t *) nogil + extern int omp_test_nest_lock(omp_nest_lock_t *) nogil + + extern double omp_get_wtime() nogil + extern double omp_get_wtick() nogil + + void omp_set_schedule(omp_sched_t, int) nogil + void omp_get_schedule(omp_sched_t *, int *) nogil + int omp_get_thread_limit() nogil + void omp_set_max_active_levels(int) nogil + int omp_get_max_active_levels() nogil + int omp_get_level() nogil + int omp_get_ancestor_thread_num(int) nogil + int omp_get_team_size(int) nogil + int omp_get_active_level() nogil diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Shadow.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Shadow.py new file mode 100644 index 0000000000000000000000000000000000000000..1400f1657c8f5742ded0d59946d7278739e6c9da --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Shadow.py @@ -0,0 +1,609 @@ +# cython.* namespace for pure mode. +from __future__ import absolute_import + +# Possible version formats: "3.1.0", "3.1.0a1", "3.1.0a1.dev0" +__version__ = "3.0.11" + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + + +# BEGIN shameless copy from Cython/minivect/minitypes.py + +class _ArrayType(object): + + is_array = True + subtypes = ['dtype'] + + def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False, + inner_contig=False, broadcasting=None): + self.dtype = dtype + self.ndim = ndim + self.is_c_contig = is_c_contig + self.is_f_contig = is_f_contig + self.inner_contig = inner_contig or is_c_contig or is_f_contig + self.broadcasting = broadcasting + + def __repr__(self): + axes = [":"] * self.ndim + if self.is_c_contig: + axes[-1] = "::1" + elif self.is_f_contig: + axes[0] = "::1" + + return "%s[%s]" % (self.dtype, ", ".join(axes)) + + +def index_type(base_type, item): + """ + Support array type creation by slicing, e.g. double[:, :] specifies + a 2D strided array of doubles. The syntax is the same as for + Cython memoryviews. 
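For instance, in uncompiled pure mode the slicing syntax produces an _ArrayType directly (a minimal sketch, assuming the Cython package is importable):

import cython

t = cython.double[:, ::1]   # 2D, C-contiguous array type
repr(t)                     # -> 'double[:, ::1]'; t.is_c_contig is True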
+ """ + class InvalidTypeSpecification(Exception): + pass + + def verify_slice(s): + if s.start or s.stop or s.step not in (None, 1): + raise InvalidTypeSpecification( + "Only a step of 1 may be provided to indicate C or " + "Fortran contiguity") + + if isinstance(item, tuple): + step_idx = None + for idx, s in enumerate(item): + verify_slice(s) + if s.step and (step_idx or idx not in (0, len(item) - 1)): + raise InvalidTypeSpecification( + "Step may only be provided once, and only in the " + "first or last dimension.") + + if s.step == 1: + step_idx = idx + + return _ArrayType(base_type, len(item), + is_c_contig=step_idx == len(item) - 1, + is_f_contig=step_idx == 0) + elif isinstance(item, slice): + verify_slice(item) + return _ArrayType(base_type, 1, is_c_contig=bool(item.step)) + else: + # int[8] etc. + assert int(item) == item # array size must be a plain integer + return array(base_type, item) + +# END shameless copy + + +compiled = False + +_Unspecified = object() + +# Function decorators + +def _empty_decorator(x): + return x + +def locals(**arg_types): + return _empty_decorator + +def test_assert_path_exists(*paths): + return _empty_decorator + +def test_fail_if_path_exists(*paths): + return _empty_decorator + +class _EmptyDecoratorAndManager(object): + def __call__(self, x): + return x + def __enter__(self): + pass + def __exit__(self, exc_type, exc_value, traceback): + pass + +class _Optimization(object): + pass + +cclass = ccall = cfunc = _EmptyDecoratorAndManager() + +annotation_typing = returns = wraparound = boundscheck = initializedcheck = \ + nonecheck = embedsignature = cdivision = cdivision_warnings = \ + always_allow_keywords = profile = linetrace = infer_types = \ + unraisable_tracebacks = freelist = auto_pickle = cpow = trashcan = \ + auto_cpdef = c_api_binop_methods = \ + allow_none_for_extension_args = callspec = show_performance_hints = \ + cpp_locals = py2_import = iterable_coroutine = remove_unreachable = \ + lambda _: _EmptyDecoratorAndManager() + +# Note that fast_getattr is untested and undocumented! 
+fast_getattr = lambda _: _EmptyDecoratorAndManager() + +exceptval = lambda _=None, check=True: _EmptyDecoratorAndManager() + +overflowcheck = lambda _: _EmptyDecoratorAndManager() +optimize = _Optimization() + + +embedsignature.format = overflowcheck.fold = optimize.use_switch = \ + optimize.unpack_method_calls = lambda arg: _EmptyDecoratorAndManager() + +final = internal = type_version_tag = no_gc_clear = no_gc = total_ordering = \ + ufunc = _empty_decorator + +binding = lambda _: _empty_decorator + +class warn: + undeclared = unreachable = maybe_uninitialized = unused = \ + unused_arg = unused_result = \ + lambda _: _EmptyDecoratorAndManager() + + +_cython_inline = None +def inline(f, *args, **kwds): + if isinstance(f, basestring): + global _cython_inline + if _cython_inline is None: + from Cython.Build.Inline import cython_inline as _cython_inline + return _cython_inline(f, *args, **kwds) + else: + assert len(args) == len(kwds) == 0 + return f + + +def compile(f): + from Cython.Build.Inline import RuntimeCompiledFunction + return RuntimeCompiledFunction(f) + + +# Special functions + +def cdiv(a, b): + if a < 0: + a = -a + b = -b + if b < 0: + return (a + b + 1) // b + return a // b + +def cmod(a, b): + r = a % b + if (a * b) < 0 and r: + r -= b + return r + + +# Emulated language constructs + +def cast(t, *args, **kwargs): + kwargs.pop('typecheck', None) + assert not kwargs + + if isinstance(t, typedef): + return t(*args) + elif isinstance(t, type): # Doesn't work with old-style classes of Python 2.x + if len(args) != 1 or not (args[0] is None or isinstance(args[0], t)): + return t(*args) + + return args[0] + +def sizeof(arg): + return 1 + +def typeof(arg): + return arg.__class__.__name__ + # return type(arg) + +def address(arg): + return pointer(type(arg))([arg]) + +def _is_value_type(t): + if isinstance(t, typedef): + return _is_value_type(t._basetype) + + return isinstance(t, type) and issubclass(t, (StructType, UnionType, ArrayType)) + +def declare(t=None, value=_Unspecified, **kwds): + if value is not _Unspecified: + return cast(t, value) + elif _is_value_type(t): + return t() + else: + return None + +class _nogil(object): + """Support for 'with nogil' statement and @nogil decorator. + """ + def __call__(self, x): + if callable(x): + # Used as function decorator => return the function unchanged. + return x + # Used as conditional context manager or to create an "@nogil(True/False)" decorator => keep going. + return self + + def __enter__(self): + pass + def __exit__(self, exc_class, exc, tb): + return exc_class is None + +nogil = _nogil() +gil = _nogil() +with_gil = _nogil() # Actually not a context manager, but compilation will give the right error. 
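The cdiv/cmod helpers above emulate C's truncated division rather than Python's floor division; the values below follow directly from the definitions given:

import cython

cython.cdiv(-7, 2)   # -> -3  (C truncates toward zero; Python: -7 // 2 == -4)
cython.cmod(-7, 2)   # -> -1  (C keeps the dividend's sign; Python: -7 % 2 == 1)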
+del _nogil + + +# Emulated types + +class CythonMetaType(type): + + def __getitem__(type, ix): + return array(type, ix) + +CythonTypeObject = CythonMetaType('CythonTypeObject', (object,), {}) + +class CythonType(CythonTypeObject): + + def _pointer(self, n=1): + for i in range(n): + self = pointer(self) + return self + +class PointerType(CythonType): + + def __init__(self, value=None): + if isinstance(value, (ArrayType, PointerType)): + self._items = [cast(self._basetype, a) for a in value._items] + elif isinstance(value, list): + self._items = [cast(self._basetype, a) for a in value] + elif value is None or value == 0: + self._items = [] + else: + raise ValueError + + def __getitem__(self, ix): + if ix < 0: + raise IndexError("negative indexing not allowed in C") + return self._items[ix] + + def __setitem__(self, ix, value): + if ix < 0: + raise IndexError("negative indexing not allowed in C") + self._items[ix] = cast(self._basetype, value) + + def __eq__(self, value): + if value is None and not self._items: + return True + elif type(self) != type(value): + return False + else: + return not self._items and not value._items + + def __repr__(self): + return "%s *" % (self._basetype,) + +class ArrayType(PointerType): + + def __init__(self, value=None): + if value is None: + self._items = [None] * self._n + else: + super(ArrayType, self).__init__(value) + + +class StructType(CythonType): + + def __init__(self, *posargs, **data): + if not (posargs or data): + return + if posargs and data: + raise ValueError('Cannot accept both positional and keyword arguments.') + + # Allow 'cast_from' as single positional or keyword argument. + if data and len(data) == 1 and 'cast_from' in data: + cast_from = data.pop('cast_from') + elif len(posargs) == 1 and type(posargs[0]) is type(self): + cast_from, posargs = posargs[0], () + elif posargs: + for key, arg in zip(self._members, posargs): + setattr(self, key, arg) + return + else: + for key, value in data.items(): + if key not in self._members: + raise ValueError("Invalid struct attribute for %s: %s" % ( + self.__class__.__name__, key)) + setattr(self, key, value) + return + + # do cast + if data: + raise ValueError('Cannot accept keyword arguments when casting.') + if type(cast_from) is not type(self): + raise ValueError('Cannot cast from %s' % cast_from) + for key, value in cast_from.__dict__.items(): + setattr(self, key, value) + + def __setattr__(self, key, value): + if key in self._members: + self.__dict__[key] = cast(self._members[key], value) + else: + raise AttributeError("Struct has no member '%s'" % key) + + +class UnionType(CythonType): + + def __init__(self, cast_from=_Unspecified, **data): + if cast_from is not _Unspecified: + # do type cast + if len(data) > 0: + raise ValueError('Cannot accept keyword arguments when casting.') + if isinstance(cast_from, dict): + datadict = cast_from + elif type(cast_from) is type(self): + datadict = cast_from.__dict__ + else: + raise ValueError('Cannot cast from %s' % cast_from) + else: + datadict = data + if len(datadict) > 1: + raise AttributeError("Union can only store one field at a time.") + for key, value in datadict.items(): + setattr(self, key, value) + + def __setattr__(self, key, value): + if key == '__dict__': + CythonType.__setattr__(self, key, value) + elif key in self._members: + self.__dict__ = {key: cast(self._members[key], value)} + else: + raise AttributeError("Union has no member '%s'" % key) + +def pointer(basetype): + class PointerInstance(PointerType): + _basetype = basetype + return 
PointerInstance + +def array(basetype, n): + class ArrayInstance(ArrayType): + _basetype = basetype + _n = n + return ArrayInstance + +def struct(**members): + class StructInstance(StructType): + _members = members + for key in members: + setattr(StructInstance, key, None) + return StructInstance + +def union(**members): + class UnionInstance(UnionType): + _members = members + for key in members: + setattr(UnionInstance, key, None) + return UnionInstance + +class typedef(CythonType): + + def __init__(self, type, name=None): + self._basetype = type + self.name = name + + def __call__(self, *arg): + value = cast(self._basetype, *arg) + return value + + def __repr__(self): + return self.name or str(self._basetype) + + __getitem__ = index_type + +class _FusedType(CythonType): + __getitem__ = index_type + + +def fused_type(*args): + if not args: + raise TypeError("Expected at least one type as argument") + + # Find the numeric type with biggest rank if all types are numeric + rank = -1 + for type in args: + if type not in (py_int, py_long, py_float, py_complex): + break + + if type_ordering.index(type) > rank: + result_type = type + else: + return result_type + + # Not a simple numeric type, return a fused type instance. The result + # isn't really meant to be used, as we can't keep track of the context in + # pure-mode. Casting won't do anything in this case. + return _FusedType() + + +def _specialized_from_args(signatures, args, kwargs): + "Perhaps this should be implemented in a TreeFragment in Cython code" + raise Exception("yet to be implemented") + + +py_int = typedef(int, "int") +try: + py_long = typedef(long, "long") +except NameError: # Py3 + py_long = typedef(int, "long") +py_float = typedef(float, "float") +py_complex = typedef(complex, "double complex") + + +# Predefined types + +int_types = [ + 'char', + 'short', + 'Py_UNICODE', + 'int', + 'Py_UCS4', + 'long', + 'longlong', + 'Py_hash_t', + 'Py_ssize_t', + 'size_t', + 'ssize_t', + 'ptrdiff_t', +] +float_types = [ + 'longdouble', + 'double', + 'float', +] +complex_types = [ + 'longdoublecomplex', + 'doublecomplex', + 'floatcomplex', + 'complex', +] +other_types = [ + 'bint', + 'void', + 'Py_tss_t', +] + +to_repr = { + 'longlong': 'long long', + 'longdouble': 'long double', + 'longdoublecomplex': 'long double complex', + 'doublecomplex': 'double complex', + 'floatcomplex': 'float complex', +}.get + +gs = globals() + +# note: cannot simply name the unicode type here as 2to3 gets in the way and replaces it by str +try: + import __builtin__ as builtins +except ImportError: # Py3 + import builtins + +gs['unicode'] = typedef(getattr(builtins, 'unicode', str), 'unicode') +del builtins + +for name in int_types: + reprname = to_repr(name, name) + gs[name] = typedef(py_int, reprname) + if name not in ('Py_UNICODE', 'Py_UCS4', 'Py_hash_t', 'ptrdiff_t') and not name.endswith('size_t'): + gs['u'+name] = typedef(py_int, "unsigned " + reprname) + gs['s'+name] = typedef(py_int, "signed " + reprname) + +for name in float_types: + gs[name] = typedef(py_float, to_repr(name, name)) + +for name in complex_types: + gs[name] = typedef(py_complex, to_repr(name, name)) + +bint = typedef(bool, "bint") +void = typedef(None, "void") +Py_tss_t = typedef(None, "Py_tss_t") + +for t in int_types: + for i in range(1, 4): + gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i) + if 'u'+t in gs: + gs["%s_u%s" % ('p'*i, t)] = gs['u'+t]._pointer(i) + gs["%s_s%s" % ('p'*i, t)] = gs['s'+t]._pointer(i) + +for t in float_types + complex_types + other_types: + for i in range(1, 4): 
+ gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i) + +del t, i + +NULL = gs['p_void'](0) + +# looks like 'gs' has some users out there by now... +#del gs + +integral = floating = numeric = _FusedType() + +type_ordering = [py_int, py_long, py_float, py_complex] + +class CythonDotParallel(object): + """ + The cython.parallel module. + """ + + __all__ = ['parallel', 'prange', 'threadid'] + + def parallel(self, num_threads=None): + return nogil + + def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None): + if stop is None: + stop = start + start = 0 + return range(start, stop, step) + + def threadid(self): + return 0 + + # def threadsavailable(self): + # return 1 + +class CythonDotImportedFromElsewhere(object): + """ + cython.dataclasses just shadows the standard library modules of the same name + """ + def __init__(self, module): + self.__path__ = [] + self.__file__ = None + self.__name__ = module + self.__package__ = module + + def __getattr__(self, attr): + # we typically only expect this to be called once + from importlib import import_module + import sys + try: + mod = import_module(self.__name__) + except ImportError: + # but if they don't exist (Python is not sufficiently up-to-date) then + # you can't use them + raise AttributeError("%s: the standard library module %s is not available" % + (attr, self.__name__)) + sys.modules['cython.%s' % self.__name__] = mod + return getattr(mod, attr) + +class CythonCImports(object): + """ + Simplistic module mock to make cimports sort-of work in Python code. + """ + def __init__(self, module): + self.__path__ = [] + self.__file__ = None + self.__name__ = module + self.__package__ = module + + def __getattr__(self, item): + if item.startswith('__') and item.endswith('__'): + raise AttributeError(item) + try: + return __import__(item) + except ImportError: + import sys + ex = AttributeError(item) + if sys.version_info >= (3, 0): + ex.__cause__ = None + raise ex + + +import math, sys +sys.modules['cython.parallel'] = CythonDotParallel() +sys.modules['cython.cimports'] = CythonCImports('cython.cimports') +sys.modules['cython.cimports.libc'] = CythonCImports('cython.cimports.libc') +sys.modules['cython.cimports.libc.math'] = math +# In pure Python mode @cython.dataclasses.dataclass and dataclass field should just +# shadow the standard library ones (if they are available) +dataclasses = sys.modules['cython.dataclasses'] = CythonDotImportedFromElsewhere('dataclasses') +del math, sys diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Shadow.pyi b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Shadow.pyi new file mode 100644 index 0000000000000000000000000000000000000000..42827a3ac1222995c1181aaaba2fb29af5355280 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Shadow.pyi @@ -0,0 +1,102 @@ +from builtins import (int as py_int, float as py_float, + bool as py_bool, str as py_str, complex as py_complex) +from typing import (Union, Dict, Any, Sequence, Optional, + List, TypeVar, Type, Generic) + +int = py_int +long = py_int +longlong = py_int +short = py_int +char = py_int +sint = py_int +slong = py_int +slonglong = py_int +sshort = py_int +schar = py_int +uint = py_int +ulong = py_int +ulonglong = py_int +ushort = py_int +uchar = py_int +size_t = py_int +Py_ssize_t = py_int +Py_UCS4 = Union[py_int, str] +Py_UNICODE = Union[py_int, str] +float = py_float +double = py_float +longdouble = py_float +complex = py_complex 
+floatcomplex = py_complex +doublecomplex = py_complex +longdoublecomplex = py_complex +bint = py_bool +void = Union[None] +basestring = py_str +unicode = py_str + +gs: Dict[str, Any] # Should match the return type of globals() + +_T = TypeVar('_T') + +class _ArrayType(object, Generic[_T]): + is_array: bool + subtypes: Sequence[str] + dtype: _T + ndim: int + is_c_contig: bool + is_f_contig: bool + inner_contig: bool + broadcasting: Any + + # broadcasting is not used, so it's not clear about its type + def __init__(self, dtype: _T, ndim: int, is_c_contig: bool = ..., + is_f_contig: bool = ..., inner_contig: bool = ..., + broadcasting: Any = ...) -> None: ... + def __repr__(self) -> str: ... + +class CythonTypeObject(object): + ... + +class CythonType(CythonTypeObject): + ... + +class PointerType(CythonType, Generic[_T]): + def __init__( + self, + value: Optional[Union[ArrayType[_T], PointerType[_T], List[_T], int]] = ... + ) -> None: ... + def __getitem__(self, ix: int) -> _T: ... + def __setitem__(self, ix: int, value: _T) -> None: ... + def __eq__(self, value: object) -> bool: ... + def __repr__(self) -> str: ... + +class ArrayType(PointerType[_T]): + def __init__(self) -> None: ... + +#class StructType(CythonType, Generic[_T]): +# def __init__( +# self, +# value: List[Type[_T]] = ... +# ) -> None: ... + +def index_type( + base_type: _T, item: Union[tuple, slice, int]) -> _ArrayType[_T]: ... + +def pointer(basetype: _T) -> Type[PointerType[_T]]: ... + +def array(basetype: _T, n: int) -> Type[ArrayType[_T]]: ... + +#def struct(basetype: _T) -> Type[StructType[_T]]: ... + +class typedef(CythonType, Generic[_T]): + name: str + + def __init__(self, type: _T, name: Optional[str] = ...) -> None: ... + def __call__(self, *arg: Any) -> _T: ... + def __repr__(self) -> str: ... + __getitem__ = index_type + +#class _FusedType(CythonType, Generic[_T]): +# def __init__(self) -> None: ... + +#def fused_type(*args: Tuple[_T]) -> Type[FusedType[_T]]: ... diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/StringIOTree.cpython-311-x86_64-linux-gnu.so b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/StringIOTree.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2e4de0c7aaf4a5fc475247c2cdecd81669e0fd01 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/StringIOTree.cpython-311-x86_64-linux-gnu.so differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/StringIOTree.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/StringIOTree.py new file mode 100644 index 0000000000000000000000000000000000000000..798009758b9d54e81a84fd2a8dd365853919e96e --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/StringIOTree.py @@ -0,0 +1,174 @@ +# cython: auto_pickle=False + +r""" +Implements a buffer with insertion points. When you know you need to +"get back" to a place and write more later, simply call insertion_point() +at that spot and get a new StringIOTree object that is "left behind". 
+ +EXAMPLE: + +>>> a = StringIOTree() +>>> _= a.write('first\n') +>>> b = a.insertion_point() +>>> _= a.write('third\n') +>>> _= b.write('second\n') +>>> a.getvalue().split() +['first', 'second', 'third'] + +>>> c = b.insertion_point() +>>> d = c.insertion_point() +>>> _= d.write('alpha\n') +>>> _= b.write('gamma\n') +>>> _= c.write('beta\n') +>>> b.getvalue().split() +['second', 'alpha', 'beta', 'gamma'] + +>>> try: from cStringIO import StringIO +... except ImportError: from io import StringIO + +>>> i = StringIOTree() +>>> d.insert(i) +>>> _= i.write('inserted\n') +>>> out = StringIO() +>>> a.copyto(out) +>>> out.getvalue().split() +['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third'] +""" + +from __future__ import absolute_import #, unicode_literals + +try: + # Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2. + from cStringIO import StringIO +except ImportError: + from io import StringIO + + +class StringIOTree(object): + """ + See module docs. + """ + + def __init__(self, stream=None): + self.prepended_children = [] + if stream is None: + stream = StringIO() + self.stream = stream + self.write = stream.write + self.markers = [] + + def empty(self): + if self.stream.tell(): + return False + return all([child.empty() for child in self.prepended_children]) if self.prepended_children else True + + def getvalue(self): + content = [] + self._collect_in(content) + return "".join(content) + + def _collect_in(self, target_list): + for x in self.prepended_children: + x._collect_in(target_list) + stream_content = self.stream.getvalue() + if stream_content: + target_list.append(stream_content) + + def copyto(self, target): + """Potentially cheaper than getvalue as no string concatenation + needs to happen.""" + for child in self.prepended_children: + child.copyto(target) + stream_content = self.stream.getvalue() + if stream_content: + target.write(stream_content) + + def commit(self): + # Save what we have written until now so that the buffer + # itself is empty -- this makes it ready for insertion + if self.stream.tell(): + self.prepended_children.append(StringIOTree(self.stream)) + self.prepended_children[-1].markers = self.markers + self.markers = [] + self.stream = StringIO() + self.write = self.stream.write + + def reset(self): + self.prepended_children = [] + self.markers = [] + self.stream = StringIO() + self.write = self.stream.write + + def insert(self, iotree): + """ + Insert a StringIOTree (and all of its contents) at this location. + Further writing to self appears after what is inserted. + """ + self.commit() + self.prepended_children.append(iotree) + + def insertion_point(self): + """ + Returns a new StringIOTree, which is left behind at the current position + (it what is written to the result will appear right before whatever is + next written to self). + + Calling getvalue() or copyto() on the result will only return the + contents written to it. + """ + # Save what we have written until now + # This is so that getvalue on the result doesn't include it. + self.commit() + # Construct the new forked object to return + other = StringIOTree() + self.prepended_children.append(other) + return other + + def allmarkers(self): + children = self.prepended_children + return [m for c in children for m in c.allmarkers()] + self.markers + + """ + # Print the result of allmarkers in a nice human-readable form. Use it only for debugging. + # Prints e.g. 
+ # /path/to/source.pyx: + # cython line 2 maps to 3299-3343 + # cython line 4 maps to 2236-2245 2306 3188-3201 + # /path/to/othersource.pyx: + # cython line 3 maps to 1234-1270 + # ... + # Note: In the example above, 3343 maps to line 2, 3344 does not. + def print_hr_allmarkers(self): + from collections import defaultdict + markers = self.allmarkers() + totmap = defaultdict(lambda: defaultdict(list)) + for c_lineno, (cython_desc, cython_lineno) in enumerate(markers): + if cython_lineno > 0 and cython_desc.filename is not None: + totmap[cython_desc.filename][cython_lineno].append(c_lineno + 1) + reprstr = "" + if totmap == 0: + reprstr += "allmarkers is empty\n" + try: + sorted(totmap.items()) + except: + print(totmap) + print(totmap.items()) + for cython_path, filemap in sorted(totmap.items()): + reprstr += cython_path + ":\n" + for cython_lineno, c_linenos in sorted(filemap.items()): + reprstr += "\tcython line " + str(cython_lineno) + " maps to " + i = 0 + while i < len(c_linenos): + reprstr += str(c_linenos[i]) + flag = False + while i+1 < len(c_linenos) and c_linenos[i+1] == c_linenos[i]+1: + i += 1 + flag = True + if flag: + reprstr += "-" + str(c_linenos[i]) + " " + i += 1 + reprstr += "\n" + + import sys + sys.stdout.write(reprstr) + """ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/TestUtils.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/TestUtils.py new file mode 100644 index 0000000000000000000000000000000000000000..ed0c19793bcda7aa41e15d084b505fdb96223674 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/TestUtils.py @@ -0,0 +1,398 @@ +from __future__ import absolute_import + +import os +import re +import unittest +import shlex +import sys +import tempfile +import textwrap +from io import open +from functools import partial + +from .Compiler import Errors +from .CodeWriter import CodeWriter +from .Compiler.TreeFragment import TreeFragment, strip_common_indent +from .Compiler.Visitor import TreeVisitor, VisitorTransform +from .Compiler import TreePath + + +class NodeTypeWriter(TreeVisitor): + def __init__(self): + super(NodeTypeWriter, self).__init__() + self._indents = 0 + self.result = [] + + def visit_Node(self, node): + if not self.access_path: + name = u"(root)" + else: + tip = self.access_path[-1] + if tip[2] is not None: + name = u"%s[%d]" % tip[1:3] + else: + name = tip[1] + + self.result.append(u" " * self._indents + + u"%s: %s" % (name, node.__class__.__name__)) + self._indents += 1 + self.visitchildren(node) + self._indents -= 1 + + +def treetypes(root): + """Returns a string representing the tree by class names. 
+ There's a leading and trailing whitespace so that it can be + compared by simple string comparison while still making test + cases look ok.""" + w = NodeTypeWriter() + w.visit(root) + return u"\n".join([u""] + w.result + [u""]) + + +class CythonTest(unittest.TestCase): + + def setUp(self): + Errors.init_thread() + + def tearDown(self): + Errors.init_thread() + + def assertLines(self, expected, result): + "Checks that the given strings or lists of strings are equal line by line" + if not isinstance(expected, list): + expected = expected.split(u"\n") + if not isinstance(result, list): + result = result.split(u"\n") + for idx, (expected_line, result_line) in enumerate(zip(expected, result)): + self.assertEqual(expected_line, result_line, + "Line %d:\nExp: %s\nGot: %s" % (idx, expected_line, result_line)) + self.assertEqual(len(expected), len(result), + "Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(expected), u"\n".join(result))) + + def codeToLines(self, tree): + writer = CodeWriter() + writer.write(tree) + return writer.result.lines + + def codeToString(self, tree): + return "\n".join(self.codeToLines(tree)) + + def assertCode(self, expected, result_tree): + result_lines = self.codeToLines(result_tree) + + expected_lines = strip_common_indent(expected.split("\n")) + + for idx, (line, expected_line) in enumerate(zip(result_lines, expected_lines)): + self.assertEqual(expected_line, line, + "Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line)) + self.assertEqual(len(result_lines), len(expected_lines), + "Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(result_lines), expected)) + + def assertNodeExists(self, path, result_tree): + self.assertNotEqual(TreePath.find_first(result_tree, path), None, + "Path '%s' not found in result tree" % path) + + def fragment(self, code, pxds=None, pipeline=None): + "Simply create a tree fragment using the name of the test-case in parse errors." + if pxds is None: + pxds = {} + if pipeline is None: + pipeline = [] + name = self.id() + if name.startswith("__main__."): + name = name[len("__main__."):] + name = name.replace(".", "_") + return TreeFragment(code, name, pxds, pipeline=pipeline) + + def treetypes(self, root): + return treetypes(root) + + def should_fail(self, func, exc_type=Exception): + """Calls "func" and fails if it doesn't raise the right exception + (any exception by default). Also returns the exception in question. + """ + try: + func() + self.fail("Expected an exception of type %r" % exc_type) + except exc_type as e: + self.assertTrue(isinstance(e, exc_type)) + return e + + def should_not_fail(self, func): + """Calls func and succeeds if and only if no exception is raised + (i.e. converts exception raising into a failed testcase). Returns + the return value of func.""" + try: + return func() + except Exception as exc: + self.fail(str(exc)) + + +class TransformTest(CythonTest): + """ + Utility base class for transform unit tests. It is based around constructing + test trees (either explicitly or by parsing a Cython code string); running + the transform, serialize it using a customized Cython serializer (with + special markup for nodes that cannot be represented in Cython), + and do a string-comparison line-by-line of the result. + + To create a test case: + - Call run_pipeline. The pipeline should at least contain the transform you + are testing; pyx should be either a string (passed to the parser to + create a post-parse tree) or a node representing input to pipeline. + The result will be a transformed result. 
+ + - Check that the tree is correct. If wanted, assertCode can be used, which + takes a code string as expected, and a ModuleNode in result_tree + (it serializes the ModuleNode to a string and compares line-by-line). + + All code strings are first stripped for whitespace lines and then common + indentation. + + Plans: One could have a pxd dictionary parameter to run_pipeline. + """ + + def run_pipeline(self, pipeline, pyx, pxds=None): + if pxds is None: + pxds = {} + tree = self.fragment(pyx, pxds).root + # Run pipeline + for T in pipeline: + tree = T(tree) + return tree + + +# For the test C code validation, we have to take care that the test directives (and thus +# the match strings) do not just appear in (multiline) C code comments containing the original +# Cython source code. Thus, we discard the comments before matching. +# This seems a prime case for re.VERBOSE, but it seems to match some of the whitespace. +_strip_c_comments = partial(re.compile( + re.sub(r'\s+', '', r''' + /[*] ( + (?: [^*\n] | [*][^/] )* + [\n] + (?: [^*] | [*][^/] )* + ) [*]/ + ''') +).sub, '') + +_strip_cython_code_from_html = partial(re.compile( + re.sub(r'\s\s+', '', r''' + (?: +
+        <pre class=["'][^"']*cython\s+line[^"']*["']\s*>
+        (?:[^<]|<(?!/pre))+
+        </pre>
+ )|(?: + ]*> + (?:[^<]|<(?!/style))+ + + ) + ''') +).sub, '') + + +def _parse_pattern(pattern): + start = end = None + if pattern.startswith('/'): + start, pattern = re.split(r"(?= os.path.getmtime(file_path): + write_file(file_path, content, dedent=dedent, encoding=encoding) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utils.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utils.py new file mode 100644 index 0000000000000000000000000000000000000000..529f70ba3f0c91b1d2834ae65c3fd897f9930942 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utils.py @@ -0,0 +1,721 @@ +""" +Cython -- Things that don't belong anywhere else in particular +""" + +from __future__ import absolute_import + +import cython + +cython.declare( + basestring=object, + os=object, sys=object, re=object, io=object, codecs=object, glob=object, shutil=object, tempfile=object, + cython_version=object, + _function_caches=list, _parse_file_version=object, _match_file_encoding=object, +) + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + +try: + FileNotFoundError +except NameError: + FileNotFoundError = OSError + +import os +import sys +import re +import io +import codecs +import glob +import shutil +import tempfile +from functools import wraps + +from . import __version__ as cython_version + +PACKAGE_FILES = ("__init__.py", "__init__.pyc", "__init__.pyx", "__init__.pxd") + +_build_cache_name = "__{0}_cache".format +_CACHE_NAME_PATTERN = re.compile(r"^__(.+)_cache$") + +modification_time = os.path.getmtime + +GENERATED_BY_MARKER = "/* Generated by Cython %s */" % cython_version +GENERATED_BY_MARKER_BYTES = GENERATED_BY_MARKER.encode('us-ascii') + + +class _TryFinallyGeneratorContextManager(object): + """ + Fast, bare minimum @contextmanager, only for try-finally, not for exception handling. + """ + def __init__(self, gen): + self._gen = gen + + def __enter__(self): + return next(self._gen) + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + next(self._gen) + except (StopIteration, GeneratorExit): + pass + + +def try_finally_contextmanager(gen_func): + @wraps(gen_func) + def make_gen(*args, **kwargs): + return _TryFinallyGeneratorContextManager(gen_func(*args, **kwargs)) + return make_gen + + +_function_caches = [] + + +def clear_function_caches(): + for cache in _function_caches: + cache.clear() + + +def cached_function(f): + cache = {} + _function_caches.append(cache) + uncomputed = object() + + @wraps(f) + def wrapper(*args): + res = cache.get(args, uncomputed) + if res is uncomputed: + res = cache[args] = f(*args) + return res + + wrapper.uncached = f + return wrapper + + +def _find_cache_attributes(obj): + """The function iterates over the attributes of the object and, + if it finds the name of the cache, it returns it and the corresponding method name. + The method may not be present in the object. + """ + for attr_name in dir(obj): + match = _CACHE_NAME_PATTERN.match(attr_name) + if match is not None: + yield attr_name, match.group(1) + + +def clear_method_caches(obj): + """Removes every cache found in the object, + if a corresponding method exists for that cache. 
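As a usage note for the try_finally_contextmanager helper defined above, a minimal sketch (the timed generator is a hypothetical example):

import time
from Cython.Utils import try_finally_contextmanager

@try_finally_contextmanager
def timed(label):
    t0 = time.time()
    yield t0                          # value returned from __enter__
    print(label, time.time() - t0)    # runs on __exit__, like a finally block

with timed("step"):
    pass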
+ """ + for cache_name, method_name in _find_cache_attributes(obj): + if hasattr(obj, method_name): + delattr(obj, cache_name) + # if there is no corresponding method, then we assume + # that this attribute was not created by our cached method + + +def cached_method(f): + cache_name = _build_cache_name(f.__name__) + + def wrapper(self, *args): + cache = getattr(self, cache_name, None) + if cache is None: + cache = {} + setattr(self, cache_name, cache) + if args in cache: + return cache[args] + res = cache[args] = f(self, *args) + return res + + return wrapper + + +def replace_suffix(path, newsuf): + base, _ = os.path.splitext(path) + return base + newsuf + + +def open_new_file(path): + if os.path.exists(path): + # Make sure to create a new file here so we can + # safely hard link the output files. + os.unlink(path) + + # we use the ISO-8859-1 encoding here because we only write pure + # ASCII strings or (e.g. for file names) byte encoded strings as + # Unicode, so we need a direct mapping from the first 256 Unicode + # characters to a byte sequence, which ISO-8859-1 provides + + # note: can't use io.open() in Py2 as we may be writing str objects + return codecs.open(path, "w", encoding="ISO-8859-1") + + +def castrate_file(path, st): + # Remove junk contents from an output file after a + # failed compilation. + # Also sets access and modification times back to + # those specified by st (a stat struct). + if not is_cython_generated_file(path, allow_failed=True, if_not_found=False): + return + + try: + f = open_new_file(path) + except EnvironmentError: + pass + else: + f.write( + "#error Do not use this file, it is the result of a failed Cython compilation.\n") + f.close() + if st: + os.utime(path, (st.st_atime, st.st_mtime-1)) + + +def is_cython_generated_file(path, allow_failed=False, if_not_found=True): + failure_marker = b"#error Do not use this file, it is the result of a failed Cython compilation." + file_content = None + if os.path.exists(path): + try: + with open(path, "rb") as f: + file_content = f.read(len(failure_marker)) + except (OSError, IOError): + pass # Probably just doesn't exist any more + + if file_content is None: + # file does not exist (yet) + return if_not_found + + return ( + # Cython C file? + file_content.startswith(b"/* Generated by Cython ") or + # Cython output file after previous failures? + (allow_failed and file_content == failure_marker) or + # Let's allow overwriting empty files as well. They might have resulted from previous failures. + not file_content + ) + + +def file_generated_by_this_cython(path): + file_content = b'' + if os.path.exists(path): + try: + with open(path, "rb") as f: + file_content = f.read(len(GENERATED_BY_MARKER_BYTES)) + except (OSError, IOError): + pass # Probably just doesn't exist any more + return file_content and file_content.startswith(GENERATED_BY_MARKER_BYTES) + + +def file_newer_than(path, time): + ftime = modification_time(path) + return ftime > time + + +def safe_makedirs(path): + try: + os.makedirs(path) + except OSError: + if not os.path.isdir(path): + raise + + +def copy_file_to_dir_if_newer(sourcefile, destdir): + """ + Copy file sourcefile to directory destdir (creating it if needed), + preserving metadata. If the destination file exists and is not + older than the source file, the copying is skipped. 
+ """ + destfile = os.path.join(destdir, os.path.basename(sourcefile)) + try: + desttime = modification_time(destfile) + except OSError: + # New file does not exist, destdir may or may not exist + safe_makedirs(destdir) + else: + # New file already exists + if not file_newer_than(sourcefile, desttime): + return + shutil.copy2(sourcefile, destfile) + + +@cached_function +def find_root_package_dir(file_path): + dir = os.path.dirname(file_path) + if file_path == dir: + return dir + elif is_package_dir(dir): + return find_root_package_dir(dir) + else: + return dir + + +@cached_function +def check_package_dir(dir_path, package_names): + namespace = True + for dirname in package_names: + dir_path = os.path.join(dir_path, dirname) + has_init = contains_init(dir_path) + if has_init: + namespace = False + return dir_path, namespace + + +@cached_function +def contains_init(dir_path): + for filename in PACKAGE_FILES: + path = os.path.join(dir_path, filename) + if path_exists(path): + return 1 + + +def is_package_dir(dir_path): + if contains_init(dir_path): + return 1 + + +@cached_function +def path_exists(path): + # try on the filesystem first + if os.path.exists(path): + return True + # figure out if a PEP 302 loader is around + try: + loader = __loader__ + # XXX the code below assumes a 'zipimport.zipimporter' instance + # XXX should be easy to generalize, but too lazy right now to write it + archive_path = getattr(loader, 'archive', None) + if archive_path: + normpath = os.path.normpath(path) + if normpath.startswith(archive_path): + arcname = normpath[len(archive_path)+1:] + try: + loader.get_data(arcname) + return True + except IOError: + return False + except NameError: + pass + return False + + +_parse_file_version = re.compile(r".*[.]cython-([0-9]+)[.][^./\\]+$").findall + + +@cached_function +def find_versioned_file(directory, filename, suffix, + _current_version=int(re.sub(r"^([0-9]+)[.]([0-9]+).*", r"\1\2", cython_version))): + """ + Search a directory for versioned pxd files, e.g. "lib.cython-30.pxd" for a Cython 3.0+ version. + + @param directory: the directory to search + @param filename: the filename without suffix + @param suffix: the filename extension including the dot, e.g. ".pxd" + @return: the file path if found, or None + """ + assert not suffix or suffix[:1] == '.' + path_prefix = os.path.join(directory, filename) + + matching_files = glob.glob( + (glob.escape(path_prefix) if sys.version_info >= (3, 4) else + ''.join([ '['+c+']' if c in '[*?' else c for c in path_prefix])) + + ".cython-*" + suffix) + path = path_prefix + suffix + if not os.path.exists(path): + path = None + best_match = (-1, path) # last resort, if we do not have versioned .pxd files + + for path in matching_files: + versions = _parse_file_version(path) + if versions: + int_version = int(versions[0]) + # Let's assume no duplicates. 
+ if best_match[0] < int_version <= _current_version: + best_match = (int_version, path) + return best_match[1] + + +# file name encodings + +def decode_filename(filename): + if isinstance(filename, bytes): + try: + filename_encoding = sys.getfilesystemencoding() + if filename_encoding is None: + filename_encoding = sys.getdefaultencoding() + filename = filename.decode(filename_encoding) + except UnicodeDecodeError: + pass + return filename + + +# support for source file encoding detection + +_match_file_encoding = re.compile(br"(\w*coding)[:=]\s*([-\w.]+)").search + + +def detect_opened_file_encoding(f, default='UTF-8'): + # PEPs 263 and 3120 + # Most of the time the first two lines fall in the first couple of hundred chars, + # and this bulk read/split is much faster. + lines = () + start = b'' + while len(lines) < 3: + data = f.read(500) + start += data + lines = start.split(b"\n") + if not data: + break + + m = _match_file_encoding(lines[0]) + if m and m.group(1) != b'c_string_encoding': + return m.group(2).decode('iso8859-1') + elif len(lines) > 1: + m = _match_file_encoding(lines[1]) + if m: + return m.group(2).decode('iso8859-1') + return default + + +def skip_bom(f): + """ + Read past a BOM at the beginning of a source file. + This could be added to the scanner, but it's *substantially* easier + to keep it at this level. + """ + if f.read(1) != u'\uFEFF': + f.seek(0) + + +def open_source_file(source_filename, encoding=None, error_handling=None): + stream = None + try: + if encoding is None: + # Most of the time the encoding is not specified, so try hard to open the file only once. + f = io.open(source_filename, 'rb') + encoding = detect_opened_file_encoding(f) + f.seek(0) + stream = io.TextIOWrapper(f, encoding=encoding, errors=error_handling) + else: + stream = io.open(source_filename, encoding=encoding, errors=error_handling) + + except OSError: + if os.path.exists(source_filename): + raise # File is there, but something went wrong reading from it. + # Allow source files to be in zip files etc. 
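The PEP 263 detection used above can be exercised directly; a quick sketch (the in-memory stream stands in for a real source file):

import io
from Cython.Utils import detect_opened_file_encoding

f = io.BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
detect_opened_file_encoding(f)   # -> 'latin-1'; returns 'UTF-8' when no cookie is found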
+ try: + loader = __loader__ + if source_filename.startswith(loader.archive): + stream = open_source_from_loader( + loader, source_filename, + encoding, error_handling) + except (NameError, AttributeError): + pass + + if stream is None: + raise FileNotFoundError(source_filename) + skip_bom(stream) + return stream + + +def open_source_from_loader(loader, + source_filename, + encoding=None, error_handling=None): + nrmpath = os.path.normpath(source_filename) + arcname = nrmpath[len(loader.archive)+1:] + data = loader.get_data(arcname) + return io.TextIOWrapper(io.BytesIO(data), + encoding=encoding, + errors=error_handling) + + +def str_to_number(value): + # note: this expects a string as input that was accepted by the + # parser already, with an optional "-" sign in front + is_neg = False + if value[:1] == '-': + is_neg = True + value = value[1:] + if len(value) < 2: + value = int(value, 0) + elif value[0] == '0': + literal_type = value[1] # 0'o' - 0'b' - 0'x' + if literal_type in 'xX': + # hex notation ('0x1AF') + value = strip_py2_long_suffix(value) + value = int(value[2:], 16) + elif literal_type in 'oO': + # Py3 octal notation ('0o136') + value = int(value[2:], 8) + elif literal_type in 'bB': + # Py3 binary notation ('0b101') + value = int(value[2:], 2) + else: + # Py2 octal notation ('0136') + value = int(value, 8) + else: + value = int(value, 0) + return -value if is_neg else value + + +def strip_py2_long_suffix(value_str): + """ + Python 2 likes to append 'L' to stringified numbers + which in then can't process when converting them to numbers. + """ + if value_str[-1] in 'lL': + return value_str[:-1] + return value_str + + +def long_literal(value): + if isinstance(value, basestring): + value = str_to_number(value) + return not -2**31 <= value < 2**31 + + +@cached_function +def get_cython_cache_dir(): + r""" + Return the base directory containing Cython's caches. + + Priority: + + 1. CYTHON_CACHE_DIR + 2. (OS X): ~/Library/Caches/Cython + (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined + 3. ~/.cython + + """ + if 'CYTHON_CACHE_DIR' in os.environ: + return os.environ['CYTHON_CACHE_DIR'] + + parent = None + if os.name == 'posix': + if sys.platform == 'darwin': + parent = os.path.expanduser('~/Library/Caches') + else: + # this could fallback on ~/.cache + parent = os.environ.get('XDG_CACHE_HOME') + + if parent and os.path.isdir(parent): + return os.path.join(parent, 'cython') + + # last fallback: ~/.cython + return os.path.expanduser(os.path.join('~', '.cython')) + + +@try_finally_contextmanager +def captured_fd(stream=2, encoding=None): + orig_stream = os.dup(stream) # keep copy of original stream + try: + with tempfile.TemporaryFile(mode="a+b") as temp_file: + def read_output(_output=[b'']): + if not temp_file.closed: + temp_file.seek(0) + _output[0] = temp_file.read() + return _output[0] + + os.dup2(temp_file.fileno(), stream) # replace stream by copy of pipe + def get_output(): + result = read_output() + return result.decode(encoding) if encoding else result + + yield get_output + # note: @contextlib.contextmanager requires try-finally here + os.dup2(orig_stream, stream) # restore original stream + read_output() # keep the output in case it's used after closing the context manager + finally: + os.close(orig_stream) + + +def get_encoding_candidates(): + candidates = [sys.getdefaultencoding()] + for stream in (sys.stdout, sys.stdin, sys.__stdout__, sys.__stdin__): + encoding = getattr(stream, 'encoding', None) + # encoding might be None (e.g. 
somebody redirects stdout): + if encoding is not None and encoding not in candidates: + candidates.append(encoding) + return candidates + + +def prepare_captured(captured): + captured_bytes = captured.strip() + if not captured_bytes: + return None + for encoding in get_encoding_candidates(): + try: + return captured_bytes.decode(encoding) + except UnicodeDecodeError: + pass + # last resort: print at least the readable ascii parts correctly. + return captured_bytes.decode('latin-1') + + +def print_captured(captured, output, header_line=None): + captured = prepare_captured(captured) + if captured: + if header_line: + output.write(header_line) + output.write(captured) + + +def print_bytes(s, header_text=None, end=b'\n', file=sys.stdout, flush=True): + if header_text: + file.write(header_text) # note: text! => file.write() instead of out.write() + file.flush() + try: + out = file.buffer # Py3 + except AttributeError: + out = file # Py2 + out.write(s) + if end: + out.write(end) + if flush: + out.flush() + + +class OrderedSet(object): + def __init__(self, elements=()): + self._list = [] + self._set = set() + self.update(elements) + + def __iter__(self): + return iter(self._list) + + def update(self, elements): + for e in elements: + self.add(e) + + def add(self, e): + if e not in self._set: + self._list.append(e) + self._set.add(e) + + def __bool__(self): + return bool(self._set) + + __nonzero__ = __bool__ + + +# Class decorator that adds a metaclass and recreates the class with it. +# Copied from 'six'. +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def raise_error_if_module_name_forbidden(full_module_name): + # it is bad idea to call the pyx-file cython.pyx, so fail early + if full_module_name == 'cython' or full_module_name.startswith('cython.'): + raise ValueError('cython is a special module, cannot be used as a module name') + + +def build_hex_version(version_string): + """ + Parse and translate public version identifier like '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX). + + SEE: https://peps.python.org/pep-0440/#public-version-identifiers + """ + # Parse '4.12a1' into [4, 12, 0, 0xA01] + # And ignore .dev, .pre and .post segments + digits = [] + release_status = 0xF0 + for segment in re.split(r'(\D+)', version_string): + if segment in ('a', 'b', 'rc'): + release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[segment] + digits = (digits + [0, 0])[:3] # 1.2a1 -> 1.2.0a1 + elif segment in ('.dev', '.pre', '.post'): + break # break since those are the last segments + elif segment != '.': + digits.append(int(segment)) + + digits = (digits + [0] * 3)[:4] + digits[3] += release_status + + # Then, build a single hex value, two hex digits per version part. 
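+    # (Worked example: '4.3a1' parses to [4, 3, 0, 0xA1] and packs to
+    # '0x040300A1'; a plain release like '3.0.11' parses to [3, 0, 11, 0xF0]
+    # and packs to '0x03000BF0'.)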
+ hexversion = 0 + for digit in digits: + hexversion = (hexversion << 8) + digit + + return '0x%08X' % hexversion + + +def write_depfile(target, source, dependencies): + src_base_dir = os.path.dirname(source) + cwd = os.getcwd() + if not src_base_dir.endswith(os.sep): + src_base_dir += os.sep + # paths below the base_dir are relative, otherwise absolute + paths = [] + for fname in dependencies: + if fname.startswith(src_base_dir): + try: + newpath = os.path.relpath(fname, cwd) + except ValueError: + # if they are on different Windows drives, absolute is fine + newpath = os.path.abspath(fname) + else: + newpath = os.path.abspath(fname) + paths.append(newpath) + + depline = os.path.relpath(target, cwd) + ": \\\n " + depline += " \\\n ".join(paths) + "\n" + + with open(target+'.dep', 'w') as outfile: + outfile.write(depline) + + +def print_version(): + print("Cython version %s" % cython_version) + # For legacy reasons, we also write the version to stderr. + # New tools should expect it in stdout, but existing ones still pipe from stderr, or from both. + if sys.stderr.isatty() or sys.stdout == sys.stderr: + return + if os.fstat(1) == os.fstat(2): + # This is somewhat unsafe since sys.stdout/err might not really be linked to streams 1/2. + # However, in most *relevant* cases, where Cython is run as an external tool, they are linked. + return + sys.stderr.write("Cython version %s\n" % cython_version) + + +def normalise_float_repr(float_str): + """ + Generate a 'normalised', simple digits string representation of a float value + to allow string comparisons. Examples: '.123', '123.456', '123.' + """ + str_value = float_str.lower().lstrip('0') + + exp = 0 + if 'E' in str_value or 'e' in str_value: + str_value, exp = str_value.split('E' if 'E' in str_value else 'e', 1) + exp = int(exp) + + if '.' in str_value: + num_int_digits = str_value.index('.') + str_value = str_value[:num_int_digits] + str_value[num_int_digits + 1:] + else: + num_int_digits = len(str_value) + exp += num_int_digits + + result = ( + str_value[:exp] + + '0' * (exp - len(str_value)) + + '.' + + '0' * -exp + + str_value[exp:] + ).rstrip('0') + + return result if result != '.' else '.0' diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..549246b8a378c4586e602884a93b317987b7101c --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__init__.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from .Shadow import __version__ + +# Void cython.* directives (for case insensitive operating systems). 
+from .Shadow import * + + +def load_ipython_extension(ip): + """Load the extension in IPython.""" + from .Build.IpythonMagic import CythonMagics # pylint: disable=cyclic-import + ip.register_magics(CythonMagics) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e769e4d2d4363cb5b81b8ea0f9fb2fe068c5388 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/nx_agraph.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/nx_agraph.py new file mode 100644 index 0000000000000000000000000000000000000000..8db3400f20041ed8d6ce23b1b4cb937ac192c0ee --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/nx_agraph.py @@ -0,0 +1,469 @@ +""" +*************** +Graphviz AGraph +*************** + +Interface to pygraphviz AGraph class. + +Examples +-------- +>>> G = nx.complete_graph(5) +>>> A = nx.nx_agraph.to_agraph(G) +>>> H = nx.nx_agraph.from_agraph(A) + +See Also +-------- + - Pygraphviz: http://pygraphviz.github.io/ + - Graphviz: https://www.graphviz.org + - DOT Language: http://www.graphviz.org/doc/info/lang.html +""" +import os +import tempfile + +import networkx as nx + +__all__ = [ + "from_agraph", + "to_agraph", + "write_dot", + "read_dot", + "graphviz_layout", + "pygraphviz_layout", + "view_pygraphviz", +] + + +@nx._dispatch(graphs=None) +def from_agraph(A, create_using=None): + """Returns a NetworkX Graph or DiGraph from a PyGraphviz graph. + + Parameters + ---------- + A : PyGraphviz AGraph + A graph created with PyGraphviz + + create_using : NetworkX graph constructor, optional (default=None) + Graph type to create. If graph instance, then cleared before populated. + If `None`, then the appropriate Graph type is inferred from `A`. + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + >>> G = nx.nx_agraph.from_agraph(A) + + Notes + ----- + The Graph G will have a dictionary G.graph_attr containing + the default graphviz attributes for graphs, nodes and edges. + + Default node attributes will be in the dictionary G.node_attr + which is keyed by node. + + Edge attributes will be returned as edge data in G. With + edge_attr=False the edge data will be the Graphviz edge weight + attribute or the value 1 if no edge weight attribute is found. 
+ + """ + if create_using is None: + if A.is_directed(): + if A.is_strict(): + create_using = nx.DiGraph + else: + create_using = nx.MultiDiGraph + else: + if A.is_strict(): + create_using = nx.Graph + else: + create_using = nx.MultiGraph + + # assign defaults + N = nx.empty_graph(0, create_using) + if A.name is not None: + N.name = A.name + + # add graph attributes + N.graph.update(A.graph_attr) + + # add nodes, attributes to N.node_attr + for n in A.nodes(): + str_attr = {str(k): v for k, v in n.attr.items()} + N.add_node(str(n), **str_attr) + + # add edges, assign edge data as dictionary of attributes + for e in A.edges(): + u, v = str(e[0]), str(e[1]) + attr = dict(e.attr) + str_attr = {str(k): v for k, v in attr.items()} + if not N.is_multigraph(): + if e.name is not None: + str_attr["key"] = e.name + N.add_edge(u, v, **str_attr) + else: + N.add_edge(u, v, key=e.name, **str_attr) + + # add default attributes for graph, nodes, and edges + # hang them on N.graph_attr + N.graph["graph"] = dict(A.graph_attr) + N.graph["node"] = dict(A.node_attr) + N.graph["edge"] = dict(A.edge_attr) + return N + + +def to_agraph(N): + """Returns a pygraphviz graph from a NetworkX graph N. + + Parameters + ---------- + N : NetworkX graph + A graph created with NetworkX + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + + Notes + ----- + If N has an dict N.graph_attr an attempt will be made first + to copy properties attached to the graph (see from_agraph) + and then updated with the calling arguments if any. + + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "requires pygraphviz " "http://pygraphviz.github.io/" + ) from err + directed = N.is_directed() + strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph() + + for node in N: + if "pos" in N.nodes[node]: + N.nodes[node]["pos"] = "{},{}!".format( + N.nodes[node]["pos"][0], N.nodes[node]["pos"][1] + ) + + A = pygraphviz.AGraph(name=N.name, strict=strict, directed=directed) + + # default graph attributes + A.graph_attr.update(N.graph.get("graph", {})) + A.node_attr.update(N.graph.get("node", {})) + A.edge_attr.update(N.graph.get("edge", {})) + + A.graph_attr.update( + (k, v) for k, v in N.graph.items() if k not in ("graph", "node", "edge") + ) + + # add nodes + for n, nodedata in N.nodes(data=True): + A.add_node(n) + # Add node data + a = A.get_node(n) + a.attr.update({k: str(v) for k, v in nodedata.items()}) + + # loop over edges + if N.is_multigraph(): + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edgedata = {k: str(v) for k, v in edgedata.items() if k != "key"} + A.add_edge(u, v, key=str(key)) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + else: + for u, v, edgedata in N.edges(data=True): + str_edgedata = {k: str(v) for k, v in edgedata.items()} + A.add_edge(u, v) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + return A + + +def write_dot(G, path): + """Write NetworkX graph G to Graphviz dot format on path. + + Parameters + ---------- + G : graph + A networkx graph + path : filename + Filename or file handle to write + + Notes + ----- + To use a specific graph layout, call ``A.layout`` prior to `write_dot`. + Note that some graphviz layouts are not guaranteed to be deterministic, + see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info. 
+    """
+    A = to_agraph(G)
+    A.write(path)
+    A.clear()
+    return
+
+
+@nx._dispatch(name="agraph_read_dot", graphs=None)
+def read_dot(path):
+    """Returns a NetworkX graph from a dot file on path.
+
+    Parameters
+    ----------
+    path : file or string
+        File name or file handle to read.
+    """
+    try:
+        import pygraphviz
+    except ImportError as err:
+        raise ImportError(
+            "read_dot() requires pygraphviz http://pygraphviz.github.io/"
+        ) from err
+    A = pygraphviz.AGraph(file=path)
+    gr = from_agraph(A)
+    A.clear()
+    return gr
+
+
+def graphviz_layout(G, prog="neato", root=None, args=""):
+    """Create node positions for G using Graphviz.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A graph created with NetworkX
+    prog : string
+        Name of Graphviz layout program
+    root : string, optional
+        Root node for twopi layout
+    args : string, optional
+        Extra arguments to Graphviz layout program
+
+    Returns
+    -------
+    Dictionary of (x, y) positions keyed by node.
+
+    Examples
+    --------
+    >>> G = nx.petersen_graph()
+    >>> pos = nx.nx_agraph.graphviz_layout(G)
+    >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot")
+
+    Notes
+    -----
+    This is a wrapper for pygraphviz_layout.
+
+    Note that some graphviz layouts are not guaranteed to be deterministic,
+    see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
+    """
+    return pygraphviz_layout(G, prog=prog, root=root, args=args)
+
+
+def pygraphviz_layout(G, prog="neato", root=None, args=""):
+    """Create node positions for G using Graphviz.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A graph created with NetworkX
+    prog : string
+        Name of Graphviz layout program
+    root : string, optional
+        Root node for twopi layout
+    args : string, optional
+        Extra arguments to Graphviz layout program
+
+    Returns
+    -------
+    node_pos : dict
+        Dictionary of (x, y) positions keyed by node.
+
+    Examples
+    --------
+    >>> G = nx.petersen_graph()
+    >>> pos = nx.nx_agraph.graphviz_layout(G)
+    >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot")
+
+    Notes
+    -----
+    If you use complex node objects, they may have the same string
+    representation and GraphViz could treat them as the same node.
+    The layout may assign both nodes a single location. See Issue #1568.
+    If this occurs in your case, consider relabeling the nodes just
+    for the layout computation using something similar to::
+
+        >>> H = nx.convert_node_labels_to_integers(G, label_attribute="node_label")
+        >>> H_layout = nx.nx_agraph.pygraphviz_layout(H, prog="dot")
+        >>> G_layout = {H.nodes[n]["node_label"]: p for n, p in H_layout.items()}
+
+    Note that some graphviz layouts are not guaranteed to be deterministic,
+    see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
+    """
+    try:
+        import pygraphviz
+    except ImportError as err:
+        raise ImportError(
+            "requires pygraphviz http://pygraphviz.github.io/"
+        ) from err
+    if root is not None:
+        # keep a separating space so -Groot does not fuse with existing args
+        args += f" -Groot={root}"
+    A = to_agraph(G)
+    A.layout(prog=prog, args=args)
+    node_pos = {}
+    for n in G:
+        node = pygraphviz.Node(A, n)
+        try:
+            xs = node.attr["pos"].split(",")
+            node_pos[n] = tuple(float(x) for x in xs)
+        except (KeyError, ValueError, AttributeError):
+            # the node has no usable "pos" attribute; fall back to the origin
+            print("no position for node", n)
+            node_pos[n] = (0.0, 0.0)
+    return node_pos
+
+
+@nx.utils.open_file(5, "w+b")
+def view_pygraphviz(
+    G, edgelabel=None, prog="dot", args="", suffix="", path=None, show=True
+):
+    """Views the graph G using the specified layout algorithm.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph to draw.
+    edgelabel : str, callable, None
+        If a string, then it specifies the edge attribute to be displayed
+        on the edge labels. If a callable, then it is called for each
+        edge and it should return the string to be displayed on the edges.
+        The function signature of `edgelabel` should be edgelabel(data),
+        where `data` is the edge attribute dictionary.
+    prog : string
+        Name of Graphviz layout program.
+    args : str
+        Additional arguments to pass to the Graphviz layout program.
+    suffix : str
+        If `path` is None, we save to a temporary file. The value of
+        `suffix` will appear at the tail end of the temporary filename.
+    path : str, None
+        The filename used to save the image. If None, save to a temporary
+        file. File formats are the same as those from pygraphviz.agraph.draw.
+    show : bool, default = True
+        Whether to display the graph with :mod:`PIL.Image.show`,
+        default is `True`. If `False`, the rendered graph is still available
+        at `path`.
+
+    Returns
+    -------
+    path : str
+        The filename of the generated image.
+    A : PyGraphviz graph
+        The PyGraphviz graph instance used to generate the image.
+
+    Notes
+    -----
+    If this function is called in succession too quickly, sometimes the
+    image is not displayed. So you might consider time.sleep(.5) between
+    calls if you experience problems.
+
+    Note that some graphviz layouts are not guaranteed to be deterministic,
+    see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
+
+    """
+    if not len(G):
+        raise nx.NetworkXException("An empty graph cannot be drawn.")
+
+    # If we are providing default values for graphviz, these must be set
+    # before any nodes or edges are added to the PyGraphviz graph object.
+    # The reason for this is that default values only affect incoming objects.
+    # If you change the default values after the objects have been added,
+    # then they inherit no value and are set only if explicitly set.
+
+    # to_agraph() uses these values.
+    attrs = ["edge", "node", "graph"]
+    for attr in attrs:
+        if attr not in G.graph:
+            G.graph[attr] = {}
+
+    # These are the default values.
+    edge_attrs = {"fontsize": "10"}
+    node_attrs = {
+        "style": "filled",
+        "fillcolor": "#0000FF40",
+        "height": "0.75",
+        "width": "0.75",
+        "shape": "circle",
+    }
+    graph_attrs = {}
+
+    def update_attrs(which, attrs):
+        # Update graph attributes. Return the list of keys that were added.
+        added = []
+        for k, v in attrs.items():
+            if k not in G.graph[which]:
+                G.graph[which][k] = v
+                added.append(k)
+        return added
+
+    def clean_attrs(which, added):
+        # Remove the attributes added above, leaving user-set keys intact.
+        for attr in added:
+            del G.graph[which][attr]
+        if not G.graph[which]:
+            del G.graph[which]
+
+    # Update all default values, remembering which keys we actually added
+    added_edge = update_attrs("edge", edge_attrs)
+    added_node = update_attrs("node", node_attrs)
+    added_graph = update_attrs("graph", graph_attrs)
+
+    # Convert to agraph, so we inherit default values
+    A = to_agraph(G)
+
+    # Remove only the default values we added to the original graph.
+    clean_attrs("edge", added_edge)
+    clean_attrs("node", added_node)
+    clean_attrs("graph", added_graph)
+
+    # If the user passed in an edgelabel, we update the labels for all edges.
+    if edgelabel is not None:
+        if not callable(edgelabel):
+
+            def func(data):
+                return "".join(["  ", str(data[edgelabel]), "  "])
+
+        else:
+            func = edgelabel
+
+        # update all the edge labels
+        if G.is_multigraph():
+            for u, v, key, data in G.edges(keys=True, data=True):
+                # PyGraphviz doesn't convert the key to a string.
See #339 + edge = A.get_edge(u, v, str(key)) + edge.attr["label"] = str(func(data)) + else: + for u, v, data in G.edges(data=True): + edge = A.get_edge(u, v) + edge.attr["label"] = str(func(data)) + + if path is None: + ext = "png" + if suffix: + suffix = f"_{suffix}.{ext}" + else: + suffix = f".{ext}" + path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False) + else: + # Assume the decorator worked and it is a file-object. + pass + + # Write graph to file + A.draw(path=path, format=None, prog=prog, args=args) + path.close() + + # Show graph in a new window (depends on platform configuration) + if show: + from PIL import Image + + Image.open(path.name).show() + + return path.name, A diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/__pycache__/test_agraph.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/__pycache__/test_agraph.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cda3513b5d594db18268a5a7ae338394140f572 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/__pycache__/test_agraph.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_latex.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_latex.py new file mode 100644 index 0000000000000000000000000000000000000000..14ab5423299c3d3f7a606d1df81a30d77877910b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_latex.py @@ -0,0 +1,292 @@ +import pytest + +import networkx as nx + + +def test_tikz_attributes(): + G = nx.path_graph(4, create_using=nx.DiGraph) + pos = {n: (n, n) for n in G} + + G.add_edge(0, 0) + G.edges[(0, 0)]["label"] = "Loop" + G.edges[(0, 0)]["label_options"] = "midway" + + G.nodes[0]["style"] = "blue" + G.nodes[1]["style"] = "line width=3,draw" + G.nodes[2]["style"] = "circle,draw,blue!50" + G.nodes[3]["label"] = "Stop" + G.edges[(0, 1)]["label"] = "1st Step" + G.edges[(0, 1)]["label_options"] = "near end" + G.edges[(2, 3)]["label"] = "3rd Step" + G.edges[(2, 3)]["label_options"] = "near start" + G.edges[(2, 3)]["style"] = "bend left,green" + G.edges[(1, 2)]["label"] = "2nd" + G.edges[(1, 2)]["label_options"] = "pos=0.5" + G.edges[(1, 2)]["style"] = ">->,bend right,line width=3,green!90" + + output_tex = nx.to_latex( + G, + pos=pos, + as_document=False, + tikz_options="[scale=3]", + node_options="style", + edge_options="style", + node_label="label", + edge_label="label", + edge_label_options="label_options", + ) + expected_tex = r"""\begin{figure} + \begin{tikzpicture}[scale=3] + \draw + (0, 0) node[blue] (0){0} + (1, 1) node[line width=3,draw] (1){1} + (2, 2) node[circle,draw,blue!50] (2){2} + (3, 3) node (3){Stop}; + \begin{scope}[->] + \draw (0) to node[near end] {1st Step} (1); + \draw[loop,] (0) to node[midway] {Loop} (0); + \draw[>->,bend right,line width=3,green!90] (1) to node[pos=0.5] {2nd} (2); + \draw[bend left,green] (2) to node[near start] {3rd Step} (3); + \end{scope} + \end{tikzpicture} +\end{figure}""" + + assert output_tex == expected_tex + # print(output_tex) + # # Pretty way to assert that A.to_document() == expected_tex + # content_same = True + # for aa, bb in zip(expected_tex.split("\n"), output_tex.split("\n")): + # if aa != bb: + # content_same = False + # print(f"-{aa}|\n+{bb}|") + # assert content_same + + +def 
test_basic_multiple_graphs(): + H1 = nx.path_graph(4) + H2 = nx.complete_graph(4) + H3 = nx.path_graph(8) + H4 = nx.complete_graph(8) + captions = [ + "Path on 4 nodes", + "Complete graph on 4 nodes", + "Path on 8 nodes", + "Complete graph on 8 nodes", + ] + labels = ["fig2a", "fig2b", "fig2c", "fig2d"] + latex_code = nx.to_latex( + [H1, H2, H3, H4], + n_rows=2, + sub_captions=captions, + sub_labels=labels, + ) + # print(latex_code) + assert "begin{document}" in latex_code + assert "begin{figure}" in latex_code + assert latex_code.count("begin{subfigure}") == 4 + assert latex_code.count("tikzpicture") == 8 + assert latex_code.count("[-]") == 4 + + +def test_basic_tikz(): + expected_tex = r"""\documentclass{report} +\usepackage{tikz} +\usepackage{subcaption} + +\begin{document} +\begin{figure} + \begin{subfigure}{0.5\textwidth} + \begin{tikzpicture}[scale=2] + \draw[gray!90] + (0.749, 0.702) node[red!90] (0){0} + (1.0, -0.014) node[red!90] (1){1} + (-0.777, -0.705) node (2){2} + (-0.984, 0.042) node (3){3} + (-0.028, 0.375) node[cyan!90] (4){4} + (-0.412, 0.888) node (5){5} + (0.448, -0.856) node (6){6} + (0.003, -0.431) node[cyan!90] (7){7}; + \begin{scope}[->,gray!90] + \draw (0) to (4); + \draw (0) to (5); + \draw (0) to (6); + \draw (0) to (7); + \draw (1) to (4); + \draw (1) to (5); + \draw (1) to (6); + \draw (1) to (7); + \draw (2) to (4); + \draw (2) to (5); + \draw (2) to (6); + \draw (2) to (7); + \draw (3) to (4); + \draw (3) to (5); + \draw (3) to (6); + \draw (3) to (7); + \end{scope} + \end{tikzpicture} + \caption{My tikz number 1 of 2}\label{tikz_1_2} + \end{subfigure} + \begin{subfigure}{0.5\textwidth} + \begin{tikzpicture}[scale=2] + \draw[gray!90] + (0.749, 0.702) node[green!90] (0){0} + (1.0, -0.014) node[green!90] (1){1} + (-0.777, -0.705) node (2){2} + (-0.984, 0.042) node (3){3} + (-0.028, 0.375) node[purple!90] (4){4} + (-0.412, 0.888) node (5){5} + (0.448, -0.856) node (6){6} + (0.003, -0.431) node[purple!90] (7){7}; + \begin{scope}[->,gray!90] + \draw (0) to (4); + \draw (0) to (5); + \draw (0) to (6); + \draw (0) to (7); + \draw (1) to (4); + \draw (1) to (5); + \draw (1) to (6); + \draw (1) to (7); + \draw (2) to (4); + \draw (2) to (5); + \draw (2) to (6); + \draw (2) to (7); + \draw (3) to (4); + \draw (3) to (5); + \draw (3) to (6); + \draw (3) to (7); + \end{scope} + \end{tikzpicture} + \caption{My tikz number 2 of 2}\label{tikz_2_2} + \end{subfigure} + \caption{A graph generated with python and latex.} +\end{figure} +\end{document}""" + + edges = [ + (0, 4), + (0, 5), + (0, 6), + (0, 7), + (1, 4), + (1, 5), + (1, 6), + (1, 7), + (2, 4), + (2, 5), + (2, 6), + (2, 7), + (3, 4), + (3, 5), + (3, 6), + (3, 7), + ] + G = nx.DiGraph() + G.add_nodes_from(range(8)) + G.add_edges_from(edges) + pos = { + 0: (0.7490296171687696, 0.702353520257394), + 1: (1.0, -0.014221357723796535), + 2: (-0.7765783344161441, -0.7054170966808919), + 3: (-0.9842690223417624, 0.04177547602465483), + 4: (-0.02768523817180917, 0.3745724439551441), + 5: (-0.41154855146767433, 0.8880106515525136), + 6: (0.44780153389148264, -0.8561492709269164), + 7: (0.0032499953371383505, -0.43092436645809945), + } + + rc_node_color = {0: "red!90", 1: "red!90", 4: "cyan!90", 7: "cyan!90"} + gp_node_color = {0: "green!90", 1: "green!90", 4: "purple!90", 7: "purple!90"} + + H = G.copy() + nx.set_node_attributes(G, rc_node_color, "color") + nx.set_node_attributes(H, gp_node_color, "color") + + sub_captions = ["My tikz number 1 of 2", "My tikz number 2 of 2"] + sub_labels = ["tikz_1_2", "tikz_2_2"] + + 
output_tex = nx.to_latex( + [G, H], + [pos, pos], + tikz_options="[scale=2]", + default_node_options="gray!90", + default_edge_options="gray!90", + node_options="color", + sub_captions=sub_captions, + sub_labels=sub_labels, + caption="A graph generated with python and latex.", + n_rows=2, + as_document=True, + ) + + assert output_tex == expected_tex + # print(output_tex) + # # Pretty way to assert that A.to_document() == expected_tex + # content_same = True + # for aa, bb in zip(expected_tex.split("\n"), output_tex.split("\n")): + # if aa != bb: + # content_same = False + # print(f"-{aa}|\n+{bb}|") + # assert content_same + + +def test_exception_pos_single_graph(to_latex=nx.to_latex): + # smoke test that pos can be a string + G = nx.path_graph(4) + to_latex(G, pos="pos") + + # must include all nodes + pos = {0: (1, 2), 1: (0, 1), 2: (2, 1)} + with pytest.raises(nx.NetworkXError): + to_latex(G, pos) + + # must have 2 values + pos[3] = (1, 2, 3) + with pytest.raises(nx.NetworkXError): + to_latex(G, pos) + pos[3] = 2 + with pytest.raises(nx.NetworkXError): + to_latex(G, pos) + + # check that passes with 2 values + pos[3] = (3, 2) + to_latex(G, pos) + + +def test_exception_multiple_graphs(to_latex=nx.to_latex): + G = nx.path_graph(3) + pos_bad = {0: (1, 2), 1: (0, 1)} + pos_OK = {0: (1, 2), 1: (0, 1), 2: (2, 1)} + fourG = [G, G, G, G] + fourpos = [pos_OK, pos_OK, pos_OK, pos_OK] + + # input single dict to use for all graphs + to_latex(fourG, pos_OK) + with pytest.raises(nx.NetworkXError): + to_latex(fourG, pos_bad) + + # input list of dicts to use for all graphs + to_latex(fourG, fourpos) + with pytest.raises(nx.NetworkXError): + to_latex(fourG, [pos_bad, pos_bad, pos_bad, pos_bad]) + + # every pos dict must include all nodes + with pytest.raises(nx.NetworkXError): + to_latex(fourG, [pos_OK, pos_OK, pos_bad, pos_OK]) + + # test sub_captions and sub_labels (len must match Gbunch) + with pytest.raises(nx.NetworkXError): + to_latex(fourG, fourpos, sub_captions=["hi", "hi"]) + + with pytest.raises(nx.NetworkXError): + to_latex(fourG, fourpos, sub_labels=["hi", "hi"]) + + # all pass + to_latex(fourG, fourpos, sub_captions=["hi"] * 4, sub_labels=["lbl"] * 4) + + +def test_exception_multigraph(): + G = nx.path_graph(4, create_using=nx.MultiGraph) + G.add_edge(1, 2) + with pytest.raises(nx.NetworkXNotImplemented): + nx.to_latex(G) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_pylab.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_pylab.py new file mode 100644 index 0000000000000000000000000000000000000000..668a627077d664c3df3c557dbc19fc64cb94c83c --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/drawing/tests/test_pylab.py @@ -0,0 +1,791 @@ +"""Unit tests for matplotlib drawing functions.""" +import itertools +import os +import warnings + +import pytest + +mpl = pytest.importorskip("matplotlib") +np = pytest.importorskip("numpy") +mpl.use("PS") +plt = pytest.importorskip("matplotlib.pyplot") +plt.rcParams["text.usetex"] = False + + +import networkx as nx + +barbell = nx.barbell_graph(4, 6) + + +def test_draw(): + try: + functions = [ + nx.draw_circular, + nx.draw_kamada_kawai, + nx.draw_planar, + nx.draw_random, + nx.draw_spectral, + nx.draw_spring, + nx.draw_shell, + ] + options = [{"node_color": "black", "node_size": 100, "width": 3}] + for function, option in itertools.product(functions, options): + function(barbell, **option) + plt.savefig("test.ps") + 
+ finally: + try: + os.unlink("test.ps") + except OSError: + pass + + +def test_draw_shell_nlist(): + try: + nlist = [list(range(4)), list(range(4, 10)), list(range(10, 14))] + nx.draw_shell(barbell, nlist=nlist) + plt.savefig("test.ps") + finally: + try: + os.unlink("test.ps") + except OSError: + pass + + +def test_edge_colormap(): + colors = range(barbell.number_of_edges()) + nx.draw_spring( + barbell, edge_color=colors, width=4, edge_cmap=plt.cm.Blues, with_labels=True + ) + # plt.show() + + +def test_arrows(): + nx.draw_spring(barbell.to_directed()) + # plt.show() + + +@pytest.mark.parametrize( + ("edge_color", "expected"), + ( + (None, "black"), # Default + ("r", "red"), # Non-default color string + (["r"], "red"), # Single non-default color in a list + ((1.0, 1.0, 0.0), "yellow"), # single color as rgb tuple + ([(1.0, 1.0, 0.0)], "yellow"), # single color as rgb tuple in list + ((0, 1, 0, 1), "lime"), # single color as rgba tuple + ([(0, 1, 0, 1)], "lime"), # single color as rgba tuple in list + ("#0000ff", "blue"), # single color hex code + (["#0000ff"], "blue"), # hex code in list + ), +) +@pytest.mark.parametrize("edgelist", (None, [(0, 1)])) +def test_single_edge_color_undirected(edge_color, expected, edgelist): + """Tests ways of specifying all edges have a single color for edges + drawn with a LineCollection""" + + G = nx.path_graph(3) + drawn_edges = nx.draw_networkx_edges( + G, pos=nx.random_layout(G), edgelist=edgelist, edge_color=edge_color + ) + assert mpl.colors.same_color(drawn_edges.get_color(), expected) + + +@pytest.mark.parametrize( + ("edge_color", "expected"), + ( + (None, "black"), # Default + ("r", "red"), # Non-default color string + (["r"], "red"), # Single non-default color in a list + ((1.0, 1.0, 0.0), "yellow"), # single color as rgb tuple + ([(1.0, 1.0, 0.0)], "yellow"), # single color as rgb tuple in list + ((0, 1, 0, 1), "lime"), # single color as rgba tuple + ([(0, 1, 0, 1)], "lime"), # single color as rgba tuple in list + ("#0000ff", "blue"), # single color hex code + (["#0000ff"], "blue"), # hex code in list + ), +) +@pytest.mark.parametrize("edgelist", (None, [(0, 1)])) +def test_single_edge_color_directed(edge_color, expected, edgelist): + """Tests ways of specifying all edges have a single color for edges drawn + with FancyArrowPatches""" + + G = nx.path_graph(3, create_using=nx.DiGraph) + drawn_edges = nx.draw_networkx_edges( + G, pos=nx.random_layout(G), edgelist=edgelist, edge_color=edge_color + ) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), expected) + + +def test_edge_color_tuple_interpretation(): + """If edge_color is a sequence with the same length as edgelist, then each + value in edge_color is mapped onto each edge via colormap.""" + G = nx.path_graph(6, create_using=nx.DiGraph) + pos = {n: (n, n) for n in range(len(G))} + + # num edges != 3 or 4 --> edge_color interpreted as rgb(a) + for ec in ((0, 0, 1), (0, 0, 1, 1)): + # More than 4 edges + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=ec) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), ec) + # Fewer than 3 edges + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2)], edge_color=ec + ) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), ec) + + # num edges == 3, len(edge_color) == 4: interpreted as rgba + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3)], edge_color=(0, 0, 1, 1) + ) + for fap in drawn_edges: + assert 
mpl.colors.same_color(fap.get_edgecolor(), "blue") + + # num edges == 4, len(edge_color) == 3: interpreted as rgb + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3), (3, 4)], edge_color=(0, 0, 1) + ) + for fap in drawn_edges: + assert mpl.colors.same_color(fap.get_edgecolor(), "blue") + + # num edges == len(edge_color) == 3: interpreted with cmap, *not* as rgb + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3)], edge_color=(0, 0, 1) + ) + assert mpl.colors.same_color( + drawn_edges[0].get_edgecolor(), drawn_edges[1].get_edgecolor() + ) + for fap in drawn_edges: + assert not mpl.colors.same_color(fap.get_edgecolor(), "blue") + + # num edges == len(edge_color) == 4: interpreted with cmap, *not* as rgba + drawn_edges = nx.draw_networkx_edges( + G, pos, edgelist=[(0, 1), (1, 2), (2, 3), (3, 4)], edge_color=(0, 0, 1, 1) + ) + assert mpl.colors.same_color( + drawn_edges[0].get_edgecolor(), drawn_edges[1].get_edgecolor() + ) + assert mpl.colors.same_color( + drawn_edges[2].get_edgecolor(), drawn_edges[3].get_edgecolor() + ) + for fap in drawn_edges: + assert not mpl.colors.same_color(fap.get_edgecolor(), "blue") + + +def test_fewer_edge_colors_than_num_edges_directed(): + """Test that the edge colors are cycled when there are fewer specified + colors than edges.""" + G = barbell.to_directed() + pos = nx.random_layout(barbell) + edgecolors = ("r", "g", "b") + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=edgecolors) + for fap, expected in zip(drawn_edges, itertools.cycle(edgecolors)): + assert mpl.colors.same_color(fap.get_edgecolor(), expected) + + +def test_more_edge_colors_than_num_edges_directed(): + """Test that extra edge colors are ignored when there are more specified + colors than edges.""" + G = nx.path_graph(4, create_using=nx.DiGraph) # 3 edges + pos = nx.random_layout(barbell) + edgecolors = ("r", "g", "b", "c") # 4 edge colors + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=edgecolors) + for fap, expected in zip(drawn_edges, edgecolors[:-1]): + assert mpl.colors.same_color(fap.get_edgecolor(), expected) + + +def test_edge_color_string_with_global_alpha_undirected(): + edge_collection = nx.draw_networkx_edges( + barbell, + pos=nx.random_layout(barbell), + edgelist=[(0, 1), (1, 2)], + edge_color="purple", + alpha=0.2, + ) + ec = edge_collection.get_color().squeeze() # as rgba tuple + assert len(edge_collection.get_paths()) == 2 + assert mpl.colors.same_color(ec[:-1], "purple") + assert ec[-1] == 0.2 + + +def test_edge_color_string_with_global_alpha_directed(): + drawn_edges = nx.draw_networkx_edges( + barbell.to_directed(), + pos=nx.random_layout(barbell), + edgelist=[(0, 1), (1, 2)], + edge_color="purple", + alpha=0.2, + ) + assert len(drawn_edges) == 2 + for fap in drawn_edges: + ec = fap.get_edgecolor() # As rgba tuple + assert mpl.colors.same_color(ec[:-1], "purple") + assert ec[-1] == 0.2 + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_edge_width_default_value(graph_type): + """Test the default linewidth for edges drawn either via LineCollection or + FancyArrowPatches.""" + G = nx.path_graph(2, create_using=graph_type) + pos = {n: (n, n) for n in range(len(G))} + drawn_edges = nx.draw_networkx_edges(G, pos) + if isinstance(drawn_edges, list): # directed case: list of FancyArrowPatch + drawn_edges = drawn_edges[0] + assert drawn_edges.get_linewidth() == 1 + + +@pytest.mark.parametrize( + ("edgewidth", "expected"), + ( + (3, 3), # single-value, non-default + ([3], 3), # 
Single value as a list + ), +) +def test_edge_width_single_value_undirected(edgewidth, expected): + G = nx.path_graph(4) + pos = {n: (n, n) for n in range(len(G))} + drawn_edges = nx.draw_networkx_edges(G, pos, width=edgewidth) + assert len(drawn_edges.get_paths()) == 3 + assert drawn_edges.get_linewidth() == expected + + +@pytest.mark.parametrize( + ("edgewidth", "expected"), + ( + (3, 3), # single-value, non-default + ([3], 3), # Single value as a list + ), +) +def test_edge_width_single_value_directed(edgewidth, expected): + G = nx.path_graph(4, create_using=nx.DiGraph) + pos = {n: (n, n) for n in range(len(G))} + drawn_edges = nx.draw_networkx_edges(G, pos, width=edgewidth) + assert len(drawn_edges) == 3 + for fap in drawn_edges: + assert fap.get_linewidth() == expected + + +@pytest.mark.parametrize( + "edgelist", + ( + [(0, 1), (1, 2), (2, 3)], # one width specification per edge + None, # fewer widths than edges - widths cycle + [(0, 1), (1, 2)], # More widths than edges - unused widths ignored + ), +) +def test_edge_width_sequence(edgelist): + G = barbell.to_directed() + pos = nx.random_layout(G) + widths = (0.5, 2.0, 12.0) + drawn_edges = nx.draw_networkx_edges(G, pos, edgelist=edgelist, width=widths) + for fap, expected_width in zip(drawn_edges, itertools.cycle(widths)): + assert fap.get_linewidth() == expected_width + + +def test_edge_color_with_edge_vmin_vmax(): + """Test that edge_vmin and edge_vmax properly set the dynamic range of the + color map when num edges == len(edge_colors).""" + G = nx.path_graph(3, create_using=nx.DiGraph) + pos = nx.random_layout(G) + # Extract colors from the original (unscaled) colormap + drawn_edges = nx.draw_networkx_edges(G, pos, edge_color=[0, 1.0]) + orig_colors = [e.get_edgecolor() for e in drawn_edges] + # Colors from scaled colormap + drawn_edges = nx.draw_networkx_edges( + G, pos, edge_color=[0.2, 0.8], edge_vmin=0.2, edge_vmax=0.8 + ) + scaled_colors = [e.get_edgecolor() for e in drawn_edges] + assert mpl.colors.same_color(orig_colors, scaled_colors) + + +def test_directed_edges_linestyle_default(): + """Test default linestyle for edges drawn with FancyArrowPatches.""" + G = nx.path_graph(4, create_using=nx.DiGraph) # Graph with 3 edges + pos = {n: (n, n) for n in range(len(G))} + + # edge with default style + drawn_edges = nx.draw_networkx_edges(G, pos) + assert len(drawn_edges) == 3 + for fap in drawn_edges: + assert fap.get_linestyle() == "solid" + + +@pytest.mark.parametrize( + "style", + ( + "dashed", # edge with string style + "--", # edge with simplified string style + (1, (1, 1)), # edge with (offset, onoffseq) style + ), +) +def test_directed_edges_linestyle_single_value(style): + """Tests support for specifying linestyles with a single value to be applied to + all edges in ``draw_networkx_edges`` for FancyArrowPatch outputs + (e.g. 
directed edges).""" + + G = nx.path_graph(4, create_using=nx.DiGraph) # Graph with 3 edges + pos = {n: (n, n) for n in range(len(G))} + + drawn_edges = nx.draw_networkx_edges(G, pos, style=style) + assert len(drawn_edges) == 3 + for fap in drawn_edges: + assert fap.get_linestyle() == style + + +@pytest.mark.parametrize( + "style_seq", + ( + ["dashed"], # edge with string style in list + ["--"], # edge with simplified string style in list + [(1, (1, 1))], # edge with (offset, onoffseq) style in list + ["--", "-", ":"], # edges with styles for each edge + ["--", "-"], # edges with fewer styles than edges (styles cycle) + ["--", "-", ":", "-."], # edges with more styles than edges (extra unused) + ), +) +def test_directed_edges_linestyle_sequence(style_seq): + """Tests support for specifying linestyles with sequences in + ``draw_networkx_edges`` for FancyArrowPatch outputs (e.g. directed edges).""" + + G = nx.path_graph(4, create_using=nx.DiGraph) # Graph with 3 edges + pos = {n: (n, n) for n in range(len(G))} + + drawn_edges = nx.draw_networkx_edges(G, pos, style=style_seq) + assert len(drawn_edges) == 3 + for fap, style in zip(drawn_edges, itertools.cycle(style_seq)): + assert fap.get_linestyle() == style + + +def test_labels_and_colors(): + G = nx.cubical_graph() + pos = nx.spring_layout(G) # positions for all nodes + # nodes + nx.draw_networkx_nodes( + G, pos, nodelist=[0, 1, 2, 3], node_color="r", node_size=500, alpha=0.75 + ) + nx.draw_networkx_nodes( + G, + pos, + nodelist=[4, 5, 6, 7], + node_color="b", + node_size=500, + alpha=[0.25, 0.5, 0.75, 1.0], + ) + # edges + nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5) + nx.draw_networkx_edges( + G, + pos, + edgelist=[(0, 1), (1, 2), (2, 3), (3, 0)], + width=8, + alpha=0.5, + edge_color="r", + ) + nx.draw_networkx_edges( + G, + pos, + edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)], + width=8, + alpha=0.5, + edge_color="b", + ) + nx.draw_networkx_edges( + G, + pos, + edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)], + arrows=True, + min_source_margin=0.5, + min_target_margin=0.75, + width=8, + edge_color="b", + ) + # some math labels + labels = {} + labels[0] = r"$a$" + labels[1] = r"$b$" + labels[2] = r"$c$" + labels[3] = r"$d$" + labels[4] = r"$\alpha$" + labels[5] = r"$\beta$" + labels[6] = r"$\gamma$" + labels[7] = r"$\delta$" + nx.draw_networkx_labels(G, pos, labels, font_size=16) + nx.draw_networkx_edge_labels(G, pos, edge_labels=None, rotate=False) + nx.draw_networkx_edge_labels(G, pos, edge_labels={(4, 5): "4-5"}) + # plt.show() + + +@pytest.mark.mpl_image_compare +def test_house_with_colors(): + G = nx.house_graph() + # explicitly set positions + fig, ax = plt.subplots() + pos = {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1), 4: (0.5, 2.0)} + + # Plot nodes with different properties for the "wall" and "roof" nodes + nx.draw_networkx_nodes( + G, + pos, + node_size=3000, + nodelist=[0, 1, 2, 3], + node_color="tab:blue", + ) + nx.draw_networkx_nodes( + G, pos, node_size=2000, nodelist=[4], node_color="tab:orange" + ) + nx.draw_networkx_edges(G, pos, alpha=0.5, width=6) + # Customize axes + ax.margins(0.11) + plt.tight_layout() + plt.axis("off") + return fig + + +def test_axes(): + fig, ax = plt.subplots() + nx.draw(barbell, ax=ax) + nx.draw_networkx_edge_labels(barbell, nx.circular_layout(barbell), ax=ax) + + +def test_empty_graph(): + G = nx.Graph() + nx.draw(G) + + +def test_draw_empty_nodes_return_values(): + # See Issue #3833 + import matplotlib.collections # call as mpl.collections + + G = nx.Graph([(1, 2), (2, 3)]) + DG = nx.DiGraph([(1, 
2), (2, 3)])
+    pos = nx.circular_layout(G)
+    assert isinstance(
+        nx.draw_networkx_nodes(G, pos, nodelist=[]), mpl.collections.PathCollection
+    )
+    assert isinstance(
+        nx.draw_networkx_nodes(DG, pos, nodelist=[]), mpl.collections.PathCollection
+    )
+
+    # drawing empty edges used to return an empty LineCollection or empty list.
+    # Now it is always an empty list (because edges are now lists of FancyArrows)
+    assert nx.draw_networkx_edges(G, pos, edgelist=[], arrows=True) == []
+    assert nx.draw_networkx_edges(G, pos, edgelist=[], arrows=False) == []
+    assert nx.draw_networkx_edges(DG, pos, edgelist=[], arrows=False) == []
+    assert nx.draw_networkx_edges(DG, pos, edgelist=[], arrows=True) == []
+
+
+def test_multigraph_edgelist_tuples():
+    # See Issue #3295
+    G = nx.path_graph(3, create_using=nx.MultiDiGraph)
+    nx.draw_networkx(G, edgelist=[(0, 1, 0)])
+    nx.draw_networkx(G, edgelist=[(0, 1, 0)], node_size=[10, 20, 0])
+
+
+def test_alpha_iter():
+    pos = nx.random_layout(barbell)
+    fig = plt.figure()
+    # with fewer alpha elements than nodes
+    fig.add_subplot(131)  # Each test in a new axis object
+    nx.draw_networkx_nodes(barbell, pos, alpha=[0.1, 0.2])
+    # with equal alpha elements and nodes
+    num_nodes = len(barbell.nodes)
+    alpha = [x / num_nodes for x in range(num_nodes)]
+    colors = range(num_nodes)
+    fig.add_subplot(132)
+    nx.draw_networkx_nodes(barbell, pos, node_color=colors, alpha=alpha)
+    # with more alpha elements than nodes
+    alpha.append(1)
+    fig.add_subplot(133)
+    nx.draw_networkx_nodes(barbell, pos, alpha=alpha)
+
+
+def test_error_invalid_kwds():
+    with pytest.raises(ValueError, match="Received invalid argument"):
+        nx.draw(barbell, foo="bar")
+
+
+def test_draw_networkx_arrowsize_incorrect_size():
+    G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 3)])
+    arrowsize = [1, 2, 3]
+    with pytest.raises(
+        ValueError, match="arrowsize should have the same length as edgelist"
+    ):
+        nx.draw(G, arrowsize=arrowsize)
+
+
+@pytest.mark.parametrize("arrowsize", (30, [10, 20, 30]))
+def test_draw_edges_arrowsize(arrowsize):
+    G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
+    pos = {0: (0, 0), 1: (0, 1), 2: (1, 0)}
+    edges = nx.draw_networkx_edges(G, pos=pos, arrowsize=arrowsize)
+
+    arrowsize = itertools.repeat(arrowsize) if isinstance(arrowsize, int) else arrowsize
+
+    for fap, expected in zip(edges, arrowsize):
+        assert isinstance(fap, mpl.patches.FancyArrowPatch)
+        assert fap.get_mutation_scale() == expected
+
+
+def test_np_edgelist():
+    # see issue #4129
+    nx.draw_networkx(barbell, edgelist=np.array([(0, 2), (0, 3)]))
+
+
+def test_draw_nodes_missing_node_from_position():
+    G = nx.path_graph(3)
+    pos = {0: (0, 0), 1: (1, 1)}  # No position for node 2
+    with pytest.raises(nx.NetworkXError, match="has no position"):
+        nx.draw_networkx_nodes(G, pos)
+
+
+# NOTE: parametrizing on marker to test both branches of internal
+# nx.draw_networkx_edges.to_marker_edge function
+@pytest.mark.parametrize("node_shape", ("o", "s"))
+def test_draw_edges_min_source_target_margins(node_shape):
+    """Test that there is a wider gap between the node and the start of an
+    incident edge when min_source_margin is specified.
+
+    This test checks that using the min_{source/target}_margin kwargs results
+    in more padding between the edges and the source and target nodes.
+ As a crude visual example, let 's' and 't' represent source and target + nodes, respectively: + + Default: + s-----------------------------t + + With margins: + s ----------------------- t + + """ + # Create a single axis object to get consistent pixel coords across + # multiple draws + fig, ax = plt.subplots() + G = nx.DiGraph([(0, 1)]) + pos = {0: (0, 0), 1: (1, 0)} # horizontal layout + # Get leftmost and rightmost points of the FancyArrowPatch object + # representing the edge between nodes 0 and 1 (in pixel coordinates) + default_patch = nx.draw_networkx_edges(G, pos, ax=ax, node_shape=node_shape)[0] + default_extent = default_patch.get_extents().corners()[::2, 0] + # Now, do the same but with "padding" for the source and target via the + # min_{source/target}_margin kwargs + padded_patch = nx.draw_networkx_edges( + G, + pos, + ax=ax, + node_shape=node_shape, + min_source_margin=100, + min_target_margin=100, + )[0] + padded_extent = padded_patch.get_extents().corners()[::2, 0] + + # With padding, the left-most extent of the edge should be further to the + # right + assert padded_extent[0] > default_extent[0] + # And the rightmost extent of the edge, further to the left + assert padded_extent[1] < default_extent[1] + + +def test_nonzero_selfloop_with_single_node(): + """Ensure that selfloop extent is non-zero when there is only one node.""" + # Create explicit axis object for test + fig, ax = plt.subplots() + # Graph with single node + self loop + G = nx.DiGraph() + G.add_node(0) + G.add_edge(0, 0) + # Draw + patch = nx.draw_networkx_edges(G, {0: (0, 0)})[0] + # The resulting patch must have non-zero extent + bbox = patch.get_extents() + assert bbox.width > 0 and bbox.height > 0 + # Cleanup + plt.delaxes(ax) + + +def test_nonzero_selfloop_with_single_edge_in_edgelist(): + """Ensure that selfloop extent is non-zero when only a single edge is + specified in the edgelist. + """ + # Create explicit axis object for test + fig, ax = plt.subplots() + # Graph with selfloop + G = nx.path_graph(2, create_using=nx.DiGraph) + G.add_edge(1, 1) + pos = {n: (n, n) for n in G.nodes} + # Draw only the selfloop edge via the `edgelist` kwarg + patch = nx.draw_networkx_edges(G, pos, edgelist=[(1, 1)])[0] + # The resulting patch must have non-zero extent + bbox = patch.get_extents() + assert bbox.width > 0 and bbox.height > 0 + # Cleanup + plt.delaxes(ax) + + +def test_apply_alpha(): + """Test apply_alpha when there is a mismatch between the number of + supplied colors and elements. 
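+
+    A condensed sketch of the contract exercised here (the shape claim is
+    an assumption based on the alpha-column assertion below):
+
+    >>> rgba = nx.drawing.nx_pylab.apply_alpha(["r", "g", "b"], 0.5, [0, 1, 2])  # doctest: +SKIP
+    >>> rgba.shape  # one rgba row per element, alpha in the last column  # doctest: +SKIP
+    (3, 4)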
+ """ + nodelist = [0, 1, 2] + colorlist = ["r", "g", "b"] + alpha = 0.5 + rgba_colors = nx.drawing.nx_pylab.apply_alpha(colorlist, alpha, nodelist) + assert all(rgba_colors[:, -1] == alpha) + + +def test_draw_edges_toggling_with_arrows_kwarg(): + """ + The `arrows` keyword argument is used as a 3-way switch to select which + type of object to use for drawing edges: + - ``arrows=None`` -> default (FancyArrowPatches for directed, else LineCollection) + - ``arrows=True`` -> FancyArrowPatches + - ``arrows=False`` -> LineCollection + """ + import matplotlib.collections + import matplotlib.patches + + UG = nx.path_graph(3) + DG = nx.path_graph(3, create_using=nx.DiGraph) + pos = {n: (n, n) for n in UG} + + # Use FancyArrowPatches when arrows=True, regardless of graph type + for G in (UG, DG): + edges = nx.draw_networkx_edges(G, pos, arrows=True) + assert len(edges) == len(G.edges) + assert isinstance(edges[0], mpl.patches.FancyArrowPatch) + + # Use LineCollection when arrows=False, regardless of graph type + for G in (UG, DG): + edges = nx.draw_networkx_edges(G, pos, arrows=False) + assert isinstance(edges, mpl.collections.LineCollection) + + # Default behavior when arrows=None: FAPs for directed, LC's for undirected + edges = nx.draw_networkx_edges(UG, pos) + assert isinstance(edges, mpl.collections.LineCollection) + edges = nx.draw_networkx_edges(DG, pos) + assert len(edges) == len(G.edges) + assert isinstance(edges[0], mpl.patches.FancyArrowPatch) + + +@pytest.mark.parametrize("drawing_func", (nx.draw, nx.draw_networkx)) +def test_draw_networkx_arrows_default_undirected(drawing_func): + import matplotlib.collections + + G = nx.path_graph(3) + fig, ax = plt.subplots() + drawing_func(G, ax=ax) + assert any(isinstance(c, mpl.collections.LineCollection) for c in ax.collections) + assert not ax.patches + plt.delaxes(ax) + + +@pytest.mark.parametrize("drawing_func", (nx.draw, nx.draw_networkx)) +def test_draw_networkx_arrows_default_directed(drawing_func): + import matplotlib.collections + + G = nx.path_graph(3, create_using=nx.DiGraph) + fig, ax = plt.subplots() + drawing_func(G, ax=ax) + assert not any( + isinstance(c, mpl.collections.LineCollection) for c in ax.collections + ) + assert ax.patches + plt.delaxes(ax) + + +def test_edgelist_kwarg_not_ignored(): + # See gh-4994 + G = nx.path_graph(3) + G.add_edge(0, 0) + fig, ax = plt.subplots() + nx.draw(G, edgelist=[(0, 1), (1, 2)], ax=ax) # Exclude self-loop from edgelist + assert not ax.patches + plt.delaxes(ax) + + +def test_draw_networkx_edge_label_multiedge_exception(): + """ + draw_networkx_edge_labels should raise an informative error message when + the edge label includes keys + """ + exception_msg = "draw_networkx_edge_labels does not support multiedges" + G = nx.MultiGraph() + G.add_edge(0, 1, weight=10) + G.add_edge(0, 1, weight=20) + edge_labels = nx.get_edge_attributes(G, "weight") # Includes edge keys + pos = {n: (n, n) for n in G} + with pytest.raises(nx.NetworkXError, match=exception_msg): + nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels) + + +def test_draw_networkx_edge_label_empty_dict(): + """Regression test for draw_networkx_edge_labels with empty dict. 
See + gh-5372.""" + G = nx.path_graph(3) + pos = {n: (n, n) for n in G.nodes} + assert nx.draw_networkx_edge_labels(G, pos, edge_labels={}) == {} + + +def test_draw_networkx_edges_undirected_selfloop_colors(): + """When an edgelist is supplied along with a sequence of colors, check that + the self-loops have the correct colors.""" + fig, ax = plt.subplots() + # Edge list and corresponding colors + edgelist = [(1, 3), (1, 2), (2, 3), (1, 1), (3, 3), (2, 2)] + edge_colors = ["pink", "cyan", "black", "red", "blue", "green"] + + G = nx.Graph(edgelist) + pos = {n: (n, n) for n in G.nodes} + nx.draw_networkx_edges(G, pos, ax=ax, edgelist=edgelist, edge_color=edge_colors) + + # Verify that there are three fancy arrow patches (1 per self loop) + assert len(ax.patches) == 3 + + # These are points that should be contained in the self loops. For example, + # sl_points[0] will be (1, 1.1), which is inside the "path" of the first + # self-loop but outside the others + sl_points = np.array(edgelist[-3:]) + np.array([0, 0.1]) + + # Check that the mapping between self-loop locations and their colors is + # correct + for fap, clr, slp in zip(ax.patches, edge_colors[-3:], sl_points): + assert fap.get_path().contains_point(slp) + assert mpl.colors.same_color(fap.get_edgecolor(), clr) + plt.delaxes(ax) + + +@pytest.mark.parametrize( + "fap_only_kwarg", # Non-default values for kwargs that only apply to FAPs + ( + {"arrowstyle": "-"}, + {"arrowsize": 20}, + {"connectionstyle": "arc3,rad=0.2"}, + {"min_source_margin": 10}, + {"min_target_margin": 10}, + ), +) +def test_user_warnings_for_unused_edge_drawing_kwargs(fap_only_kwarg): + """Users should get a warning when they specify a non-default value for + one of the kwargs that applies only to edges drawn with FancyArrowPatches, + but FancyArrowPatches aren't being used under the hood.""" + G = nx.path_graph(3) + pos = {n: (n, n) for n in G} + fig, ax = plt.subplots() + # By default, an undirected graph will use LineCollection to represent + # the edges + kwarg_name = list(fap_only_kwarg.keys())[0] + with pytest.warns( + UserWarning, match=f"\n\nThe {kwarg_name} keyword argument is not applicable" + ): + nx.draw_networkx_edges(G, pos, ax=ax, **fap_only_kwarg) + # FancyArrowPatches are always used when `arrows=True` is specified. 
+ # Check that warnings are *not* raised in this case + with warnings.catch_warnings(): + # Escalate warnings -> errors so tests fail if warnings are raised + warnings.simplefilter("error") + nx.draw_networkx_edges(G, pos, ax=ax, arrows=True, **fap_only_kwarg) + + plt.delaxes(ax) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92f9927e2274973faaf5432b77364c0c23176dbf Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3372a58b779a1474044cbd5568e7236b78c49d76 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f96640ef14ae8e12a0620b7da801df1048c3dd15 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/__init__.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f6df61a51ee8f0f0ebcd6ce9550c296b7242bcc Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_convert_numpy.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_convert_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..ab73172a4b55eb91e60747f5a8957d7a600eb85a --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_convert_numpy.py @@ -0,0 +1,395 @@ +import pytest + +np = pytest.importorskip("numpy") +npt = pytest.importorskip("numpy.testing") + +import networkx as nx +from networkx.generators.classic import barbell_graph, cycle_graph, path_graph +from networkx.utils import graphs_equal + + +class TestConvertNumpyArray: + def setup_method(self): + self.G1 = barbell_graph(10, 3) + self.G2 = cycle_graph(10, create_using=nx.DiGraph) + self.G3 = self.create_weighted(nx.Graph()) + self.G4 = self.create_weighted(nx.DiGraph()) + + def create_weighted(self, G): + g = cycle_graph(4) + G.add_nodes_from(g) + G.add_weighted_edges_from((u, v, 10 + u) for u, v in g.edges()) + return G + + def assert_equal(self, G1, 
G2): + assert sorted(G1.nodes()) == sorted(G2.nodes()) + assert sorted(G1.edges()) == sorted(G2.edges()) + + def identity_conversion(self, G, A, create_using): + assert A.sum() > 0 + GG = nx.from_numpy_array(A, create_using=create_using) + self.assert_equal(G, GG) + GW = nx.to_networkx_graph(A, create_using=create_using) + self.assert_equal(G, GW) + GI = nx.empty_graph(0, create_using).__class__(A) + self.assert_equal(G, GI) + + def test_shape(self): + "Conversion from non-square array." + A = np.array([[1, 2, 3], [4, 5, 6]]) + pytest.raises(nx.NetworkXError, nx.from_numpy_array, A) + + def test_identity_graph_array(self): + "Conversion from graph to array to graph." + A = nx.to_numpy_array(self.G1) + self.identity_conversion(self.G1, A, nx.Graph()) + + def test_identity_digraph_array(self): + """Conversion from digraph to array to digraph.""" + A = nx.to_numpy_array(self.G2) + self.identity_conversion(self.G2, A, nx.DiGraph()) + + def test_identity_weighted_graph_array(self): + """Conversion from weighted graph to array to weighted graph.""" + A = nx.to_numpy_array(self.G3) + self.identity_conversion(self.G3, A, nx.Graph()) + + def test_identity_weighted_digraph_array(self): + """Conversion from weighted digraph to array to weighted digraph.""" + A = nx.to_numpy_array(self.G4) + self.identity_conversion(self.G4, A, nx.DiGraph()) + + def test_nodelist(self): + """Conversion from graph to array to graph with nodelist.""" + P4 = path_graph(4) + P3 = path_graph(3) + nodelist = list(P3) + A = nx.to_numpy_array(P4, nodelist=nodelist) + GA = nx.Graph(A) + self.assert_equal(GA, P3) + + # Make nodelist ambiguous by containing duplicates. + nodelist += [nodelist[0]] + pytest.raises(nx.NetworkXError, nx.to_numpy_array, P3, nodelist=nodelist) + + # Make nodelist invalid by including nonexistent nodes + nodelist = [-1, 0, 1] + with pytest.raises( + nx.NetworkXError, + match=f"Nodes {nodelist - P3.nodes} in nodelist is not in G", + ): + nx.to_numpy_array(P3, nodelist=nodelist) + + def test_weight_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3)) + P4 = path_graph(4) + A = nx.to_numpy_array(P4) + np.testing.assert_equal(A, nx.to_numpy_array(WP4, weight=None)) + np.testing.assert_equal(0.5 * A, nx.to_numpy_array(WP4)) + np.testing.assert_equal(0.3 * A, nx.to_numpy_array(WP4, weight="other")) + + def test_from_numpy_array_type(self): + A = np.array([[1]]) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == int + + A = np.array([[1]]).astype(float) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == float + + A = np.array([[1]]).astype(str) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == str + + A = np.array([[1]]).astype(bool) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == bool + + A = np.array([[1]]).astype(complex) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == complex + + A = np.array([[1]]).astype(object) + pytest.raises(TypeError, nx.from_numpy_array, A) + + A = np.array([[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]]) + with pytest.raises( + nx.NetworkXError, match=f"Input array must be 2D, not {A.ndim}" + ): + g = nx.from_numpy_array(A) + + def test_from_numpy_array_dtype(self): + dt = [("weight", float), ("cost", int)] + A = np.array([[(1.0, 2)]], dtype=dt) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == float + assert type(G[0][0]["cost"]) == int + assert G[0][0]["cost"] == 2 + assert G[0][0]["weight"] == 1.0 + + def 
test_from_numpy_array_parallel_edges(self): + """Tests that the :func:`networkx.from_numpy_array` function + interprets integer weights as the number of parallel edges when + creating a multigraph. + + """ + A = np.array([[1, 1], [1, 2]]) + # First, with a simple graph, each integer entry in the adjacency + # matrix is interpreted as the weight of a single edge in the graph. + expected = nx.DiGraph() + edges = [(0, 0), (0, 1), (1, 0)] + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + expected.add_edge(1, 1, weight=2) + actual = nx.from_numpy_array(A, parallel_edges=True, create_using=nx.DiGraph) + assert graphs_equal(actual, expected) + actual = nx.from_numpy_array(A, parallel_edges=False, create_using=nx.DiGraph) + assert graphs_equal(actual, expected) + # Now each integer entry in the adjacency matrix is interpreted as the + # number of parallel edges in the graph if the appropriate keyword + # argument is specified. + edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)] + expected = nx.MultiDiGraph() + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + actual = nx.from_numpy_array( + A, parallel_edges=True, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + expected = nx.MultiDiGraph() + expected.add_edges_from(set(edges), weight=1) + # The sole self-loop (edge 0) on vertex 1 should have weight 2. + expected[1][1][0]["weight"] = 2 + actual = nx.from_numpy_array( + A, parallel_edges=False, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + + @pytest.mark.parametrize( + "dt", + ( + None, # default + int, # integer dtype + np.dtype( + [("weight", "f8"), ("color", "i1")] + ), # Structured dtype with named fields + ), + ) + def test_from_numpy_array_no_edge_attr(self, dt): + A = np.array([[0, 1], [1, 0]], dtype=dt) + G = nx.from_numpy_array(A, edge_attr=None) + assert "weight" not in G.edges[0, 1] + assert len(G.edges[0, 1]) == 0 + + def test_from_numpy_array_multiedge_no_edge_attr(self): + A = np.array([[0, 2], [2, 0]]) + G = nx.from_numpy_array(A, create_using=nx.MultiDiGraph, edge_attr=None) + assert all("weight" not in e for _, e in G[0][1].items()) + assert len(G[0][1][0]) == 0 + + def test_from_numpy_array_custom_edge_attr(self): + A = np.array([[0, 2], [3, 0]]) + G = nx.from_numpy_array(A, edge_attr="cost") + assert "weight" not in G.edges[0, 1] + assert G.edges[0, 1]["cost"] == 3 + + def test_symmetric(self): + """Tests that a symmetric array has edges added only once to an + undirected multigraph when using :func:`networkx.from_numpy_array`. + + """ + A = np.array([[0, 1], [1, 0]]) + G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + expected = nx.MultiGraph() + expected.add_edge(0, 1, weight=1) + assert graphs_equal(G, expected) + + def test_dtype_int_graph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. + + """ + G = nx.complete_graph(3) + A = nx.to_numpy_array(G, dtype=int) + assert A.dtype == int + + def test_dtype_int_multigraph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. 
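+
+        A condensed sketch of the two interpretations (integer entries
+        assumed; `M` is a throwaway name):
+
+        >>> A = np.array([[0, 2], [0, 0]])
+        >>> M = nx.from_numpy_array(A, parallel_edges=True, create_using=nx.MultiDiGraph)
+        >>> M.number_of_edges(0, 1)  # the entry 2 becomes two parallel edges
+        2
+        >>> M = nx.from_numpy_array(A, parallel_edges=False, create_using=nx.MultiDiGraph)
+        >>> M.number_of_edges(0, 1)  # the entry 2 becomes one edge of weight 2
+        1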
+ + """ + G = nx.MultiGraph(nx.complete_graph(3)) + A = nx.to_numpy_array(G, dtype=int) + assert A.dtype == int + + +@pytest.fixture +def multigraph_test_graph(): + G = nx.MultiGraph() + G.add_edge(1, 2, weight=7) + G.add_edge(1, 2, weight=70) + return G + + +@pytest.mark.parametrize(("operator", "expected"), ((sum, 77), (min, 7), (max, 70))) +def test_numpy_multigraph(multigraph_test_graph, operator, expected): + A = nx.to_numpy_array(multigraph_test_graph, multigraph_weight=operator) + assert A[1, 0] == expected + + +def test_to_numpy_array_multigraph_nodelist(multigraph_test_graph): + G = multigraph_test_graph + G.add_edge(0, 1, weight=3) + A = nx.to_numpy_array(G, nodelist=[1, 2]) + assert A.shape == (2, 2) + assert A[1, 0] == 77 + + +@pytest.mark.parametrize( + "G, expected", + [ + (nx.Graph(), np.array([[0, 1 + 2j], [1 + 2j, 0]], dtype=complex)), + (nx.DiGraph(), np.array([[0, 1 + 2j], [0, 0]], dtype=complex)), + ], +) +def test_to_numpy_array_complex_weights(G, expected): + G.add_edge(0, 1, weight=1 + 2j) + A = nx.to_numpy_array(G, dtype=complex) + npt.assert_array_equal(A, expected) + + +def test_to_numpy_array_arbitrary_weights(): + G = nx.DiGraph() + w = 922337203685477580102 # Out of range for int64 + G.add_edge(0, 1, weight=922337203685477580102) # val not representable by int64 + A = nx.to_numpy_array(G, dtype=object) + expected = np.array([[0, w], [0, 0]], dtype=object) + npt.assert_array_equal(A, expected) + + # Undirected + A = nx.to_numpy_array(G.to_undirected(), dtype=object) + expected = np.array([[0, w], [w, 0]], dtype=object) + npt.assert_array_equal(A, expected) + + +@pytest.mark.parametrize( + "func, expected", + ((min, -1), (max, 10), (sum, 11), (np.mean, 11 / 3), (np.median, 2)), +) +def test_to_numpy_array_multiweight_reduction(func, expected): + """Test various functions for reducing multiedge weights.""" + G = nx.MultiDiGraph() + weights = [-1, 2, 10.0] + for w in weights: + G.add_edge(0, 1, weight=w) + A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float) + assert np.allclose(A, [[0, expected], [0, 0]]) + + # Undirected case + A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float) + assert np.allclose(A, [[0, expected], [expected, 0]]) + + +@pytest.mark.parametrize( + ("G, expected"), + [ + (nx.Graph(), [[(0, 0), (10, 5)], [(10, 5), (0, 0)]]), + (nx.DiGraph(), [[(0, 0), (10, 5)], [(0, 0), (0, 0)]]), + ], +) +def test_to_numpy_array_structured_dtype_attrs_from_fields(G, expected): + """When `dtype` is structured (i.e. 
has names) and `weight` is None, use + the named fields of the dtype to look up edge attributes.""" + G.add_edge(0, 1, weight=10, cost=5.0) + dtype = np.dtype([("weight", int), ("cost", int)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.asarray(expected, dtype=dtype) + npt.assert_array_equal(A, expected) + + +def test_to_numpy_array_structured_dtype_single_attr_default(): + G = nx.path_graph(3) + dtype = np.dtype([("weight", float)]) # A single named field + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float) + npt.assert_array_equal(A["weight"], expected) + + +@pytest.mark.parametrize( + ("field_name", "expected_attr_val"), + [ + ("weight", 1), + ("cost", 3), + ], +) +def test_to_numpy_array_structured_dtype_single_attr(field_name, expected_attr_val): + G = nx.Graph() + G.add_edge(0, 1, cost=3) + dtype = np.dtype([(field_name, float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.array([[0, expected_attr_val], [expected_attr_val, 0]], dtype=float) + npt.assert_array_equal(A[field_name], expected) + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +@pytest.mark.parametrize( + "edge", + [ + (0, 1), # No edge attributes + (0, 1, {"weight": 10}), # One edge attr + (0, 1, {"weight": 5, "flow": -4}), # Multiple but not all edge attrs + (0, 1, {"weight": 2.0, "cost": 10, "flow": -45}), # All attrs + ], +) +def test_to_numpy_array_structured_dtype_multiple_fields(graph_type, edge): + G = graph_type([edge]) + dtype = np.dtype([("weight", float), ("cost", float), ("flow", float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + for attr in dtype.names: + expected = nx.to_numpy_array(G, dtype=float, weight=attr) + npt.assert_array_equal(A[attr], expected) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_to_numpy_array_structured_dtype_scalar_nonedge(G): + G.add_edge(0, 1, weight=10) + dtype = np.dtype([("weight", float), ("cost", float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=np.nan) + for attr in dtype.names: + expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=np.nan) + npt.assert_array_equal(A[attr], expected) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_to_numpy_array_structured_dtype_nonedge_ary(G): + """Similar to the scalar case, except has a different non-edge value for + each named field.""" + G.add_edge(0, 1, weight=10) + dtype = np.dtype([("weight", float), ("cost", float)]) + nonedges = np.array([(0, np.inf)], dtype=dtype) + A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=nonedges) + for attr in dtype.names: + nonedge = nonedges[attr] + expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=nonedge) + npt.assert_array_equal(A[attr], expected) + + +def test_to_numpy_array_structured_dtype_with_weight_raises(): + """Using both a structured dtype (with named fields) and specifying a `weight` + parameter is ambiguous.""" + G = nx.path_graph(3) + dtype = np.dtype([("weight", int), ("cost", int)]) + exception_msg = "Specifying `weight` not supported for structured dtypes" + with pytest.raises(ValueError, match=exception_msg): + nx.to_numpy_array(G, dtype=dtype) # Default is weight="weight" + with pytest.raises(ValueError, match=exception_msg): + nx.to_numpy_array(G, dtype=dtype, weight="cost") + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph)) +def test_to_numpy_array_structured_multigraph_raises(graph_type): + G = 
nx.path_graph(3, create_using=graph_type) + dtype = np.dtype([("weight", int), ("cost", int)]) + with pytest.raises(nx.NetworkXError, match="Structured arrays are not supported"): + nx.to_numpy_array(G, dtype=dtype, weight=None) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_exceptions.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..cf59983cb8d12a119f5744ebc8b11e7cb9075366 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_exceptions.py @@ -0,0 +1,40 @@ +import pytest + +import networkx as nx + +# smoke tests for exceptions + + +def test_raises_networkxexception(): + with pytest.raises(nx.NetworkXException): + raise nx.NetworkXException + + +def test_raises_networkxerr(): + with pytest.raises(nx.NetworkXError): + raise nx.NetworkXError + + +def test_raises_networkx_pointless_concept(): + with pytest.raises(nx.NetworkXPointlessConcept): + raise nx.NetworkXPointlessConcept + + +def test_raises_networkxalgorithmerr(): + with pytest.raises(nx.NetworkXAlgorithmError): + raise nx.NetworkXAlgorithmError + + +def test_raises_networkx_unfeasible(): + with pytest.raises(nx.NetworkXUnfeasible): + raise nx.NetworkXUnfeasible + + +def test_raises_networkx_no_path(): + with pytest.raises(nx.NetworkXNoPath): + raise nx.NetworkXNoPath + + +def test_raises_networkx_unbounded(): + with pytest.raises(nx.NetworkXUnbounded): + raise nx.NetworkXUnbounded diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_relabel.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_relabel.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebf4d3ef490afce48e3e1298412edb05a385cdc --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/tests/test_relabel.py @@ -0,0 +1,347 @@ +import pytest + +import networkx as nx +from networkx.generators.classic import empty_graph +from networkx.utils import edges_equal, nodes_equal + + +class TestRelabel: + def test_convert_node_labels_to_integers(self): + # test that empty graph converts fine for all options + G = empty_graph() + H = nx.convert_node_labels_to_integers(G, 100) + assert list(H.nodes()) == [] + assert list(H.edges()) == [] + + for opt in ["default", "sorted", "increasing degree", "decreasing degree"]: + G = empty_graph() + H = nx.convert_node_labels_to_integers(G, 100, ordering=opt) + assert list(H.nodes()) == [] + assert list(H.edges()) == [] + + G = empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + H = nx.convert_node_labels_to_integers(G) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + + H = nx.convert_node_labels_to_integers(G, 1000) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert nodes_equal(H.nodes(), [1000, 1001, 1002, 1003]) + + H = nx.convert_node_labels_to_integers(G, ordering="increasing degree") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 1 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 3 + + H = nx.convert_node_labels_to_integers(G, ordering="decreasing degree") + degH = (d for n, d in H.degree()) + degG = (d for n, d in 
G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 3 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 1 + + H = nx.convert_node_labels_to_integers( + G, ordering="increasing degree", label_attribute="label" + ) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 1 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 3 + + # check mapping + assert H.nodes[3]["label"] == "C" + assert H.nodes[0]["label"] == "D" + assert H.nodes[1]["label"] == "A" or H.nodes[2]["label"] == "A" + assert H.nodes[1]["label"] == "B" or H.nodes[2]["label"] == "B" + + def test_convert_to_integers2(self): + G = empty_graph() + G.add_edges_from([("C", "D"), ("A", "B"), ("A", "C"), ("B", "C")]) + H = nx.convert_node_labels_to_integers(G, ordering="sorted") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + + H = nx.convert_node_labels_to_integers( + G, ordering="sorted", label_attribute="label" + ) + assert H.nodes[0]["label"] == "A" + assert H.nodes[1]["label"] == "B" + assert H.nodes[2]["label"] == "C" + assert H.nodes[3]["label"] == "D" + + def test_convert_to_integers_raise(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.convert_node_labels_to_integers(G, ordering="increasing age") + + def test_relabel_nodes_copy(self): + G = nx.empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_function(self): + G = nx.empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + # function mapping no longer encouraged but works + + def mapping(n): + return ord(n) + + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), [65, 66, 67, 68]) + + def test_relabel_nodes_callable_type(self): + G = nx.path_graph(4) + H = nx.relabel_nodes(G, str) + assert nodes_equal(H.nodes, ["0", "1", "2", "3"]) + + @pytest.mark.parametrize("non_mc", ("0123", ["0", "1", "2", "3"])) + def test_relabel_nodes_non_mapping_or_callable(self, non_mc): + """If `mapping` is neither a Callable or a Mapping, an exception + should be raised.""" + G = nx.path_graph(4) + with pytest.raises(AttributeError): + nx.relabel_nodes(G, non_mc) + + def test_relabel_nodes_graph(self): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_orderedgraph(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + G.add_edges_from([(1, 3), (2, 3)]) + mapping = {1: "a", 2: "b", 3: "c"} + H = nx.relabel_nodes(G, mapping) + assert list(H.nodes) == ["a", "b", "c"] + + def test_relabel_nodes_digraph(self): + G = nx.DiGraph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_multigraph(self): + G = nx.MultiGraph([("a", "b"), ("a", "b")]) + mapping = {"a": "aardvark", "b": "bear"} + G = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(G.nodes(), ["aardvark", "bear"]) + assert 
edges_equal(G.edges(), [("aardvark", "bear"), ("aardvark", "bear")])
+
+    def test_relabel_nodes_multidigraph(self):
+        G = nx.MultiDiGraph([("a", "b"), ("a", "b")])
+        mapping = {"a": "aardvark", "b": "bear"}
+        G = nx.relabel_nodes(G, mapping, copy=False)
+        assert nodes_equal(G.nodes(), ["aardvark", "bear"])
+        assert edges_equal(G.edges(), [("aardvark", "bear"), ("aardvark", "bear")])
+
+    def test_relabel_isolated_nodes_to_same(self):
+        G = nx.Graph()
+        G.add_nodes_from(range(4))
+        mapping = {1: 1}
+        H = nx.relabel_nodes(G, mapping, copy=False)
+        assert nodes_equal(H.nodes(), list(range(4)))
+
+    def test_relabel_nodes_missing(self):
+        G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")])
+        mapping = {0: "aardvark"}
+        # copy=True
+        H = nx.relabel_nodes(G, mapping, copy=True)
+        assert nodes_equal(H.nodes, G.nodes)
+        # copy=False
+        GG = G.copy()
+        nx.relabel_nodes(G, mapping, copy=False)
+        assert nodes_equal(G.nodes, GG.nodes)
+
+    def test_relabel_copy_name(self):
+        G = nx.Graph()
+        H = nx.relabel_nodes(G, {}, copy=True)
+        assert H.graph == G.graph
+        H = nx.relabel_nodes(G, {}, copy=False)
+        assert H.graph == G.graph
+        G.name = "first"
+        H = nx.relabel_nodes(G, {}, copy=True)
+        assert H.graph == G.graph
+        H = nx.relabel_nodes(G, {}, copy=False)
+        assert H.graph == G.graph
+
+    def test_relabel_toposort(self):
+        K4 = nx.complete_graph(4)
+        G = nx.complete_graph(4)
+        G = nx.relabel_nodes(G, {i: i + 1 for i in range(4)}, copy=False)
+        assert nx.is_isomorphic(K4, G)
+        G = nx.complete_graph(4)
+        G = nx.relabel_nodes(G, {i: i - 1 for i in range(4)}, copy=False)
+        assert nx.is_isomorphic(K4, G)
+
+    def test_relabel_selfloop(self):
+        G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
+        G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False)
+        assert nodes_equal(G.nodes(), ["One", "Three", "Two"])
+        G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
+        G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False)
+        assert nodes_equal(G.nodes(), ["One", "Three", "Two"])
+        G = nx.MultiDiGraph([(1, 1)])
+        G = nx.relabel_nodes(G, {1: 0}, copy=False)
+        assert nodes_equal(G.nodes(), [0])
+
+    def test_relabel_multidigraph_inout_merge_nodes(self):
+        for MG in (nx.MultiGraph, nx.MultiDiGraph):
+            for cc in (True, False):
+                G = MG([(0, 4), (1, 4), (4, 2), (4, 3)])
+                G[0][4][0]["value"] = "a"
+                G[1][4][0]["value"] = "b"
+                G[4][2][0]["value"] = "c"
+                G[4][3][0]["value"] = "d"
+                G.add_edge(0, 4, key="x", value="e")
+                G.add_edge(4, 3, key="x", value="f")
+                mapping = {0: 9, 1: 9, 2: 9, 3: 9}
+                H = nx.relabel_nodes(G, mapping, copy=cc)
+                # No ordering on keys enforced
+                assert {"value": "a"} in H[9][4].values()
+                assert {"value": "b"} in H[9][4].values()
+                assert {"value": "c"} in H[4][9].values()
+                assert len(H[4][9]) == (3 if G.is_directed() else 6)
+                assert {"value": "d"} in H[4][9].values()
+                assert {"value": "e"} in H[9][4].values()
+                assert {"value": "f"} in H[4][9].values()
+                assert len(H[9][4]) == (3 if G.is_directed() else 6)
+
+    def test_relabel_multigraph_merge_inplace(self):
+        G = nx.MultiGraph([(0, 1), (0, 2), (0, 3), (0, 1), (0, 2), (0, 3)])
+        G[0][1][0]["value"] = "a"
+        G[0][2][0]["value"] = "b"
+        G[0][3][0]["value"] = "c"
+        mapping = {1: 4, 2: 4, 3: 4}
+        nx.relabel_nodes(G, mapping, copy=False)
+        # No ordering on keys enforced
+        assert {"value": "a"} in G[0][4].values()
+        assert {"value": "b"} in G[0][4].values()
+        assert {"value": "c"} in G[0][4].values()
+
+    def test_relabel_multidigraph_merge_inplace(self):
+        G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)])
+        G[0][1][0]["value"] = "a"
+        G[0][2][0]["value"] = "b"
+        G[0][3][0]["value"] = "c"
+        mapping = {1: 4, 2: 4, 3: 4}
+        nx.relabel_nodes(G, mapping, copy=False)
+        # No ordering on keys enforced
+        assert {"value": "a"} in G[0][4].values()
+        assert {"value": "b"} in G[0][4].values()
+        assert {"value": "c"} in G[0][4].values()
+
+    def test_relabel_multidigraph_inout_copy(self):
+        G = nx.MultiDiGraph([(0, 4), (1, 4), (4, 2), (4, 3)])
+        G[0][4][0]["value"] = "a"
+        G[1][4][0]["value"] = "b"
+        G[4][2][0]["value"] = "c"
+        G[4][3][0]["value"] = "d"
+        G.add_edge(0, 4, key="x", value="e")
+        G.add_edge(4, 3, key="x", value="f")
+        mapping = {0: 9, 1: 9, 2: 9, 3: 9}
+        H = nx.relabel_nodes(G, mapping, copy=True)
+        # No ordering on keys enforced
+        assert {"value": "a"} in H[9][4].values()
+        assert {"value": "b"} in H[9][4].values()
+        assert {"value": "c"} in H[4][9].values()
+        assert len(H[4][9]) == 3
+        assert {"value": "d"} in H[4][9].values()
+        assert {"value": "e"} in H[9][4].values()
+        assert {"value": "f"} in H[4][9].values()
+        assert len(H[9][4]) == 3
+
+    def test_relabel_multigraph_merge_copy(self):
+        G = nx.MultiGraph([(0, 1), (0, 2), (0, 3)])
+        G[0][1][0]["value"] = "a"
+        G[0][2][0]["value"] = "b"
+        G[0][3][0]["value"] = "c"
+        mapping = {1: 4, 2: 4, 3: 4}
+        H = nx.relabel_nodes(G, mapping, copy=True)
+        assert {"value": "a"} in H[0][4].values()
+        assert {"value": "b"} in H[0][4].values()
+        assert {"value": "c"} in H[0][4].values()
+
+    def test_relabel_multidigraph_merge_copy(self):
+        G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)])
+        G[0][1][0]["value"] = "a"
+        G[0][2][0]["value"] = "b"
+        G[0][3][0]["value"] = "c"
+        mapping = {1: 4, 2: 4, 3: 4}
+        H = nx.relabel_nodes(G, mapping, copy=True)
+        assert {"value": "a"} in H[0][4].values()
+        assert {"value": "b"} in H[0][4].values()
+        assert {"value": "c"} in H[0][4].values()
+
+    def test_relabel_multigraph_nonnumeric_key(self):
+        for MG in (nx.MultiGraph, nx.MultiDiGraph):
+            for cc in (True, False):
+                G = MG()
+                G.add_edge(0, 1, key="I", value="a")
+                G.add_edge(0, 2, key="II", value="b")
+                G.add_edge(0, 3, key="II", value="c")
+                mapping = {1: 4, 2: 4, 3: 4}
+                G = nx.relabel_nodes(G, mapping, copy=cc)
+                assert {"value": "a"} in G[0][4].values()
+                assert {"value": "b"} in G[0][4].values()
+                assert {"value": "c"} in G[0][4].values()
+                assert 0 in G[0][4]
+                assert "I" in G[0][4]
+                assert "II" in G[0][4]
+
+    def test_relabel_circular(self):
+        G = nx.path_graph(3)
+        mapping = {0: 1, 1: 0}
+        H = nx.relabel_nodes(G, mapping, copy=True)
+        with pytest.raises(nx.NetworkXUnfeasible):
+            H = nx.relabel_nodes(G, mapping, copy=False)
+
+    def test_relabel_preserve_node_order_full_mapping_with_copy_true(self):
+        G = nx.path_graph(3)
+        original_order = list(G.nodes())
+        mapping = {2: "a", 1: "b", 0: "c"}  # dictionary keys out of order on purpose
+        H = nx.relabel_nodes(G, mapping, copy=True)
+        new_order = list(H.nodes())
+        assert [mapping.get(i, i) for i in original_order] == new_order
+
+    def test_relabel_preserve_node_order_full_mapping_with_copy_false(self):
+        G = nx.path_graph(3)
+        original_order = list(G)
+        mapping = {2: "a", 1: "b", 0: "c"}  # dictionary keys out of order on purpose
+        H = nx.relabel_nodes(G, mapping, copy=False)
+        new_order = list(H)
+        assert [mapping.get(i, i) for i in original_order] == new_order
+
+    def test_relabel_preserve_node_order_partial_mapping_with_copy_true(self):
+        G = nx.path_graph(3)
+        original_order = list(G)
+        mapping = {1: "a", 0: "b"}  # partial mapping and keys out of order on purpose
+        H = nx.relabel_nodes(G, mapping, copy=True)
+        new_order = list(H)
assert [mapping.get(i, i) for i in original_order] == new_order + + def test_relabel_preserve_node_order_partial_mapping_with_copy_false(self): + G = nx.path_graph(3) + original_order = list(G) + mapping = {1: "a", 0: "b"} # partial mapping and keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=False) + new_order = list(H) + assert [mapping.get(i, i) for i in original_order] != new_order diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..48f02c18873c94098ec234cdc39ca3c8cf0a5833 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__init__.py @@ -0,0 +1,6 @@ +from networkx.utils.misc import * +from networkx.utils.decorators import * +from networkx.utils.random_sequence import * +from networkx.utils.union_find import * +from networkx.utils.rcm import * +from networkx.utils.heaps import * diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/backends.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/backends.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1fb91494527c251abe7bd26dabd968c77f13448 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/__pycache__/backends.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3401d0c3c4e3dc2956fc38dc24dcae55360fb417 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/build_env.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/build_env.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f5b8ab52730e914039636508f18ebe3a43ee5ab Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/__pycache__/build_env.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92e1b76921b4e4a18d45365acd61dcd179053748 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/index_command.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/main.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/main.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eed3f18e5c74bb8c486d02143699d63a0ac051c6 Binary files /dev/null and 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/main.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ffe6db19849878152e8be5b769c1c139bb54fc9 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/parser.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/parser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50c28514c3a0a31e90332531c6e9244fe4fb27ac Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/parser.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68dbcc3fc59eb1131310a3302e2ec5788a4916a3 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5baf2dac15f8920f60ca5614f7b8b837fda9d302 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..589daa02347fab63bfeeeef13099ff6ca836abeb Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/autocompletion.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/autocompletion.py new file mode 100644 index 0000000000000000000000000000000000000000..f3f70ac8553b069d2d7f87926e6ba12f8998b5e5 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/cli/autocompletion.py @@ -0,0 +1,176 @@ +"""Logic that powers autocompletion installed by ``pip completion``. 
+"""
+
+import optparse
+import os
+import sys
+from itertools import chain
+from typing import Any, Iterable, List, Optional
+
+from pip._internal.cli.main_parser import create_main_parser
+from pip._internal.commands import commands_dict, create_command
+from pip._internal.metadata import get_default_environment
+
+
+def autocomplete() -> None:
+    """Entry Point for completion of main and subcommand options."""
+    # Don't complete if user hasn't sourced bash_completion file.
+    if "PIP_AUTO_COMPLETE" not in os.environ:
+        return
+    # Don't complete if autocompletion environment variables
+    # are not present
+    if not os.environ.get("COMP_WORDS") or not os.environ.get("COMP_CWORD"):
+        return
+    cwords = os.environ["COMP_WORDS"].split()[1:]
+    cword = int(os.environ["COMP_CWORD"])
+    try:
+        current = cwords[cword - 1]
+    except IndexError:
+        current = ""
+
+    parser = create_main_parser()
+    subcommands = list(commands_dict)
+    options = []
+
+    # subcommand
+    subcommand_name: Optional[str] = None
+    for word in cwords:
+        if word in subcommands:
+            subcommand_name = word
+            break
+    # subcommand options
+    if subcommand_name is not None:
+        # special case: 'help' subcommand has no options
+        if subcommand_name == "help":
+            sys.exit(1)
+        # special case: list locally installed dists for show and uninstall
+        should_list_installed = not current.startswith("-") and subcommand_name in [
+            "show",
+            "uninstall",
+        ]
+        if should_list_installed:
+            env = get_default_environment()
+            lc = current.lower()
+            installed = [
+                dist.canonical_name
+                for dist in env.iter_installed_distributions(local_only=True)
+                if dist.canonical_name.startswith(lc)
+                and dist.canonical_name not in cwords[1:]
+            ]
+            # if there are no dists installed, fall back to option completion
+            if installed:
+                for dist in installed:
+                    print(dist)
+                sys.exit(1)
+
+        should_list_installables = (
+            not current.startswith("-") and subcommand_name == "install"
+        )
+        if should_list_installables:
+            for path in auto_complete_paths(current, "path"):
+                print(path)
+            sys.exit(1)
+
+        subcommand = create_command(subcommand_name)
+
+        for opt in subcommand.parser.option_list_all:
+            if opt.help != optparse.SUPPRESS_HELP:
+                options += [
+                    (opt_str, opt.nargs) for opt_str in opt._long_opts + opt._short_opts
+                ]
+
+        # filter out previously specified options from available options
+        prev_opts = [x.split("=")[0] for x in cwords[1 : cword - 1]]
+        options = [(x, v) for (x, v) in options if x not in prev_opts]
+        # filter options by current input
+        options = [(k, v) for k, v in options if k.startswith(current)]
+        # get completion type given cwords and available subcommand options
+        completion_type = get_path_completion_type(
+            cwords,
+            cword,
+            subcommand.parser.option_list_all,
+        )
+        # get completion files and directories if ``completion_type`` is
+        # ``<file>``, ``<dir>`` or ``<path>``
+        if completion_type:
+            paths = auto_complete_paths(current, completion_type)
+            options = [(path, 0) for path in paths]
+        for option in options:
+            opt_label = option[0]
+            # append '=' to options which require args
+            if option[1] and option[0][:2] == "--":
+                opt_label += "="
+            print(opt_label)
+    else:
+        # show main parser options only when necessary
+
+        opts = [i.option_list for i in parser.option_groups]
+        opts.append(parser.option_list)
+        flattened_opts = chain.from_iterable(opts)
+        if current.startswith("-"):
+            for opt in flattened_opts:
+                if opt.help != optparse.SUPPRESS_HELP:
+                    subcommands += opt._long_opts + opt._short_opts
+        else:
+            # get completion type given cwords and all available options
+            completion_type = get_path_completion_type(cwords, cword, flattened_opts)
+            if completion_type:
+                subcommands = list(auto_complete_paths(current, completion_type))
+
+        print(" ".join([x for x in subcommands if x.startswith(current)]))
+    sys.exit(1)
+
+
+def get_path_completion_type(
+    cwords: List[str], cword: int, opts: Iterable[Any]
+) -> Optional[str]:
+    """Get the type of path completion (``file``, ``dir``, ``path`` or None)
+
+    :param cwords: same as the environment variable ``COMP_WORDS``
+    :param cword: same as the environment variable ``COMP_CWORD``
+    :param opts: The available options to check
+    :return: path completion type (``file``, ``dir``, ``path`` or None)
+    """
+    if cword < 2 or not cwords[cword - 2].startswith("-"):
+        return None
+    for opt in opts:
+        if opt.help == optparse.SUPPRESS_HELP:
+            continue
+        for o in str(opt).split("/"):
+            if cwords[cword - 2].split("=")[0] == o:
+                if not opt.metavar or any(
+                    x in ("path", "file", "dir") for x in opt.metavar.split("/")
+                ):
+                    return opt.metavar
+    return None
+
+
+def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]:
+    """If ``completion_type`` is ``file`` or ``path``, list all regular files
+    and directories starting with ``current``; otherwise only list directories
+    starting with ``current``.
+
+    :param current: The word to be completed
+    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
+    :return: A generator of regular files and/or directories
+    """
+    directory, filename = os.path.split(current)
+    current_path = os.path.abspath(directory)
+    # Don't complete paths if they can't be accessed
+    if not os.access(current_path, os.R_OK):
+        return
+    filename = os.path.normcase(filename)
+    # list all files that start with ``filename``
+    file_list = (
+        x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename)
+    )
+    for f in file_list:
+        opt = os.path.join(current_path, f)
+        comp_file = os.path.normcase(os.path.join(directory, f))
+        # complete regular files when there is not ``<dir>`` after option
+        # complete directories when there is ``<file>``, ``<path>`` or
+        # ``<dir>`` after option
+        if completion_type != "dir" and os.path.isfile(opt):
+            yield comp_file
+        elif os.path.isdir(opt):
+            yield os.path.join(comp_file, "")
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9d524b1f0c724e26624bd56a983710f5fdbabe4
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/cache.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/cache.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de1e2bfc4ed70e0c12cd27b9639c12c6d0d5215b
Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/cache.cpython-311.pyc differ
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/check.cpython-311.pyc
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/check.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3a7995b27b562d1c3cc3a11eef36c1b620d60a7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/check.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/completion.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/completion.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18f59a7124d13ac1540834742ae176fa3f5662aa Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/completion.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/debug.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/debug.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9651d6636048d9a519c22b54c4ea56d764a18888 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/debug.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/download.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/download.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b6ee12b94565999341ded986eee913197a70bc9 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/download.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee8f171363a5b45e43aaadd87b043ed6aa5f702c Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/search.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/search.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2cf02148b78f61d425df2cfe3704aa0c035fd2f Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/search.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/show.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/show.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72617c378b8486e79af2c9f341f56807e15db562 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/show.cpython-311.pyc differ diff --git 
a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49872f3c921df5783fa9034f73377a6f675db16a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/locations/__pycache__/base.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/locations/__pycache__/base.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e922d284f85e1b2b7e1eb27eb36120bfd1a30f7 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/locations/__pycache__/base.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/locations/_distutils.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/locations/_distutils.py new file mode 100644 index 0000000000000000000000000000000000000000..3d856256986f68b1bc38d012cfc96f8075268493 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/locations/_distutils.py @@ -0,0 +1,172 @@ +"""Locations where we look for configs, install stuff, etc""" + +# The following comment should be removed at some point in the future. +# mypy: strict-optional=False + +# If pip's going to use distutils, it should not be using the copy that setuptools +# might have injected into the environment. This is done by removing the injected +# shim, if it's injected. +# +# See https://github.com/pypa/pip/issues/8761 for the original discussion and +# rationale for why this is done within pip. 
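For context on what the guard below protects against: recent setuptools can install a meta path finder (from its `_distutils_hack` module) that redirects `distutils` imports to a vendored copy. A minimal sketch, assuming a setuptools version that ships `_distutils_hack`, of how that shim can be observed from a plain interpreter; this is illustrative only and not part of pip:

```python
# Illustrative sketch, not pip code: list the import-system meta path finders
# and flag the setuptools distutils shim if this environment carries one.
import sys

for finder in sys.meta_path:
    qualname = f"{type(finder).__module__}.{type(finder).__name__}"
    marker = "  <-- setuptools distutils shim" if "_distutils_hack" in qualname else ""
    print(qualname + marker)
```

On an environment where the shim is active, removing it (as the try/except below does) ensures the subsequent `from distutils... import ...` statements resolve against the standard library rather than the injected copy.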
+try: + __import__("_distutils_hack").remove_shim() +except (ImportError, AttributeError): + pass + +import logging +import os +import sys +from distutils.cmd import Command as DistutilsCommand +from distutils.command.install import SCHEME_KEYS +from distutils.command.install import install as distutils_install_command +from distutils.sysconfig import get_python_lib +from typing import Dict, List, Optional, Union + +from pip._internal.models.scheme import Scheme +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.virtualenv import running_under_virtualenv + +from .base import get_major_minor_version + +logger = logging.getLogger(__name__) + + +def distutils_scheme( + dist_name: str, + user: bool = False, + home: Optional[str] = None, + root: Optional[str] = None, + isolated: bool = False, + prefix: Optional[str] = None, + *, + ignore_config_files: bool = False, +) -> Dict[str, str]: + """ + Return a distutils install scheme + """ + from distutils.dist import Distribution + + dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name} + if isolated: + dist_args["script_args"] = ["--no-user-cfg"] + + d = Distribution(dist_args) + if not ignore_config_files: + try: + d.parse_config_files() + except UnicodeDecodeError: + paths = d.find_config_files() + logger.warning( + "Ignore distutils configs in %s due to encoding errors.", + ", ".join(os.path.basename(p) for p in paths), + ) + obj: Optional[DistutilsCommand] = None + obj = d.get_command_obj("install", create=True) + assert obj is not None + i: distutils_install_command = obj + # NOTE: setting user or home has the side-effect of creating the home dir + # or user base for installations during finalize_options() + # ideally, we'd prefer a scheme class that has no side-effects. + assert not (user and prefix), f"user={user} prefix={prefix}" + assert not (home and prefix), f"home={home} prefix={prefix}" + i.user = user or i.user + if user or home: + i.prefix = "" + i.prefix = prefix or i.prefix + i.home = home or i.home + i.root = root or i.root + i.finalize_options() + + scheme: Dict[str, str] = {} + for key in SCHEME_KEYS: + scheme[key] = getattr(i, "install_" + key) + + # install_lib specified in setup.cfg should install *everything* + # into there (i.e. it takes precedence over both purelib and + # platlib). Note, i.install_lib is *always* set after + # finalize_options(); we only want to override here if the user + # has explicitly requested it hence going back to the config + if "install_lib" in d.get_option_dict("install"): + scheme.update({"purelib": i.install_lib, "platlib": i.install_lib}) + + if running_under_virtualenv(): + if home: + prefix = home + elif user: + prefix = i.install_userbase + else: + prefix = i.prefix + scheme["headers"] = os.path.join( + prefix, + "include", + "site", + f"python{get_major_minor_version()}", + dist_name, + ) + + if root is not None: + path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1] + scheme["headers"] = os.path.join(root, path_no_drive[1:]) + + return scheme + + +def get_scheme( + dist_name: str, + user: bool = False, + home: Optional[str] = None, + root: Optional[str] = None, + isolated: bool = False, + prefix: Optional[str] = None, +) -> Scheme: + """ + Get the "scheme" corresponding to the input parameters. 
The distutils + documentation provides the context for the available schemes: + https://docs.python.org/3/install/index.html#alternate-installation + + :param dist_name: the name of the package to retrieve the scheme for, used + in the headers scheme path + :param user: indicates to use the "user" scheme + :param home: indicates to use the "home" scheme and provides the base + directory for the same + :param root: root under which other directories are re-based + :param isolated: equivalent to --no-user-cfg, i.e. do not consider + ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for + scheme paths + :param prefix: indicates to use the "prefix" scheme and provides the + base directory for the same + """ + scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix) + return Scheme( + platlib=scheme["platlib"], + purelib=scheme["purelib"], + headers=scheme["headers"], + scripts=scheme["scripts"], + data=scheme["data"], + ) + + +def get_bin_prefix() -> str: + # XXX: In old virtualenv versions, sys.prefix can contain '..' components, + # so we need to call normpath to eliminate them. + prefix = os.path.normpath(sys.prefix) + if WINDOWS: + bin_py = os.path.join(prefix, "Scripts") + # buildout uses 'bin' on Windows too? + if not os.path.exists(bin_py): + bin_py = os.path.join(prefix, "bin") + return bin_py + # Forcing to use /usr/local/bin for standard macOS framework installs + # Also log to ~/Library/Logs/ for use with the Console.app log viewer + if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/": + return "/usr/local/bin" + return os.path.join(prefix, "bin") + + +def get_purelib() -> str: + return get_python_lib(plat_specific=False) + + +def get_platlib() -> str: + return get_python_lib(plat_specific=True) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__init__.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7855226e4b500142deef8fb247cd33a9a991d122 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__init__.py @@ -0,0 +1,2 @@ +"""A package that contains models that represent entities. 
+""" diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/candidate.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/candidate.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8088b6a3fb0b2dc068da80f64e10ad12c249217 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/candidate.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dedc16fea2f9f4931ba13987d41d07644034223 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/scheme.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/scheme.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8247eead8efea00473c6f930cc14e8921f5f26ff Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/scheme.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30dedbedd66c38a7d973fd43d59c3f11a6908d7c Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/target_python.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/target_python.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..228b16aa299eebbd272c31f1b1a7971957298d20 Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/target_python.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/wheel.cpython-311.pyc b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/wheel.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc9f321fdbe7084a426fca06a2e89e06dc66934a Binary files /dev/null and b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/__pycache__/wheel.cpython-311.pyc differ diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/format_control.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/format_control.py new file mode 100644 index 0000000000000000000000000000000000000000..ccd11272c030c2d067e1bb6d90fc744c7379a923 --- /dev/null +++ 
b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/format_control.py @@ -0,0 +1,78 @@ +from typing import FrozenSet, Optional, Set + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.exceptions import CommandError + + +class FormatControl: + """Helper for managing formats from which a package can be installed.""" + + __slots__ = ["no_binary", "only_binary"] + + def __init__( + self, + no_binary: Optional[Set[str]] = None, + only_binary: Optional[Set[str]] = None, + ) -> None: + if no_binary is None: + no_binary = set() + if only_binary is None: + only_binary = set() + + self.no_binary = no_binary + self.only_binary = only_binary + + def __eq__(self, other: object) -> bool: + if not isinstance(other, self.__class__): + return NotImplemented + + if self.__slots__ != other.__slots__: + return False + + return all(getattr(self, k) == getattr(other, k) for k in self.__slots__) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.no_binary}, {self.only_binary})" + + @staticmethod + def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None: + if value.startswith("-"): + raise CommandError( + "--no-binary / --only-binary option requires 1 argument." + ) + new = value.split(",") + while ":all:" in new: + other.clear() + target.clear() + target.add(":all:") + del new[: new.index(":all:") + 1] + # Without a none, we want to discard everything as :all: covers it + if ":none:" not in new: + return + for name in new: + if name == ":none:": + target.clear() + continue + name = canonicalize_name(name) + other.discard(name) + target.add(name) + + def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]: + result = {"binary", "source"} + if canonical_name in self.only_binary: + result.discard("source") + elif canonical_name in self.no_binary: + result.discard("binary") + elif ":all:" in self.only_binary: + result.discard("source") + elif ":all:" in self.no_binary: + result.discard("binary") + return frozenset(result) + + def disallow_binaries(self) -> None: + self.handle_mutual_excludes( + ":all:", + self.no_binary, + self.only_binary, + ) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/index.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/index.py new file mode 100644 index 0000000000000000000000000000000000000000..b94c32511f0cda2363bfc4f29c9c8bfcc7101f9b --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/index.py @@ -0,0 +1,28 @@ +import urllib.parse + + +class PackageIndex: + """Represents a Package Index and provides easier access to endpoints""" + + __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"] + + def __init__(self, url: str, file_storage_domain: str) -> None: + super().__init__() + self.url = url + self.netloc = urllib.parse.urlsplit(url).netloc + self.simple_url = self._url_for_path("simple") + self.pypi_url = self._url_for_path("pypi") + + # This is part of a temporary hack used to block installs of PyPI + # packages which depend on external urls only necessary until PyPI can + # block such packages themselves + self.file_storage_domain = file_storage_domain + + def _url_for_path(self, path: str) -> str: + return urllib.parse.urljoin(self.url, path) + + +PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org") +TestPyPI = PackageIndex( + "https://test.pypi.org/", 
file_storage_domain="test-files.pythonhosted.org" +) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/link.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/link.py new file mode 100644 index 0000000000000000000000000000000000000000..2f41f2f6a09036d321de5a2453eeafe16f152ebf --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/link.py @@ -0,0 +1,590 @@ +import functools +import itertools +import logging +import os +import posixpath +import re +import urllib.parse +from dataclasses import dataclass +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Mapping, + NamedTuple, + Optional, + Tuple, + Union, +) + +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.filetypes import WHEEL_EXTENSION +from pip._internal.utils.hashes import Hashes +from pip._internal.utils.misc import ( + pairwise, + redact_auth_from_url, + split_auth_from_netloc, + splitext, +) +from pip._internal.utils.urls import path_to_url, url_to_path + +if TYPE_CHECKING: + from pip._internal.index.collector import IndexContent + +logger = logging.getLogger(__name__) + + +# Order matters, earlier hashes have a precedence over later hashes for what +# we will pick to use. +_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5") + + +@dataclass(frozen=True) +class LinkHash: + """Links to content may have embedded hash values. This class parses those. + + `name` must be any member of `_SUPPORTED_HASHES`. + + This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to + be JSON-serializable to conform to PEP 610, this class contains the logic for + parsing a hash name and value for correctness, and then checking whether that hash + conforms to a schema with `.is_hash_allowed()`.""" + + name: str + value: str + + _hash_url_fragment_re = re.compile( + # NB: we do not validate that the second group (.*) is a valid hex + # digest. Instead, we simply keep that string in this class, and then check it + # against Hashes when hash-checking is needed. This is easier to debug than + # proactively discarding an invalid hex digest, as we handle incorrect hashes + # and malformed hashes in the same place. + r"[#&]({choices})=([^&]*)".format( + choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES) + ), + ) + + def __post_init__(self) -> None: + assert self.name in _SUPPORTED_HASHES + + @classmethod + @functools.lru_cache(maxsize=None) + def find_hash_url_fragment(cls, url: str) -> Optional["LinkHash"]: + """Search a string for a checksum algorithm name and encoded output value.""" + match = cls._hash_url_fragment_re.search(url) + if match is None: + return None + name, value = match.groups() + return cls(name=name, value=value) + + def as_dict(self) -> Dict[str, str]: + return {self.name: self.value} + + def as_hashes(self) -> Hashes: + """Return a Hashes instance which checks only for the current hash.""" + return Hashes({self.name: [self.value]}) + + def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool: + """ + Return True if the current hash is allowed by `hashes`. 
+ """ + if hashes is None: + return False + return hashes.is_hash_allowed(self.name, hex_digest=self.value) + + +@dataclass(frozen=True) +class MetadataFile: + """Information about a core metadata file associated with a distribution.""" + + hashes: Optional[Dict[str, str]] + + def __post_init__(self) -> None: + if self.hashes is not None: + assert all(name in _SUPPORTED_HASHES for name in self.hashes) + + +def supported_hashes(hashes: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]: + # Remove any unsupported hash types from the mapping. If this leaves no + # supported hashes, return None + if hashes is None: + return None + hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES} + if not hashes: + return None + return hashes + + +def _clean_url_path_part(part: str) -> str: + """ + Clean a "part" of a URL path (i.e. after splitting on "@" characters). + """ + # We unquote prior to quoting to make sure nothing is double quoted. + return urllib.parse.quote(urllib.parse.unquote(part)) + + +def _clean_file_url_path(part: str) -> str: + """ + Clean the first part of a URL path that corresponds to a local + filesystem path (i.e. the first part after splitting on "@" characters). + """ + # We unquote prior to quoting to make sure nothing is double quoted. + # Also, on Windows the path part might contain a drive letter which + # should not be quoted. On Linux where drive letters do not + # exist, the colon should be quoted. We rely on urllib.request + # to do the right thing here. + return urllib.request.pathname2url(urllib.request.url2pathname(part)) + + +# percent-encoded: / +_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE) + + +def _clean_url_path(path: str, is_local_path: bool) -> str: + """ + Clean the path portion of a URL. + """ + if is_local_path: + clean_func = _clean_file_url_path + else: + clean_func = _clean_url_path_part + + # Split on the reserved characters prior to cleaning so that + # revision strings in VCS URLs are properly preserved. + parts = _reserved_chars_re.split(path) + + cleaned_parts = [] + for to_clean, reserved in pairwise(itertools.chain(parts, [""])): + cleaned_parts.append(clean_func(to_clean)) + # Normalize %xx escapes (e.g. %2f -> %2F) + cleaned_parts.append(reserved.upper()) + + return "".join(cleaned_parts) + + +def _ensure_quoted_url(url: str) -> str: + """ + Make sure a link is fully quoted. + For example, if ' ' occurs in the URL, it will be replaced with "%20", + and without double-quoting other characters. + """ + # Split the URL into parts according to the general structure + # `scheme://netloc/path;parameters?query#fragment`. + result = urllib.parse.urlparse(url) + # If the netloc is empty, then the URL refers to a local filesystem path. 
+    is_local_path = not result.netloc
+    path = _clean_url_path(result.path, is_local_path=is_local_path)
+    return urllib.parse.urlunparse(result._replace(path=path))
+
+
+@functools.total_ordering
+class Link:
+    """Represents a parsed link from a Package Index's simple URL"""
+
+    __slots__ = [
+        "_parsed_url",
+        "_url",
+        "_hashes",
+        "comes_from",
+        "requires_python",
+        "yanked_reason",
+        "metadata_file_data",
+        "cache_link_parsing",
+        "egg_fragment",
+    ]
+
+    def __init__(
+        self,
+        url: str,
+        comes_from: Optional[Union[str, "IndexContent"]] = None,
+        requires_python: Optional[str] = None,
+        yanked_reason: Optional[str] = None,
+        metadata_file_data: Optional[MetadataFile] = None,
+        cache_link_parsing: bool = True,
+        hashes: Optional[Mapping[str, str]] = None,
+    ) -> None:
+        """
+        :param url: url of the resource pointed to (href of the link)
+        :param comes_from: instance of IndexContent where the link was found,
+            or string.
+        :param requires_python: String containing the `Requires-Python`
+            metadata field, specified in PEP 345. This may be specified by
+            a data-requires-python attribute in the HTML link tag, as
+            described in PEP 503.
+        :param yanked_reason: the reason the file has been yanked, if the
+            file has been yanked, or None if the file hasn't been yanked.
+            This is the value of the "data-yanked" attribute, if present, in
+            a simple repository HTML link. If the file has been yanked but
+            no reason was provided, this should be the empty string. See
+            PEP 592 for more information and the specification.
+        :param metadata_file_data: the metadata attached to the file, or None if
+            no such metadata is provided. This argument, if not None, indicates
+            that a separate metadata file exists, and also optionally supplies
+            hashes for that file.
+        :param cache_link_parsing: A flag that is used elsewhere to determine
+            whether resources retrieved from this link should be cached. PyPI
+            URLs should generally have this set to False, for example.
+        :param hashes: A mapping of hash names to digests to allow us to
+            determine the validity of a download.
+        """
+
+        # The comes_from, requires_python, and metadata_file_data arguments are
+        # only used by classmethods of this class, and are not used in client
+        # code directly.
+
+        # url can be a UNC windows share
+        if url.startswith("\\\\"):
+            url = path_to_url(url)
+
+        self._parsed_url = urllib.parse.urlsplit(url)
+        # Store the url as a private attribute to prevent accidentally
+        # trying to set a new value.
+        self._url = url
+
+        link_hash = LinkHash.find_hash_url_fragment(url)
+        hashes_from_link = {} if link_hash is None else link_hash.as_dict()
+        if hashes is None:
+            self._hashes = hashes_from_link
+        else:
+            self._hashes = {**hashes, **hashes_from_link}
+
+        self.comes_from = comes_from
+        self.requires_python = requires_python if requires_python else None
+        self.yanked_reason = yanked_reason
+        self.metadata_file_data = metadata_file_data
+
+        self.cache_link_parsing = cache_link_parsing
+        self.egg_fragment = self._egg_fragment()
+
+    @classmethod
+    def from_json(
+        cls,
+        file_data: Dict[str, Any],
+        page_url: str,
+    ) -> Optional["Link"]:
+        """
+        Convert a PyPI JSON document from a simple repository page into a Link.
+ """ + file_url = file_data.get("url") + if file_url is None: + return None + + url = _ensure_quoted_url(urllib.parse.urljoin(page_url, file_url)) + pyrequire = file_data.get("requires-python") + yanked_reason = file_data.get("yanked") + hashes = file_data.get("hashes", {}) + + # PEP 714: Indexes must use the name core-metadata, but + # clients should support the old name as a fallback for compatibility. + metadata_info = file_data.get("core-metadata") + if metadata_info is None: + metadata_info = file_data.get("dist-info-metadata") + + # The metadata info value may be a boolean, or a dict of hashes. + if isinstance(metadata_info, dict): + # The file exists, and hashes have been supplied + metadata_file_data = MetadataFile(supported_hashes(metadata_info)) + elif metadata_info: + # The file exists, but there are no hashes + metadata_file_data = MetadataFile(None) + else: + # False or not present: the file does not exist + metadata_file_data = None + + # The Link.yanked_reason expects an empty string instead of a boolean. + if yanked_reason and not isinstance(yanked_reason, str): + yanked_reason = "" + # The Link.yanked_reason expects None instead of False. + elif not yanked_reason: + yanked_reason = None + + return cls( + url, + comes_from=page_url, + requires_python=pyrequire, + yanked_reason=yanked_reason, + hashes=hashes, + metadata_file_data=metadata_file_data, + ) + + @classmethod + def from_element( + cls, + anchor_attribs: Dict[str, Optional[str]], + page_url: str, + base_url: str, + ) -> Optional["Link"]: + """ + Convert an anchor element's attributes in a simple repository page to a Link. + """ + href = anchor_attribs.get("href") + if not href: + return None + + url = _ensure_quoted_url(urllib.parse.urljoin(base_url, href)) + pyrequire = anchor_attribs.get("data-requires-python") + yanked_reason = anchor_attribs.get("data-yanked") + + # PEP 714: Indexes must use the name data-core-metadata, but + # clients should support the old name as a fallback for compatibility. + metadata_info = anchor_attribs.get("data-core-metadata") + if metadata_info is None: + metadata_info = anchor_attribs.get("data-dist-info-metadata") + # The metadata info value may be the string "true", or a string of + # the form "hashname=hashval" + if metadata_info == "true": + # The file exists, but there are no hashes + metadata_file_data = MetadataFile(None) + elif metadata_info is None: + # The file does not exist + metadata_file_data = None + else: + # The file exists, and hashes have been supplied + hashname, sep, hashval = metadata_info.partition("=") + if sep == "=": + metadata_file_data = MetadataFile(supported_hashes({hashname: hashval})) + else: + # Error - data is wrong. Treat as no hashes supplied. 
+                logger.debug(
+                    "Index returned invalid data-dist-info-metadata value: %s",
+                    metadata_info,
+                )
+                metadata_file_data = MetadataFile(None)
+
+        return cls(
+            url,
+            comes_from=page_url,
+            requires_python=pyrequire,
+            yanked_reason=yanked_reason,
+            metadata_file_data=metadata_file_data,
+        )
+
+    def __str__(self) -> str:
+        if self.requires_python:
+            rp = f" (requires-python:{self.requires_python})"
+        else:
+            rp = ""
+        if self.comes_from:
+            return f"{redact_auth_from_url(self._url)} (from {self.comes_from}){rp}"
+        else:
+            return redact_auth_from_url(str(self._url))
+
+    def __repr__(self) -> str:
+        return f"<Link {self}>"
+
+    def __hash__(self) -> int:
+        return hash(self.url)
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Link):
+            return NotImplemented
+        return self.url == other.url
+
+    def __lt__(self, other: Any) -> bool:
+        if not isinstance(other, Link):
+            return NotImplemented
+        return self.url < other.url
+
+    @property
+    def url(self) -> str:
+        return self._url
+
+    @property
+    def filename(self) -> str:
+        path = self.path.rstrip("/")
+        name = posixpath.basename(path)
+        if not name:
+            # Make sure we don't leak auth information if the netloc
+            # includes a username and password.
+            netloc, user_pass = split_auth_from_netloc(self.netloc)
+            return netloc
+
+        name = urllib.parse.unquote(name)
+        assert name, f"URL {self._url!r} produced no filename"
+        return name
+
+    @property
+    def file_path(self) -> str:
+        return url_to_path(self.url)
+
+    @property
+    def scheme(self) -> str:
+        return self._parsed_url.scheme
+
+    @property
+    def netloc(self) -> str:
+        """
+        This can contain auth information.
+        """
+        return self._parsed_url.netloc
+
+    @property
+    def path(self) -> str:
+        return urllib.parse.unquote(self._parsed_url.path)
+
+    def splitext(self) -> Tuple[str, str]:
+        return splitext(posixpath.basename(self.path.rstrip("/")))
+
+    @property
+    def ext(self) -> str:
+        return self.splitext()[1]
+
+    @property
+    def url_without_fragment(self) -> str:
+        scheme, netloc, path, query, fragment = self._parsed_url
+        return urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
+
+    _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")
+
+    # Per PEP 508.
+    _project_name_re = re.compile(
+        r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
+    )
+
+    def _egg_fragment(self) -> Optional[str]:
+        match = self._egg_fragment_re.search(self._url)
+        if not match:
+            return None
+
+        # An egg fragment looks like a PEP 508 project name, along with
+        # an optional extras specifier. Anything else is invalid.
+        project_name = match.group(1)
+        if not self._project_name_re.match(project_name):
+            deprecated(
+                reason=f"{self} contains an egg fragment with a non-PEP 508 name",
+                replacement="to use the req @ url syntax, and remove the egg fragment",
+                gone_in="25.0",
+                issue=11617,
+            )
+
+        return project_name
+
+    _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")
+
+    @property
+    def subdirectory_fragment(self) -> Optional[str]:
+        match = self._subdirectory_fragment_re.search(self._url)
+        if not match:
+            return None
+        return match.group(1)
+
+    def metadata_link(self) -> Optional["Link"]:
+        """Return a link to the associated core metadata file (if any)."""
+        if self.metadata_file_data is None:
+            return None
+        metadata_url = f"{self.url_without_fragment}.metadata"
+        if self.metadata_file_data.hashes is None:
+            return Link(metadata_url)
+        return Link(metadata_url, hashes=self.metadata_file_data.hashes)
+
+    def as_hashes(self) -> Hashes:
+        return Hashes({k: [v] for k, v in self._hashes.items()})
+
+    @property
+    def hash(self) -> Optional[str]:
+        return next(iter(self._hashes.values()), None)
+
+    @property
+    def hash_name(self) -> Optional[str]:
+        return next(iter(self._hashes), None)
+
+    @property
+    def show_url(self) -> str:
+        return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])
+
+    @property
+    def is_file(self) -> bool:
+        return self.scheme == "file"
+
+    def is_existing_dir(self) -> bool:
+        return self.is_file and os.path.isdir(self.file_path)
+
+    @property
+    def is_wheel(self) -> bool:
+        return self.ext == WHEEL_EXTENSION
+
+    @property
+    def is_vcs(self) -> bool:
+        from pip._internal.vcs import vcs
+
+        return self.scheme in vcs.all_schemes
+
+    @property
+    def is_yanked(self) -> bool:
+        return self.yanked_reason is not None
+
+    @property
+    def has_hash(self) -> bool:
+        return bool(self._hashes)
+
+    def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
+        """
+        Return True if the link has a hash and it is allowed by `hashes`.
+        """
+        if hashes is None:
+            return False
+        return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items())
+
+
+class _CleanResult(NamedTuple):
+    """Convert link for equivalency check.
+
+    This is used in the resolver to check whether two URL-specified requirements
+    likely point to the same distribution and can be considered equivalent. This
+    equivalency logic avoids comparing URLs literally, which can be too strict
+    (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts that surprise users.
+
+    Currently this does three things:
+
+    1. Drop the basic auth part. This is technically wrong since a server can
+       serve different content based on auth, but if it does that, it is even
+       impossible to guarantee two URLs without auth are equivalent, since
+       the user can input different auth information when prompted. So the
+       practical solution is to assume the auth doesn't affect the response.
+    2. Parse the query to avoid the ordering issue. Note that the ordering of
+       values under the same key is NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1"
+       are still considered different.
+    3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
+       hash values, since it should have no impact on the downloaded content.
+       Note that this drops the "egg=" part historically used to denote the
+       requested project (and extras), which is wrong in the strictest sense,
+       but enough people supply it inconsistently that comparing it would cause
+       superfluous resolution conflicts, so we choose to ignore it.
+ """ + + parsed: urllib.parse.SplitResult + query: Dict[str, List[str]] + subdirectory: str + hashes: Dict[str, str] + + +def _clean_link(link: Link) -> _CleanResult: + parsed = link._parsed_url + netloc = parsed.netloc.rsplit("@", 1)[-1] + # According to RFC 8089, an empty host in file: means localhost. + if parsed.scheme == "file" and not netloc: + netloc = "localhost" + fragment = urllib.parse.parse_qs(parsed.fragment) + if "egg" in fragment: + logger.debug("Ignoring egg= fragment in %s", link) + try: + # If there are multiple subdirectory values, use the first one. + # This matches the behavior of Link.subdirectory_fragment. + subdirectory = fragment["subdirectory"][0] + except (IndexError, KeyError): + subdirectory = "" + # If there are multiple hash values under the same algorithm, use the + # first one. This matches the behavior of Link.hash_value. + hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment} + return _CleanResult( + parsed=parsed._replace(netloc=netloc, query="", fragment=""), + query=urllib.parse.parse_qs(parsed.query), + subdirectory=subdirectory, + hashes=hashes, + ) + + +@functools.lru_cache(maxsize=None) +def links_equivalent(link1: Link, link2: Link) -> bool: + return _clean_link(link1) == _clean_link(link2) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/wheel.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/wheel.py new file mode 100644 index 0000000000000000000000000000000000000000..ea8560089d3df41689f41fe2639aa9f61dd1eace --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/models/wheel.py @@ -0,0 +1,118 @@ +"""Represents a wheel file and provides access to the various parts of the +name that have meaning. +""" + +import re +from typing import Dict, Iterable, List + +from pip._vendor.packaging.tags import Tag +from pip._vendor.packaging.utils import ( + InvalidWheelFilename as PackagingInvalidWheelName, +) +from pip._vendor.packaging.utils import parse_wheel_filename + +from pip._internal.exceptions import InvalidWheelFilename +from pip._internal.utils.deprecation import deprecated + + +class Wheel: + """A wheel file""" + + wheel_file_re = re.compile( + r"""^(?P(?P[^\s-]+?)-(?P[^\s-]*?)) + ((-(?P\d[^-]*?))?-(?P[^\s-]+?)-(?P[^\s-]+?)-(?P[^\s-]+?) + \.whl|\.dist-info)$""", + re.VERBOSE, + ) + + def __init__(self, filename: str) -> None: + """ + :raises InvalidWheelFilename: when the filename is invalid for a wheel + """ + wheel_info = self.wheel_file_re.match(filename) + if not wheel_info: + raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.") + self.filename = filename + self.name = wheel_info.group("name").replace("_", "-") + _version = wheel_info.group("ver") + if "_" in _version: + try: + parse_wheel_filename(filename) + except PackagingInvalidWheelName as e: + deprecated( + reason=( + f"Wheel filename {filename!r} is not correctly normalised. 
" + "Future versions of pip will raise the following error:\n" + f"{e.args[0]}\n\n" + ), + replacement=( + "to rename the wheel to use a correctly normalised " + "name (this may require updating the version in " + "the project metadata)" + ), + gone_in="25.1", + issue=12938, + ) + + _version = _version.replace("_", "-") + + self.version = _version + self.build_tag = wheel_info.group("build") + self.pyversions = wheel_info.group("pyver").split(".") + self.abis = wheel_info.group("abi").split(".") + self.plats = wheel_info.group("plat").split(".") + + # All the tag combinations from this file + self.file_tags = { + Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats + } + + def get_formatted_file_tags(self) -> List[str]: + """Return the wheel's tags as a sorted list of strings.""" + return sorted(str(tag) for tag in self.file_tags) + + def support_index_min(self, tags: List[Tag]) -> int: + """Return the lowest index that one of the wheel's file_tag combinations + achieves in the given list of supported tags. + + For example, if there are 8 supported tags and one of the file tags + is first in the list, then return 0. + + :param tags: the PEP 425 tags to check the wheel against, in order + with most preferred first. + + :raises ValueError: If none of the wheel's file tags match one of + the supported tags. + """ + try: + return next(i for i, t in enumerate(tags) if t in self.file_tags) + except StopIteration: + raise ValueError() + + def find_most_preferred_tag( + self, tags: List[Tag], tag_to_priority: Dict[Tag, int] + ) -> int: + """Return the priority of the most preferred tag that one of the wheel's file + tag combinations achieves in the given list of supported tags using the given + tag_to_priority mapping, where lower priorities are more-preferred. + + This is used in place of support_index_min in some cases in order to avoid + an expensive linear scan of a large list of tags. + + :param tags: the PEP 425 tags to check the wheel against. + :param tag_to_priority: a mapping from tag to priority of that tag, where + lower is more preferred. + + :raises ValueError: If none of the wheel's file tags match one of + the supported tags. + """ + return min( + tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority + ) + + def supported(self, tags: Iterable[Tag]) -> bool: + """Return whether the wheel is compatible with one of the given tags. + + :param tags: the PEP 425 tags to check the wheel against. + """ + return not self.file_tags.isdisjoint(tags) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/appdirs.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/appdirs.py new file mode 100644 index 0000000000000000000000000000000000000000..16933bf8afedcbe3e9d4fcc04e5f7246228c56fc --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/appdirs.py @@ -0,0 +1,52 @@ +""" +This code wraps the vendored appdirs module to so the return values are +compatible for the current pip code base. + +The intention is to rewrite current usages gradually, keeping the tests pass, +and eventually drop this after all usages are changed. 
+""" + +import os +import sys +from typing import List + +from pip._vendor import platformdirs as _appdirs + + +def user_cache_dir(appname: str) -> str: + return _appdirs.user_cache_dir(appname, appauthor=False) + + +def _macos_user_config_dir(appname: str, roaming: bool = True) -> str: + # Use ~/Application Support/pip, if the directory exists. + path = _appdirs.user_data_dir(appname, appauthor=False, roaming=roaming) + if os.path.isdir(path): + return path + + # Use a Linux-like ~/.config/pip, by default. + linux_like_path = "~/.config/" + if appname: + linux_like_path = os.path.join(linux_like_path, appname) + + return os.path.expanduser(linux_like_path) + + +def user_config_dir(appname: str, roaming: bool = True) -> str: + if sys.platform == "darwin": + return _macos_user_config_dir(appname, roaming) + + return _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming) + + +# for the discussion regarding site_config_dir locations +# see +def site_config_dirs(appname: str) -> List[str]: + if sys.platform == "darwin": + return [_appdirs.site_data_dir(appname, appauthor=False, multipath=True)] + + dirval = _appdirs.site_config_dir(appname, appauthor=False, multipath=True) + if sys.platform == "win32": + return [dirval] + + # Unix-y system. Look in /etc as well. + return dirval.split(os.pathsep) + ["/etc"] diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b54e4ee51d03a7beca065971967b9c70cc3526 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/compat.py @@ -0,0 +1,79 @@ +"""Stuff that differs in different Python versions and platform +distributions.""" + +import importlib.resources +import logging +import os +import sys +from typing import IO + +__all__ = ["get_path_uid", "stdlib_pkgs", "WINDOWS"] + + +logger = logging.getLogger(__name__) + + +def has_tls() -> bool: + try: + import _ssl # noqa: F401 # ignore unused + + return True + except ImportError: + pass + + from pip._vendor.urllib3.util import IS_PYOPENSSL + + return IS_PYOPENSSL + + +def get_path_uid(path: str) -> int: + """ + Return path's uid. + + Does not follow symlinks: + https://github.com/pypa/pip/pull/935#discussion_r5307003 + + Placed this function in compat due to differences on AIX and + Jython, that should eventually go away. + + :raises OSError: When path is a symlink or can't be read. + """ + if hasattr(os, "O_NOFOLLOW"): + fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) + file_uid = os.fstat(fd).st_uid + os.close(fd) + else: # AIX and Jython + # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW + if not os.path.islink(path): + # older versions of Jython don't have `os.fstat` + file_uid = os.stat(path).st_uid + else: + # raise OSError for parity with os.O_NOFOLLOW above + raise OSError(f"{path} is a symlink; Will not return uid for symlinks") + return file_uid + + +# The importlib.resources.open_text function was deprecated in 3.11 with suggested +# replacement we use below. 
+if sys.version_info < (3, 11): + open_text_resource = importlib.resources.open_text +else: + + def open_text_resource( + package: str, resource: str, encoding: str = "utf-8", errors: str = "strict" + ) -> IO[str]: + return (importlib.resources.files(package) / resource).open( + "r", encoding=encoding, errors=errors + ) + + +# packages in the stdlib that may have installation metadata, but should not be +# considered 'installed'. this theoretically could be determined based on +# dist.location (py27:`sysconfig.get_paths()['stdlib']`, +# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may +# make this ineffective, so hard-coding +stdlib_pkgs = {"python", "wsgiref", "argparse"} + + +# windows detection, covers cpython and ironpython +WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt") diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/direct_url_helpers.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/direct_url_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..66020d3964ad4d8bc55893380383b271642471f7 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/direct_url_helpers.py @@ -0,0 +1,87 @@ +from typing import Optional + +from pip._internal.models.direct_url import ArchiveInfo, DirectUrl, DirInfo, VcsInfo +from pip._internal.models.link import Link +from pip._internal.utils.urls import path_to_url +from pip._internal.vcs import vcs + + +def direct_url_as_pep440_direct_reference(direct_url: DirectUrl, name: str) -> str: + """Convert a DirectUrl to a pip requirement string.""" + direct_url.validate() # if invalid, this is a pip bug + requirement = name + " @ " + fragments = [] + if isinstance(direct_url.info, VcsInfo): + requirement += ( + f"{direct_url.info.vcs}+{direct_url.url}@{direct_url.info.commit_id}" + ) + elif isinstance(direct_url.info, ArchiveInfo): + requirement += direct_url.url + if direct_url.info.hash: + fragments.append(direct_url.info.hash) + else: + assert isinstance(direct_url.info, DirInfo) + requirement += direct_url.url + if direct_url.subdirectory: + fragments.append("subdirectory=" + direct_url.subdirectory) + if fragments: + requirement += "#" + "&".join(fragments) + return requirement + + +def direct_url_for_editable(source_dir: str) -> DirectUrl: + return DirectUrl( + url=path_to_url(source_dir), + info=DirInfo(editable=True), + ) + + +def direct_url_from_link( + link: Link, source_dir: Optional[str] = None, link_is_in_wheel_cache: bool = False +) -> DirectUrl: + if link.is_vcs: + vcs_backend = vcs.get_backend_for_scheme(link.scheme) + assert vcs_backend + url, requested_revision, _ = vcs_backend.get_url_rev_and_auth( + link.url_without_fragment + ) + # For VCS links, we need to find out and add commit_id. + if link_is_in_wheel_cache: + # If the requested VCS link corresponds to a cached + # wheel, it means the requested revision was an + # immutable commit hash, otherwise it would not have + # been cached. In that case we don't have a source_dir + # with the VCS checkout. + assert requested_revision + commit_id = requested_revision + else: + # If the wheel was not in cache, it means we have + # had to checkout from VCS to build and we have a source_dir + # which we can inspect to find out the commit id. 
+            assert source_dir
+            commit_id = vcs_backend.get_revision(source_dir)
+        return DirectUrl(
+            url=url,
+            info=VcsInfo(
+                vcs=vcs_backend.name,
+                commit_id=commit_id,
+                requested_revision=requested_revision,
+            ),
+            subdirectory=link.subdirectory_fragment,
+        )
+    elif link.is_existing_dir():
+        return DirectUrl(
+            url=link.url_without_fragment,
+            info=DirInfo(),
+            subdirectory=link.subdirectory_fragment,
+        )
+    else:
+        hash = None
+        hash_name = link.hash_name
+        if hash_name:
+            hash = f"{hash_name}={link.hash}"
+        return DirectUrl(
+            url=link.url_without_fragment,
+            info=ArchiveInfo(hash=hash),
+            subdirectory=link.subdirectory_fragment,
+        )
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/egg_link.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/egg_link.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a384a63682ce53cafcf889551b13b9177a14e44
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/egg_link.py
@@ -0,0 +1,80 @@
+import os
+import re
+import sys
+from typing import List, Optional
+
+from pip._internal.locations import site_packages, user_site
+from pip._internal.utils.virtualenv import (
+    running_under_virtualenv,
+    virtualenv_no_global,
+)
+
+__all__ = [
+    "egg_link_path_from_sys_path",
+    "egg_link_path_from_location",
+]
+
+
+def _egg_link_names(raw_name: str) -> List[str]:
+    """
+    Convert a Name metadata value to a .egg-link name, by applying
+    the same substitution as pkg_resources's safe_name function.
+    Note: we cannot use canonicalize_name because its logic differs.
+
+    We also look for the raw name (without normalization) as setuptools 69 changed
+    the way it names .egg-link files (https://github.com/pypa/setuptools/issues/4167).
+    """
+    return [
+        re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link",
+        f"{raw_name}.egg-link",
+    ]
+
+
+def egg_link_path_from_sys_path(raw_name: str) -> Optional[str]:
+    """
+    Look for a .egg-link file for project name, by walking sys.path.
+    """
+    egg_link_names = _egg_link_names(raw_name)
+    for path_item in sys.path:
+        for egg_link_name in egg_link_names:
+            egg_link = os.path.join(path_item, egg_link_name)
+            if os.path.isfile(egg_link):
+                return egg_link
+    return None
+
+
+def egg_link_path_from_location(raw_name: str) -> Optional[str]:
+    """
+    Return the path for the .egg-link file if it exists, otherwise, None.
+
+    There are 3 scenarios:
+    1) not in a virtualenv
+       try to find in site.USER_SITE, then site_packages
+    2) in a no-global virtualenv
+       try to find in site_packages
+    3) in a yes-global virtualenv
+       try to find in site_packages, then site.USER_SITE
+       (don't look in global location)
+
+    For #1 and #3, there could be odd cases where there's an egg-link in 2
+    locations.
+
+    This method will just return the first one found.
+ """ + sites: List[str] = [] + if running_under_virtualenv(): + sites.append(site_packages) + if not virtualenv_no_global() and user_site: + sites.append(user_site) + else: + if user_site: + sites.append(user_site) + sites.append(site_packages) + + egg_link_names = _egg_link_names(raw_name) + for site in sites: + for egg_link_name in egg_link_names: + egglink = os.path.join(site, egg_link_name) + if os.path.isfile(egglink): + return egglink + return None diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/entrypoints.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/entrypoints.py new file mode 100644 index 0000000000000000000000000000000000000000..150136938548af6aa5ae1f716b330d0eb2d3e013 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/entrypoints.py @@ -0,0 +1,84 @@ +import itertools +import os +import shutil +import sys +from typing import List, Optional + +from pip._internal.cli.main import main +from pip._internal.utils.compat import WINDOWS + +_EXECUTABLE_NAMES = [ + "pip", + f"pip{sys.version_info.major}", + f"pip{sys.version_info.major}.{sys.version_info.minor}", +] +if WINDOWS: + _allowed_extensions = {"", ".exe"} + _EXECUTABLE_NAMES = [ + "".join(parts) + for parts in itertools.product(_EXECUTABLE_NAMES, _allowed_extensions) + ] + + +def _wrapper(args: Optional[List[str]] = None) -> int: + """Central wrapper for all old entrypoints. + + Historically pip has had several entrypoints defined. Because of issues + arising from PATH, sys.path, multiple Pythons, their interactions, and most + of them having a pip installed, users suffer every time an entrypoint gets + moved. + + To alleviate this pain, and provide a mechanism for warning users and + directing them to an appropriate place for help, we now define all of + our old entrypoints as wrappers for the current one. + """ + sys.stderr.write( + "WARNING: pip is being invoked by an old script wrapper. This will " + "fail in a future version of pip.\n" + "Please see https://github.com/pypa/pip/issues/5599 for advice on " + "fixing the underlying issue.\n" + "To avoid this problem you can invoke Python with '-m pip' instead of " + "running pip directly.\n" + ) + return main(args) + + +def get_best_invocation_for_this_pip() -> str: + """Try to figure out the best way to invoke pip in the current environment.""" + binary_directory = "Scripts" if WINDOWS else "bin" + binary_prefix = os.path.join(sys.prefix, binary_directory) + + # Try to use pip[X[.Y]] names, if those executables for this environment are + # the first on PATH with that name. + path_parts = os.path.normcase(os.environ.get("PATH", "")).split(os.pathsep) + exe_are_in_PATH = os.path.normcase(binary_prefix) in path_parts + if exe_are_in_PATH: + for exe_name in _EXECUTABLE_NAMES: + found_executable = shutil.which(exe_name) + binary_executable = os.path.join(binary_prefix, exe_name) + if ( + found_executable + and os.path.exists(binary_executable) + and os.path.samefile( + found_executable, + binary_executable, + ) + ): + return exe_name + + # Use the `-m` invocation, if there's no "nice" invocation. + return f"{get_best_invocation_for_this_python()} -m pip" + + +def get_best_invocation_for_this_python() -> str: + """Try to figure out the best way to invoke the current Python.""" + exe = sys.executable + exe_name = os.path.basename(exe) + + # Try to use the basename, if it's the first executable. 
+    found_executable = shutil.which(exe_name)
+    if found_executable and os.path.samefile(found_executable, exe):
+        return exe_name
+
+    # Use the full executable name, because we couldn't find something simpler.
+    return exe
diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/hashes.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/hashes.py
new file mode 100644
index 0000000000000000000000000000000000000000..535e94fca0cc8b049673ee0d02dba259c68af76c
--- /dev/null
+++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/hashes.py
@@ -0,0 +1,147 @@
+import hashlib
+from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, NoReturn, Optional
+
+from pip._internal.exceptions import HashMismatch, HashMissing, InstallationError
+from pip._internal.utils.misc import read_chunks
+
+if TYPE_CHECKING:
+    from hashlib import _Hash
+
+
+# The recommended hash algo of the moment. Change this whenever the state of
+# the art changes; it won't hurt backward compatibility.
+FAVORITE_HASH = "sha256"
+
+
+# Names of hashlib algorithms allowed by the --hash option and ``pip hash``
+# Currently, those are the ones at least as collision-resistant as sha256.
+STRONG_HASHES = ["sha256", "sha384", "sha512"]
+
+
+class Hashes:
+    """A wrapper that builds multiple hashes at once and checks them against
+    known-good values
+
+    """
+
+    def __init__(self, hashes: Optional[Dict[str, List[str]]] = None) -> None:
+        """
+        :param hashes: A dict of algorithm names pointing to lists of allowed
+            hex digests
+        """
+        allowed = {}
+        if hashes is not None:
+            for alg, keys in hashes.items():
+                # Make sure values are always sorted (to ease equality checks)
+                allowed[alg] = [k.lower() for k in sorted(keys)]
+        self._allowed = allowed
+
+    def __and__(self, other: "Hashes") -> "Hashes":
+        if not isinstance(other, Hashes):
+            return NotImplemented
+
+        # If either of the Hashes objects is entirely empty (i.e. no hash
+        # specified at all), all hashes from the other object are allowed.
+        if not other:
+            return self
+        if not self:
+            return other
+
+        # Otherwise only hashes that are present in both objects are allowed.
+        new = {}
+        for alg, values in other._allowed.items():
+            if alg not in self._allowed:
+                continue
+            new[alg] = [v for v in values if v in self._allowed[alg]]
+        return Hashes(new)
+
+    @property
+    def digest_count(self) -> int:
+        return sum(len(digests) for digests in self._allowed.values())
+
+    def is_hash_allowed(self, hash_name: str, hex_digest: str) -> bool:
+        """Return whether the given hex digest is allowed."""
+        return hex_digest in self._allowed.get(hash_name, [])
+
+    def check_against_chunks(self, chunks: Iterable[bytes]) -> None:
+        """Check good hashes against ones built from iterable of chunks of
+        data.
+
+        Raise HashMismatch if none match.
+
+        """
+        gots = {}
+        for hash_name in self._allowed.keys():
+            try:
+                gots[hash_name] = hashlib.new(hash_name)
+            except (ValueError, TypeError):
+                raise InstallationError(f"Unknown hash name: {hash_name}")
+
+        for chunk in chunks:
+            for hash in gots.values():
+                hash.update(chunk)
+
+        for hash_name, got in gots.items():
+            if got.hexdigest() in self._allowed[hash_name]:
+                return
+        self._raise(gots)
+
+    def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn":
+        raise HashMismatch(self._allowed, gots)
+
+    def check_against_file(self, file: BinaryIO) -> None:
+        """Check good hashes against a file-like object
+
+        Raise HashMismatch if none match.
+ + """ + return self.check_against_chunks(read_chunks(file)) + + def check_against_path(self, path: str) -> None: + with open(path, "rb") as file: + return self.check_against_file(file) + + def has_one_of(self, hashes: Dict[str, str]) -> bool: + """Return whether any of the given hashes are allowed.""" + for hash_name, hex_digest in hashes.items(): + if self.is_hash_allowed(hash_name, hex_digest): + return True + return False + + def __bool__(self) -> bool: + """Return whether I know any known-good hashes.""" + return bool(self._allowed) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Hashes): + return NotImplemented + return self._allowed == other._allowed + + def __hash__(self) -> int: + return hash( + ",".join( + sorted( + ":".join((alg, digest)) + for alg, digest_list in self._allowed.items() + for digest in digest_list + ) + ) + ) + + +class MissingHashes(Hashes): + """A workalike for Hashes used when we're missing a hash for a requirement + + It computes the actual hash of the requirement and raises a HashMissing + exception showing it to the user. + + """ + + def __init__(self) -> None: + """Don't offer the ``hashes`` kwarg.""" + # Pass our favorite hash in to generate a "gotten hash". With the + # empty list, it will never match, so an error will always raise. + super().__init__(hashes={FAVORITE_HASH: []}) + + def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn": + raise HashMissing(gots[FAVORITE_HASH].hexdigest()) diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/logging.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..41f6eb51a26b777269ce11ac716c0a02e16ca239 --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/logging.py @@ -0,0 +1,347 @@ +import contextlib +import errno +import logging +import logging.handlers +import os +import sys +import threading +from dataclasses import dataclass +from io import TextIOWrapper +from logging import Filter +from typing import Any, ClassVar, Generator, List, Optional, TextIO, Type + +from pip._vendor.rich.console import ( + Console, + ConsoleOptions, + ConsoleRenderable, + RenderableType, + RenderResult, + RichCast, +) +from pip._vendor.rich.highlighter import NullHighlighter +from pip._vendor.rich.logging import RichHandler +from pip._vendor.rich.segment import Segment +from pip._vendor.rich.style import Style + +from pip._internal.utils._log import VERBOSE, getLogger +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX +from pip._internal.utils.misc import ensure_dir + +_log_state = threading.local() +subprocess_logger = getLogger("pip.subprocessor") + + +class BrokenStdoutLoggingError(Exception): + """ + Raised if BrokenPipeError occurs for the stdout stream while logging. 
+ """ + + +def _is_broken_pipe_error(exc_class: Type[BaseException], exc: BaseException) -> bool: + if exc_class is BrokenPipeError: + return True + + # On Windows, a broken pipe can show up as EINVAL rather than EPIPE: + # https://bugs.python.org/issue19612 + # https://bugs.python.org/issue30418 + if not WINDOWS: + return False + + return isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE) + + +@contextlib.contextmanager +def indent_log(num: int = 2) -> Generator[None, None, None]: + """ + A context manager which will cause the log output to be indented for any + log messages emitted inside it. + """ + # For thread-safety + _log_state.indentation = get_indentation() + _log_state.indentation += num + try: + yield + finally: + _log_state.indentation -= num + + +def get_indentation() -> int: + return getattr(_log_state, "indentation", 0) + + +class IndentingFormatter(logging.Formatter): + default_time_format = "%Y-%m-%dT%H:%M:%S" + + def __init__( + self, + *args: Any, + add_timestamp: bool = False, + **kwargs: Any, + ) -> None: + """ + A logging.Formatter that obeys the indent_log() context manager. + + :param add_timestamp: A bool indicating output lines should be prefixed + with their record's timestamp. + """ + self.add_timestamp = add_timestamp + super().__init__(*args, **kwargs) + + def get_message_start(self, formatted: str, levelno: int) -> str: + """ + Return the start of the formatted log message (not counting the + prefix to add to each line). + """ + if levelno < logging.WARNING: + return "" + if formatted.startswith(DEPRECATION_MSG_PREFIX): + # Then the message already has a prefix. We don't want it to + # look like "WARNING: DEPRECATION: ...." + return "" + if levelno < logging.ERROR: + return "WARNING: " + + return "ERROR: " + + def format(self, record: logging.LogRecord) -> str: + """ + Calls the standard formatter, but will indent all of the log message + lines by our current indentation level. + """ + formatted = super().format(record) + message_start = self.get_message_start(formatted, record.levelno) + formatted = message_start + formatted + + prefix = "" + if self.add_timestamp: + prefix = f"{self.formatTime(record)} " + prefix += " " * get_indentation() + formatted = "".join([prefix + line for line in formatted.splitlines(True)]) + return formatted + + +@dataclass +class IndentedRenderable: + renderable: RenderableType + indent: int + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + segments = console.render(self.renderable, options) + lines = Segment.split_lines(segments) + for line in lines: + yield Segment(" " * self.indent) + yield from line + yield Segment("\n") + + +class RichPipStreamHandler(RichHandler): + KEYWORDS: ClassVar[Optional[List[str]]] = [] + + def __init__(self, stream: Optional[TextIO], no_color: bool) -> None: + super().__init__( + console=Console(file=stream, no_color=no_color, soft_wrap=True), + show_time=False, + show_level=False, + show_path=False, + highlighter=NullHighlighter(), + ) + + # Our custom override on Rich's logger, to make things work as we need them to. + def emit(self, record: logging.LogRecord) -> None: + style: Optional[Style] = None + + # If we are given a diagnostic error to present, present it with indentation. 
+ if getattr(record, "rich", False): + assert isinstance(record.args, tuple) + (rich_renderable,) = record.args + assert isinstance( + rich_renderable, (ConsoleRenderable, RichCast, str) + ), f"{rich_renderable} is not rich-console-renderable" + + renderable: RenderableType = IndentedRenderable( + rich_renderable, indent=get_indentation() + ) + else: + message = self.format(record) + renderable = self.render_message(record, message) + if record.levelno is not None: + if record.levelno >= logging.ERROR: + style = Style(color="red") + elif record.levelno >= logging.WARNING: + style = Style(color="yellow") + + try: + self.console.print(renderable, overflow="ignore", crop=False, style=style) + except Exception: + self.handleError(record) + + def handleError(self, record: logging.LogRecord) -> None: + """Called when logging is unable to log some output.""" + + exc_class, exc = sys.exc_info()[:2] + # If a broken pipe occurred while calling write() or flush() on the + # stdout stream in logging's Handler.emit(), then raise our special + # exception so we can handle it in main() instead of logging the + # broken pipe error and continuing. + if ( + exc_class + and exc + and self.console.file is sys.stdout + and _is_broken_pipe_error(exc_class, exc) + ): + raise BrokenStdoutLoggingError() + + return super().handleError(record) + + +class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): + def _open(self) -> TextIOWrapper: + ensure_dir(os.path.dirname(self.baseFilename)) + return super()._open() + + +class MaxLevelFilter(Filter): + def __init__(self, level: int) -> None: + self.level = level + + def filter(self, record: logging.LogRecord) -> bool: + return record.levelno < self.level + + +class ExcludeLoggerFilter(Filter): + """ + A logging Filter that excludes records from a logger (or its children). + """ + + def filter(self, record: logging.LogRecord) -> bool: + # The base Filter class allows only records from a logger (or its + # children). + return not super().filter(record) + + +def setup_logging(verbosity: int, no_color: bool, user_log_file: Optional[str]) -> int: + """Configures and sets up all of the logging + + Returns the requested logging level, as its integer value. + """ + + # Determine the level to be logging at. + if verbosity >= 2: + level_number = logging.DEBUG + elif verbosity == 1: + level_number = VERBOSE + elif verbosity == -1: + level_number = logging.WARNING + elif verbosity == -2: + level_number = logging.ERROR + elif verbosity <= -3: + level_number = logging.CRITICAL + else: + level_number = logging.INFO + + level = logging.getLevelName(level_number) + + # The "root" logger should match the "console" level *unless* we also need + # to log to a user log file. + include_user_log = user_log_file is not None + if include_user_log: + additional_log_file = user_log_file + root_level = "DEBUG" + else: + additional_log_file = "/dev/null" + root_level = level + + # Disable any logging besides WARNING unless we have DEBUG level logging + # enabled for vendored libraries. 
+ vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG" + + # Shorthands for clarity + log_streams = { + "stdout": "ext://sys.stdout", + "stderr": "ext://sys.stderr", + } + handler_classes = { + "stream": "pip._internal.utils.logging.RichPipStreamHandler", + "file": "pip._internal.utils.logging.BetterRotatingFileHandler", + } + handlers = ["console", "console_errors", "console_subprocess"] + ( + ["user_log"] if include_user_log else [] + ) + + logging.config.dictConfig( + { + "version": 1, + "disable_existing_loggers": False, + "filters": { + "exclude_warnings": { + "()": "pip._internal.utils.logging.MaxLevelFilter", + "level": logging.WARNING, + }, + "restrict_to_subprocess": { + "()": "logging.Filter", + "name": subprocess_logger.name, + }, + "exclude_subprocess": { + "()": "pip._internal.utils.logging.ExcludeLoggerFilter", + "name": subprocess_logger.name, + }, + }, + "formatters": { + "indent": { + "()": IndentingFormatter, + "format": "%(message)s", + }, + "indent_with_timestamp": { + "()": IndentingFormatter, + "format": "%(message)s", + "add_timestamp": True, + }, + }, + "handlers": { + "console": { + "level": level, + "class": handler_classes["stream"], + "no_color": no_color, + "stream": log_streams["stdout"], + "filters": ["exclude_subprocess", "exclude_warnings"], + "formatter": "indent", + }, + "console_errors": { + "level": "WARNING", + "class": handler_classes["stream"], + "no_color": no_color, + "stream": log_streams["stderr"], + "filters": ["exclude_subprocess"], + "formatter": "indent", + }, + # A handler responsible for logging to the console messages + # from the "subprocessor" logger. + "console_subprocess": { + "level": level, + "class": handler_classes["stream"], + "stream": log_streams["stderr"], + "no_color": no_color, + "filters": ["restrict_to_subprocess"], + "formatter": "indent", + }, + "user_log": { + "level": "DEBUG", + "class": handler_classes["file"], + "filename": additional_log_file, + "encoding": "utf-8", + "delay": True, + "formatter": "indent_with_timestamp", + }, + }, + "root": { + "level": root_level, + "handlers": handlers, + }, + "loggers": {"pip._vendor": {"level": vendored_log_level}}, + } + ) + + return level_number diff --git a/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/urls.py b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/urls.py new file mode 100644 index 0000000000000000000000000000000000000000..9f34f882a1a6b7bf8e8ec5eb42c5d28f2c4e30aa --- /dev/null +++ b/tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/utils/urls.py @@ -0,0 +1,55 @@ +import os +import string +import urllib.parse +import urllib.request + +from .compat import WINDOWS + + +def path_to_url(path: str) -> str: + """ + Convert a path to a file: URL. The path will be made absolute and have + quoted path parts. + """ + path = os.path.normpath(os.path.abspath(path)) + url = urllib.parse.urljoin("file:", urllib.request.pathname2url(path)) + return url + + +def url_to_path(url: str) -> str: + """ + Convert a file: URL to a path. + """ + assert url.startswith( + "file:" + ), f"You can only turn file: urls into filenames (not {url!r})" + + _, netloc, path, _, _ = urllib.parse.urlsplit(url) + + if not netloc or netloc == "localhost": + # According to RFC 8089, same as empty authority. + netloc = "" + elif WINDOWS: + # If we have a UNC path, prepend UNC share notation. 
+ netloc = "\\\\" + netloc + else: + raise ValueError( + f"non-local file URIs are not supported on this platform: {url!r}" + ) + + path = urllib.request.url2pathname(netloc + path) + + # On Windows, urlsplit parses the path as something like "/C:/Users/foo". + # This creates issues for path-related functions like io.open(), so we try + # to detect and strip the leading slash. + if ( + WINDOWS + and not netloc # Not UNC. + and len(path) >= 3 + and path[0] == "/" # Leading slash to strip. + and path[1] in string.ascii_letters # Drive letter. + and path[2:4] in (":", ":/") # Colon + end of string, or colon + absolute path. + ): + path = path[1:] + + return path
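A note on the link-equivalency code added above: links_equivalent() compares
_CleanResult tuples rather than raw URLs, so basic auth and query ordering
never cause two otherwise-identical requirement URLs to be treated as
different. Below is a minimal standalone sketch of that normalization,
mirroring _clean_link without importing pip's internals; the URLs are made-up
examples.

    import urllib.parse

    def clean(url: str):
        # Mirror of _clean_link: drop basic auth, parse the query so key
        # ordering stops mattering, and keep only subdirectory= from the
        # fragment (pip additionally keeps recognised hash values).
        parts = urllib.parse.urlsplit(url)
        netloc = parts.netloc.rsplit("@", 1)[-1]
        fragment = urllib.parse.parse_qs(parts.fragment)
        subdirectory = fragment.get("subdirectory", [""])[0]
        return (
            parts._replace(netloc=netloc, query="", fragment=""),
            urllib.parse.parse_qs(parts.query),
            subdirectory,
        )

    # Auth and query ordering no longer matter:
    assert clean("https://u:p@host/pkg.tar.gz?a=1&b=2") == clean(
        "https://host/pkg.tar.gz?b=2&a=1"
    )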
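Likewise, the Wheel.wheel_file_re pattern is what expands a filename's dotted
pyver/abi/plat segments into the PEP 425 tag set consulted by
support_index_min() and supported(). A minimal standalone sketch using the
same pattern follows; the filename is a hypothetical example.

    import re
    from itertools import product

    # Same pattern as Wheel.wheel_file_re in the diff above.
    WHEEL_RE = re.compile(
        r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
        ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE,
    )

    m = WHEEL_RE.match("example_pkg-1.0-cp311-cp311-manylinux_2_17_x86_64.whl")
    assert m is not None
    # Each dotted segment multiplies out into (interpreter, abi, platform)
    # triples; pip wraps these in packaging.tags.Tag objects.
    tags = set(
        product(
            m.group("pyver").split("."),
            m.group("abi").split("."),
            m.group("plat").split("."),
        )
    )
    print(m.group("name"), m.group("ver"), sorted(tags))
    # example_pkg 1.0 [('cp311', 'cp311', 'manylinux_2_17_x86_64')]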
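Finally, Hashes.check_against_chunks() feeds every chunk to one hasher per
allowed algorithm and succeeds as soon as any resulting digest is on the
allow-list. A self-contained sketch of the same idea, with a plain ValueError
standing in for pip's HashMismatch:

    import hashlib
    from typing import Dict, Iterable, List

    def check_chunks(allowed: Dict[str, List[str]], chunks: Iterable[bytes]) -> None:
        # One hasher per allowed algorithm; all of them see every chunk.
        gots = {name: hashlib.new(name) for name in allowed}
        for chunk in chunks:
            for hasher in gots.values():
                hasher.update(chunk)
        if not any(h.hexdigest() in allowed[name] for name, h in gots.items()):
            raise ValueError("hash mismatch")

    data = b"hello"
    good = hashlib.sha256(data).hexdigest()
    check_chunks({"sha256": [good]}, [data])  # passes silently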