diff --git a/.gitattributes b/.gitattributes index 92f73a7b2d40256a1400122109aab6d73889f2e2..3d87cc7d9f594a1af66f04b63d3dd756ef99e95c 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1582,3 +1582,5 @@ vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-75.8.0 vllm/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/pip-24.3.1-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-75.3.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ce0bc3788996dd657b067fbf5df3f527ea42fdff --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5073ae2a918ead9c332ed5f334e8b9ac1c2552cb2c36a961a27270819b0883a4 +size 346369 diff --git a/parrot/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a98680564bac38031292199a03096a428f71e4ff --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cf1aa7a2910bf166794c476bcdccb503d06ad6542b66d6128d1a23c04ebf7c14 +size 301392 diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/binom.h b/parrot/lib/python3.10/site-packages/scipy/special/special/binom.h new file mode 100644 index 0000000000000000000000000000000000000000..42f5af21dbef7f12f8eb3035399d06070d7236a6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/binom.h @@ -0,0 +1,89 @@ +/* Translated from Cython into C++ by SciPy developers in 2024. + * + * Original authors: Pauli Virtanen, Eric Moore + */ + +// Binomial coefficient + +#pragma once + +#include "config.h" + +#include "cephes/beta.h" +#include "cephes/gamma.h" + +namespace special { + +SPECFUN_HOST_DEVICE inline double binom(double n, double k) { + double kx, nx, num, den, dk, sgn; + + if (n < 0) { + nx = std::floor(n); + if (n == nx) { + // Undefined + return std::numeric_limits::quiet_NaN(); + } + } + + kx = std::floor(k); + if (k == kx && (std::abs(n) > 1E-8 || n == 0)) { + /* Integer case: use multiplication formula for less rounding + * error for cases where the result is an integer. + * + * This cannot be used for small nonzero n due to loss of + * precision. */ + nx = std::floor(n); + if (nx == n && kx > nx / 2 && nx > 0) { + // Reduce kx by symmetry + kx = nx - kx; + } + + if (kx >= 0 && kx < 20) { + num = 1.0; + den = 1.0; + for (int i = 1; i < 1 + static_cast(kx); i++) { + num *= i + n - kx; + den *= i; + if (std::abs(num) > 1E50) { + num /= den; + den = 1.0; + } + } + return num / den; + } + } + + // general case + if (n >= 1E10 * k and k > 0) { + // avoid under/overflows intermediate results + return std::exp(-cephes::lbeta(1 + n - k, 1 + k) - std::log(n + 1)); + } + if (k > 1E8 * std::abs(n)) { + // avoid loss of precision + num = cephes::Gamma(1 + n) / std::abs(k) + cephes::Gamma(1 + n) * n / (2 * k * k); // + ... 
+ num /= M_PI * std::pow(std::abs(k), n); + if (k > 0) { + kx = std::floor(k); + if (static_cast(kx) == kx) { + dk = k - kx; + sgn = (static_cast(kx) % 2 == 0) ? 1 : -1; + } else { + dk = k; + sgn = 1; + } + return num * std::sin((dk - n) * M_PI) * sgn; + } + kx = std::floor(k); + if (static_cast(kx) == kx) { + return 0; + } + return num * std::sin(k * M_PI); + } + return 1 / (n + 1) / cephes::beta(1 + n - k, 1 + k); +} + +SPECFUN_HOST_DEVICE inline float binom(float n, float k) { + return binom(static_cast(n), static_cast(k)); +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/cephes/besselpoly.h b/parrot/lib/python3.10/site-packages/scipy/special/special/cephes/besselpoly.h new file mode 100644 index 0000000000000000000000000000000000000000..60e2e34d8dd2f6a85f4cf59562f23863f096e302 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/cephes/besselpoly.h @@ -0,0 +1,51 @@ +/* Translated into C++ by SciPy developers in 2024. + * + * This was not part of the original cephes library. 
+ */ +#pragma once + +#include "../config.h" +#include "gamma.h" + +namespace special { +namespace cephes { + namespace detail { + + constexpr double besselpoly_EPS = 1.0e-17; + } + + SPECFUN_HOST_DEVICE inline double besselpoly(double a, double lambda, double nu) { + + int m, factor = 0; + double Sm, relerr, Sol; + double sum = 0.0; + + /* Special handling for a = 0.0 */ + if (a == 0.0) { + if (nu == 0.0) { + return 1.0 / (lambda + 1); + } else { + return 0.0; + } + } + /* Special handling for negative and integer nu */ + if ((nu < 0) && (std::floor(nu) == nu)) { + nu = -nu; + factor = static_cast(nu) % 2; + } + Sm = std::exp(nu * std::log(a)) / (Gamma(nu + 1) * (lambda + nu + 1)); + m = 0; + do { + sum += Sm; + Sol = Sm; + Sm *= -a * a * (lambda + nu + 1 + 2 * m) / ((nu + m + 1) * (m + 1) * (lambda + nu + 1 + 2 * m + 2)); + m++; + relerr = std::abs((Sm - Sol) / Sm); + } while (relerr > detail::besselpoly_EPS && m < 1000); + if (!factor) + return sum; + else + return -sum; + } +} // namespace cephes +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/cephes/igami.h b/parrot/lib/python3.10/site-packages/scipy/special/special/cephes/igami.h new file mode 100644 index 0000000000000000000000000000000000000000..47db08cd0b5408273750f29b333eb6db2322e705 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/cephes/igami.h @@ -0,0 +1,313 @@ +/* Translated into C++ by SciPy developers in 2024. + * Original header with Copyright information appears below. + */ + +/* + * (C) Copyright John Maddock 2006. + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. 
(See accompanying file + * LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt) + */ +#pragma once + +#include "../config.h" +#include "../error.h" + +#include "const.h" +#include "gamma.h" +#include "igam.h" +#include "polevl.h" + +namespace special { +namespace cephes { + + namespace detail { + + SPECFUN_HOST_DEVICE double find_inverse_s(double p, double q) { + /* + * Computation of the Incomplete Gamma Function Ratios and their Inverse + * ARMIDO R. DIDONATO and ALFRED H. MORRIS, JR. + * ACM Transactions on Mathematical Software, Vol. 12, No. 4, + * December 1986, Pages 377-393. + * + * See equation 32. + */ + double s, t; + constexpr double a[4] = {0.213623493715853, 4.28342155967104, 11.6616720288968, 3.31125922108741}; + constexpr double b[5] = {0.3611708101884203e-1, 1.27364489782223, 6.40691597760039, 6.61053765625462, 1}; + + if (p < 0.5) { + t = std::sqrt(-2 * std::log(p)); + } else { + t = std::sqrt(-2 * std::log(q)); + } + s = t - polevl(t, a, 3) / polevl(t, b, 4); + if (p < 0.5) + s = -s; + return s; + } + + SPECFUN_HOST_DEVICE inline double didonato_SN(double a, double x, unsigned N, double tolerance) { + /* + * Computation of the Incomplete Gamma Function Ratios and their Inverse + * ARMIDO R. DIDONATO and ALFRED H. MORRIS, JR. + * ACM Transactions on Mathematical Software, Vol. 12, No. 4, + * December 1986, Pages 377-393. + * + * See equation 34. + */ + double sum = 1.0; + + if (N >= 1) { + unsigned i; + double partial = x / (a + 1); + + sum += partial; + for (i = 2; i <= N; ++i) { + partial *= x / (a + i); + sum += partial; + if (partial < tolerance) { + break; + } + } + } + return sum; + } + + SPECFUN_HOST_DEVICE inline double find_inverse_gamma(double a, double p, double q) { + /* + * In order to understand what's going on here, you will + * need to refer to: + * + * Computation of the Incomplete Gamma Function Ratios and their Inverse + * ARMIDO R. DIDONATO and ALFRED H. MORRIS, JR. 
+ * ACM Transactions on Mathematical Software, Vol. 12, No. 4, + * December 1986, Pages 377-393. + */ + double result; + + if (a == 1) { + if (q > 0.9) { + result = -std::log1p(-p); + } else { + result = -std::log(q); + } + } else if (a < 1) { + double g = special::cephes::Gamma(a); + double b = q * g; + + if ((b > 0.6) || ((b >= 0.45) && (a >= 0.3))) { + /* DiDonato & Morris Eq 21: + * + * There is a slight variation from DiDonato and Morris here: + * the first form given here is unstable when p is close to 1, + * making it impossible to compute the inverse of Q(a,x) for small + * q. Fortunately the second form works perfectly well in this case. + */ + double u; + if ((b * q > 1e-8) && (q > 1e-5)) { + u = std::pow(p * g * a, 1 / a); + } else { + u = std::exp((-q / a) - SCIPY_EULER); + } + result = u / (1 - (u / (a + 1))); + } else if ((a < 0.3) && (b >= 0.35)) { + /* DiDonato & Morris Eq 22: */ + double t = std::exp(-SCIPY_EULER - b); + double u = t * std::exp(t); + result = t * std::exp(u); + } else if ((b > 0.15) || (a >= 0.3)) { + /* DiDonato & Morris Eq 23: */ + double y = -std::log(b); + double u = y - (1 - a) * std::log(y); + result = y - (1 - a) * std::log(u) - std::log(1 + (1 - a) / (1 + u)); + } else if (b > 0.1) { + /* DiDonato & Morris Eq 24: */ + double y = -std::log(b); + double u = y - (1 - a) * std::log(y); + result = y - (1 - a) * std::log(u) - + std::log((u * u + 2 * (3 - a) * u + (2 - a) * (3 - a)) / (u * u + (5 - a) * u + 2)); + } else { + /* DiDonato & Morris Eq 25: */ + double y = -std::log(b); + double c1 = (a - 1) * std::log(y); + double c1_2 = c1 * c1; + double c1_3 = c1_2 * c1; + double c1_4 = c1_2 * c1_2; + double a_2 = a * a; + double a_3 = a_2 * a; + + double c2 = (a - 1) * (1 + c1); + double c3 = (a - 1) * (-(c1_2 / 2) + (a - 2) * c1 + (3 * a - 5) / 2); + double c4 = (a - 1) * ((c1_3 / 3) - (3 * a - 5) * c1_2 / 2 + (a_2 - 6 * a + 7) * c1 + + (11 * a_2 - 46 * a + 47) / 6); + double c5 = (a - 1) * (-(c1_4 / 4) + (11 * a - 17) * c1_3 / 6 
+ (-3 * a_2 + 13 * a - 13) * c1_2 + + (2 * a_3 - 25 * a_2 + 72 * a - 61) * c1 / 2 + + (25 * a_3 - 195 * a_2 + 477 * a - 379) / 12); + + double y_2 = y * y; + double y_3 = y_2 * y; + double y_4 = y_2 * y_2; + result = y + c1 + (c2 / y) + (c3 / y_2) + (c4 / y_3) + (c5 / y_4); + } + } else { + /* DiDonato and Morris Eq 31: */ + double s = find_inverse_s(p, q); + + double s_2 = s * s; + double s_3 = s_2 * s; + double s_4 = s_2 * s_2; + double s_5 = s_4 * s; + double ra = std::sqrt(a); + + double w = a + s * ra + (s_2 - 1) / 3; + w += (s_3 - 7 * s) / (36 * ra); + w -= (3 * s_4 + 7 * s_2 - 16) / (810 * a); + w += (9 * s_5 + 256 * s_3 - 433 * s) / (38880 * a * ra); + + if ((a >= 500) && (std::abs(1 - w / a) < 1e-6)) { + result = w; + } else if (p > 0.5) { + if (w < 3 * a) { + result = w; + } else { + double D = std::fmax(2, a * (a - 1)); + double lg = special::cephes::lgam(a); + double lb = std::log(q) + lg; + if (lb < -D * 2.3) { + /* DiDonato and Morris Eq 25: */ + double y = -lb; + double c1 = (a - 1) * std::log(y); + double c1_2 = c1 * c1; + double c1_3 = c1_2 * c1; + double c1_4 = c1_2 * c1_2; + double a_2 = a * a; + double a_3 = a_2 * a; + + double c2 = (a - 1) * (1 + c1); + double c3 = (a - 1) * (-(c1_2 / 2) + (a - 2) * c1 + (3 * a - 5) / 2); + double c4 = (a - 1) * ((c1_3 / 3) - (3 * a - 5) * c1_2 / 2 + (a_2 - 6 * a + 7) * c1 + + (11 * a_2 - 46 * a + 47) / 6); + double c5 = + (a - 1) * (-(c1_4 / 4) + (11 * a - 17) * c1_3 / 6 + (-3 * a_2 + 13 * a - 13) * c1_2 + + (2 * a_3 - 25 * a_2 + 72 * a - 61) * c1 / 2 + + (25 * a_3 - 195 * a_2 + 477 * a - 379) / 12); + + double y_2 = y * y; + double y_3 = y_2 * y; + double y_4 = y_2 * y_2; + result = y + c1 + (c2 / y) + (c3 / y_2) + (c4 / y_3) + (c5 / y_4); + } else { + /* DiDonato and Morris Eq 33: */ + double u = -lb + (a - 1) * std::log(w) - std::log(1 + (1 - a) / (1 + w)); + result = -lb + (a - 1) * std::log(u) - std::log(1 + (1 - a) / (1 + u)); + } + } + } else { + double z = w; + double ap1 = a + 1; + double ap2 = a + 2; 
+ if (w < 0.15 * ap1) { + /* DiDonato and Morris Eq 35: */ + double v = std::log(p) + special::cephes::lgam(ap1); + z = std::exp((v + w) / a); + s = std::log1p(z / ap1 * (1 + z / ap2)); + z = std::exp((v + z - s) / a); + s = std::log1p(z / ap1 * (1 + z / ap2)); + z = std::exp((v + z - s) / a); + s = std::log1p(z / ap1 * (1 + z / ap2 * (1 + z / (a + 3)))); + z = std::exp((v + z - s) / a); + } + + if ((z <= 0.01 * ap1) || (z > 0.7 * ap1)) { + result = z; + } else { + /* DiDonato and Morris Eq 36: */ + double ls = std::log(didonato_SN(a, z, 100, 1e-4)); + double v = std::log(p) + special::cephes::lgam(ap1); + z = std::exp((v + z - ls) / a); + result = z * (1 - (a * std::log(z) - z - v + ls) / (a - z)); + } + } + } + return result; + } + + } // namespace detail + + SPECFUN_HOST_DEVICE inline double igamci(double a, double q); + + SPECFUN_HOST_DEVICE inline double igami(double a, double p) { + int i; + double x, fac, f_fp, fpp_fp; + + if (std::isnan(a) || std::isnan(p)) { + return std::numeric_limits::quiet_NaN(); + ; + } else if ((a < 0) || (p < 0) || (p > 1)) { + set_error("gammaincinv", SF_ERROR_DOMAIN, NULL); + } else if (p == 0.0) { + return 0.0; + } else if (p == 1.0) { + return std::numeric_limits::infinity(); + } else if (p > 0.9) { + return igamci(a, 1 - p); + } + + x = detail::find_inverse_gamma(a, p, 1 - p); + /* Halley's method */ + for (i = 0; i < 3; i++) { + fac = detail::igam_fac(a, x); + if (fac == 0.0) { + return x; + } + f_fp = (igam(a, x) - p) * x / fac; + /* The ratio of the first and second derivatives simplifies */ + fpp_fp = -1.0 + (a - 1) / x; + if (std::isinf(fpp_fp)) { + /* Resort to Newton's method in the case of overflow */ + x = x - f_fp; + } else { + x = x - f_fp / (1.0 - 0.5 * f_fp * fpp_fp); + } + } + + return x; + } + + SPECFUN_HOST_DEVICE inline double igamci(double a, double q) { + int i; + double x, fac, f_fp, fpp_fp; + + if (std::isnan(a) || std::isnan(q)) { + return std::numeric_limits::quiet_NaN(); + } else if ((a < 0.0) || (q < 
0.0) || (q > 1.0)) { + set_error("gammainccinv", SF_ERROR_DOMAIN, NULL); + } else if (q == 0.0) { + return std::numeric_limits::infinity(); + } else if (q == 1.0) { + return 0.0; + } else if (q > 0.9) { + return igami(a, 1 - q); + } + + x = detail::find_inverse_gamma(a, 1 - q, q); + for (i = 0; i < 3; i++) { + fac = detail::igam_fac(a, x); + if (fac == 0.0) { + return x; + } + f_fp = (igamc(a, x) - q) * x / (-fac); + fpp_fp = -1.0 + (a - 1) / x; + if (std::isinf(fpp_fp)) { + x = x - f_fp; + } else { + x = x - f_fp / (1.0 - 0.5 * f_fp * fpp_fp); + } + } + + return x; + } + +} // namespace cephes +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/config.h b/parrot/lib/python3.10/site-packages/scipy/special/special/config.h new file mode 100644 index 0000000000000000000000000000000000000000..559ab0c39906d4b49e5274982fc8848d6c358628 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/config.h @@ -0,0 +1,226 @@ +#pragma once + +// Define math constants if they are not available +#ifndef M_E +#define M_E 2.71828182845904523536 +#endif + +#ifndef M_LOG2E +#define M_LOG2E 1.44269504088896340736 +#endif + +#ifndef M_LOG10E +#define M_LOG10E 0.434294481903251827651 +#endif + +#ifndef M_LN2 +#define M_LN2 0.693147180559945309417 +#endif + +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 +#endif + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 +#endif + +#ifndef M_PI_4 +#define M_PI_4 0.785398163397448309616 +#endif + +#ifndef M_1_PI +#define M_1_PI 0.318309886183790671538 +#endif + +#ifndef M_2_PI +#define M_2_PI 0.636619772367581343076 +#endif + +#ifndef M_2_SQRTPI +#define M_2_SQRTPI 1.12837916709551257390 +#endif + +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 +#endif + +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.707106781186547524401 +#endif + +#ifdef __CUDACC__ +#define SPECFUN_HOST_DEVICE __host__ __device__ + +#include 
+#include +#include +#include +#include + +// Fallback to global namespace for functions unsupported on NVRTC Jit +#ifdef _LIBCUDACXX_COMPILER_NVRTC +#include +#endif + +namespace std { + +SPECFUN_HOST_DEVICE inline double abs(double num) { return cuda::std::abs(num); } + +SPECFUN_HOST_DEVICE inline double exp(double num) { return cuda::std::exp(num); } + +SPECFUN_HOST_DEVICE inline double log(double num) { return cuda::std::log(num); } + +SPECFUN_HOST_DEVICE inline double sqrt(double num) { return cuda::std::sqrt(num); } + +SPECFUN_HOST_DEVICE inline bool isinf(double num) { return cuda::std::isinf(num); } + +SPECFUN_HOST_DEVICE inline bool isnan(double num) { return cuda::std::isnan(num); } + +SPECFUN_HOST_DEVICE inline bool isfinite(double num) { return cuda::std::isfinite(num); } + +SPECFUN_HOST_DEVICE inline double pow(double x, double y) { return cuda::std::pow(x, y); } + +SPECFUN_HOST_DEVICE inline double sin(double x) { return cuda::std::sin(x); } + +SPECFUN_HOST_DEVICE inline double cos(double x) { return cuda::std::cos(x); } + +SPECFUN_HOST_DEVICE inline double tan(double x) { return cuda::std::tan(x); } + +SPECFUN_HOST_DEVICE inline double atan(double x) { return cuda::std::atan(x); } + +SPECFUN_HOSt_DEVICE inline double acos(double x) { return cuda::std::acos(x); } + +SPECFUN_HOST_DEVICE inline double sinh(double x) { return cuda::std::sinh(x); } + +SPECFUN_HOST_DEVICE inline double cosh(double x) { return cuda::std::cosh(x); } + +SPECFUN_HOST_DEVICE inline double asinh(double x) { return cuda::std::asinh(x); } + +SPECFUN_HOST_DEVICE inline bool signbit(double x) { return cuda::std::signbit(x); } + +// Fallback to global namespace for functions unsupported on NVRTC +#ifndef _LIBCUDACXX_COMPILER_NVRTC +SPECFUN_HOST_DEVICE inline double ceil(double x) { return cuda::std::ceil(x); } +SPECFUN_HOST_DEVICE inline double floor(double x) { return cuda::std::floor(x); } +SPECFUN_HOST_DEVICE inline double round(double x) { return cuda::std::round(x); } 
+SPECFUN_HOST_DEVICE inline double trunc(double x) { return cuda::std::trunc(x); } +SPECFUN_HOST_DEVICE inline double fma(double x, double y, double z) { return cuda::std::fma(x, y, z); } +SPECFUN_HOST_DEVICE inline double copysign(double x, double y) { return cuda::std::copysign(x, y); } +SPECFUN_HOST_DEVICE inline double modf(double value, double *iptr) { return cuda::std::modf(value, iptr); } +SPECFUN_HOST_DEVICE inline double fmax(double x, double y) { return cuda::std::fmax(x, y); } +SPECFUN_HOST_DEVICE inline double fmin(double x, double y) { return cuda::std::fmin(x, y); } +SPECFUN_HOST_DEVICE inline double log10(double num) { return cuda::std::log10(num); } +SPECFUN_HOST_DEVICE inline double log1p(double num) { return cuda::std::log1p(num); } +SPECFUN_HOST_DEVICE inline double frexp(double num, int *exp) { return cuda::std::frexp(num); } +SPECFUN_HOST_DEVICE inline double ldexp(double num, int *exp) { return cuda::std::ldexp(num); } +SPECFUN_HOST_DEVICE inline double fmod(double x, double y) { return cuda::std::fmod(x, y); } +#else +SPECFUN_HOST_DEVICE inline double ceil(double x) { return ::ceil(x); } +SPECFUN_HOST_DEVICE inline double floor(double x) { return ::floor(x); } +SPECFUN_HOST_DEVICE inline double round(double x) { return ::round(x); } +SPECFUN_HOST_DEVICE inline double trunc(double x) { return ::trunc(x); } +SPECFUN_HOST_DEVICE inline double fma(double x, double y, double z) { return ::fma(x, y, z); } +SPECFUN_HOST_DEVICE inline double copysign(double x, double y) { return ::copysign(x, y); } +SPECFUN_HOST_DEVICE inline double modf(double value, double *iptr) { return ::modf(value, iptr); } +SPECFUN_HOST_DEVICE inline double fmax(double x, double y) { return ::fmax(x, y); } +SPECFUN_HOST_DEVICE inline double fmin(double x, double y) { return ::fmin(x, y); } +SPECFUN_HOST_DEVICE inline double log10(double num) { return ::log10(num); } +SPECFUN_HOST_DEVICE inline double log1p(double num) { return ::log1p(num); } +SPECFUN_HOST_DEVICE inline double 
frexp(double num, int *exp) { return ::frexp(num); } +SPECFUN_HOST_DEVICE inline double ldexp(double num, int *exp) { return ::ldexp(num); } +SPECFUN_HOST_DEVICE inline double fmod(double x, double y) { return ::fmod(x, y); } +#endif + +template +SPECFUN_HOST_DEVICE void swap(T &a, T &b) { + cuda::std::swap(a, b); +} + +template +SPECFUN_HOST_DEVICE const T &clamp(const T &v, const T &lo, const T &hi) { + return cuda::std::clamp(v, lo, hi); +} + +template +using numeric_limits = cuda::std::numeric_limits; + +// Must use thrust for complex types in order to support CuPy +template +using complex = thrust::complex; + +template +SPECFUN_HOST_DEVICE T abs(const complex &z) { + return thrust::abs(z); +} + +template +SPECFUN_HOST_DEVICE complex exp(const complex &z) { + return thrust::exp(z); +} + +template +SPECFUN_HOST_DEVICE complex log(const complex &z) { + return thrust::log(z); +} + +template +SPECFUN_HOST_DEVICE T norm(const complex &z) { + return thrust::norm(z); +} + +template +SPECFUN_HOST_DEVICE complex sqrt(const complex &z) { + return thrust::sqrt(z); +} + +template +SPECFUN_HOST_DEVICE complex conj(const complex &z) { + return thrust::conj(z); +} + +template +SPECFUN_HOST_DEVICE complex pow(const complex &x, const complex &y) { + return thrust::pow(x, y); +} + +template +SPECFUN_HOST_DEVICE complex pow(const complex &x, const T &y) { + return thrust::pow(x, y); +} + +// Other types and utilities +using cuda::std::is_floating_point; +using cuda::std::pair; +using cuda::std::uint64_t; + +#define SPECFUN_ASSERT(a) + +} // namespace std + +#else +#define SPECFUN_HOST_DEVICE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef DEBUG +#define SPECFUN_ASSERT(a) assert(a) +#else +#define SPECFUN_ASSERT(a) +#endif + +#endif diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/digamma.h b/parrot/lib/python3.10/site-packages/scipy/special/special/digamma.h new file mode 100644 index 
0000000000000000000000000000000000000000..879b254a0805028d6ca69549140c891b7b2d8f62 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/digamma.h @@ -0,0 +1,204 @@ +/* Translated from Cython into C++ by SciPy developers in 2024. + * Original header comment appears below. + */ + +/* An implementation of the digamma function for complex arguments. + * + * Author: Josh Wilson + * + * Distributed under the same license as Scipy. + * + * Sources: + * [1] "The Digital Library of Mathematical Functions", dlmf.nist.gov + * + * [2] mpmath (version 0.19), http://mpmath.org + */ + +#pragma once + +#include "cephes/psi.h" +#include "cephes/zeta.h" +#include "config.h" +#include "error.h" +#include "trig.h" + +namespace special { +namespace detail { + // All of the following were computed with mpmath + // Location of the positive root + constexpr double digamma_posroot = 1.4616321449683623; + // Value of the positive root + constexpr double digamma_posrootval = -9.2412655217294275e-17; + // Location of the negative root + constexpr double digamma_negroot = -0.504083008264455409; + // Value of the negative root + constexpr double digamma_negrootval = 7.2897639029768949e-17; + + template + SPECFUN_HOST_DEVICE T digamma_zeta_series(T z, double root, double rootval) { + T res = rootval; + T coeff = -1.0; + + z = z - root; + T term; + for (int n = 1; n < 100; n++) { + coeff *= -z; + term = coeff * cephes::zeta(n + 1, root); + res += term; + if (std::abs(term) < std::numeric_limits::epsilon() * std::abs(res)) { + break; + } + } + return res; + } + + SPECFUN_HOST_DEVICE inline std::complex digamma_forward_recurrence(std::complex z, + std::complex psiz, int n) { + /* Compute digamma(z + n) using digamma(z) using the recurrence + * relation + * + * digamma(z + 1) = digamma(z) + 1/z. 
+ * + * See https://dlmf.nist.gov/5.5#E2 */ + std::complex res = psiz; + + for (int k = 0; k < n; k++) { + res += 1.0 / (z + static_cast(k)); + } + return res; + } + + SPECFUN_HOST_DEVICE inline std::complex digamma_backward_recurrence(std::complex z, + std::complex psiz, int n) { + /* Compute digamma(z - n) using digamma(z) and a recurrence relation. */ + std::complex res = psiz; + + for (int k = 1; k < n + 1; k++) { + res -= 1.0 / (z - static_cast(k)); + } + return res; + } + + SPECFUN_HOST_DEVICE inline std::complex digamma_asymptotic_series(std::complex z) { + /* Evaluate digamma using an asymptotic series. See + * + * https://dlmf.nist.gov/5.11#E2 */ + double bernoulli2k[] = { + 0.166666666666666667, -0.0333333333333333333, 0.0238095238095238095, -0.0333333333333333333, + 0.0757575757575757576, -0.253113553113553114, 1.16666666666666667, -7.09215686274509804, + 54.9711779448621554, -529.124242424242424, 6192.12318840579710, -86580.2531135531136, + 1425517.16666666667, -27298231.0678160920, 601580873.900642368, -15116315767.0921569}; + std::complex rzz = 1.0 / z / z; + std::complex zfac = 1.0; + std::complex term; + std::complex res; + + if (!(std::isfinite(z.real()) && std::isfinite(z.imag()))) { + /* Check for infinity (or nan) and return early. + * Result of division by complex infinity is implementation dependent. + * and has been observed to vary between C++ stdlib and CUDA stdlib. + */ + return std::log(z); + } + + res = std::log(z) - 0.5 / z; + + for (int k = 1; k < 17; k++) { + zfac *= rzz; + term = -bernoulli2k[k - 1] * zfac / (2 * static_cast(k)); + res += term; + if (std::abs(term) < std::numeric_limits::epsilon() * std::abs(res)) { + break; + } + } + return res; + } + +} // namespace detail + +SPECFUN_HOST_DEVICE inline double digamma(double z) { + /* Wrap Cephes' psi to take advantage of the series expansion around + * the smallest negative zero. 
+ */ + if (std::abs(z - detail::digamma_negroot) < 0.3) { + return detail::digamma_zeta_series(z, detail::digamma_negroot, detail::digamma_negrootval); + } + return cephes::psi(z); +} + +SPECFUN_HOST_DEVICE inline float digamma(float z) { return static_cast(digamma(static_cast(z))); } + +SPECFUN_HOST_DEVICE inline std::complex digamma(std::complex z) { + /* + * Compute the digamma function for complex arguments. The strategy + * is: + * + * - Around the two zeros closest to the origin (posroot and negroot) + * use a Taylor series with precomputed zero order coefficient. + * - If close to the origin, use a recurrence relation to step away + * from the origin. + * - If close to the negative real axis, use the reflection formula + * to move to the right halfplane. + * - If |z| is large (> 16), use the asymptotic series. + * - If |z| is small, use a recurrence relation to make |z| large + * enough to use the asymptotic series. + */ + double absz = std::abs(z); + std::complex res = 0; + /* Use the asymptotic series for z away from the negative real axis + * with abs(z) > smallabsz. */ + int smallabsz = 16; + /* Use the reflection principle for z with z.real < 0 that are within + * smallimag of the negative real axis. + * int smallimag = 6 # unused below except in a comment */ + + if (z.real() <= 0.0 && std::ceil(z.real()) == z) { + // Poles + set_error("digamma", SF_ERROR_SINGULAR, NULL); + return {std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN()}; + } + if (std::abs(z - detail::digamma_negroot) < 0.3) { + // First negative root. + return detail::digamma_zeta_series(z, detail::digamma_negroot, detail::digamma_negrootval); + } + + if (z.real() < 0 and std::abs(z.imag()) < smallabsz) { + /* Reflection formula for digamma. See + * + *https://dlmf.nist.gov/5.5#E4 + */ + res = -M_PI * cospi(z) / sinpi(z); + z = 1.0 - z; + absz = std::abs(z); + } + + if (absz < 0.5) { + /* Use one step of the recurrence relation to step away from + * the pole. 
*/ + res = -1.0 / z; + z += 1.0; + absz = std::abs(z); + } + + if (std::abs(z - detail::digamma_posroot) < 0.5) { + res += detail::digamma_zeta_series(z, detail::digamma_posroot, detail::digamma_posrootval); + } else if (absz > smallabsz) { + res += detail::digamma_asymptotic_series(z); + } else if (z.real() >= 0.0) { + double n = std::trunc(smallabsz - absz) + 1; + std::complex init = detail::digamma_asymptotic_series(z + n); + res += detail::digamma_backward_recurrence(z + n, init, n); + } else { + // z.real() < 0, absz < smallabsz, and z.imag() > smallimag + double n = std::trunc(smallabsz - absz) - 1; + std::complex init = detail::digamma_asymptotic_series(z - n); + res += detail::digamma_forward_recurrence(z - n, init, n); + } + return res; +} + +SPECFUN_HOST_DEVICE inline std::complex digamma(std::complex z) { + return static_cast>(digamma(static_cast>(z))); +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/error.h b/parrot/lib/python3.10/site-packages/scipy/special/special/error.h new file mode 100644 index 0000000000000000000000000000000000000000..e1973cfa81c0c761ef7599a3e060b4426be2a759 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/error.h @@ -0,0 +1,64 @@ +#pragma once + +// should be included from config.h, but that won't work until we've cleanly separated out the C and C++ parts of the +// code +#ifdef __CUDACC__ +#define SPECFUN_HOST_DEVICE __host__ __device__ +#else +#define SPECFUN_HOST_DEVICE +#endif + +typedef enum { + SF_ERROR_OK = 0, /* no error */ + SF_ERROR_SINGULAR, /* singularity encountered */ + SF_ERROR_UNDERFLOW, /* floating point underflow */ + SF_ERROR_OVERFLOW, /* floating point overflow */ + SF_ERROR_SLOW, /* too many iterations required */ + SF_ERROR_LOSS, /* loss of precision */ + SF_ERROR_NO_RESULT, /* no result obtained */ + SF_ERROR_DOMAIN, /* out of domain */ + SF_ERROR_ARG, /* invalid input parameter */ + SF_ERROR_OTHER, /* unclassified error */ + 
SF_ERROR__LAST +} sf_error_t; + +#ifdef __cplusplus + +#include + +namespace special { + +#ifndef SP_SPECFUN_ERROR +SPECFUN_HOST_DEVICE inline void set_error(const char *func_name, sf_error_t code, const char *fmt, ...) { + // nothing +} +#else +void set_error(const char *func_name, sf_error_t code, const char *fmt, ...); +#endif + +template +void set_error_and_nan(const char *name, sf_error_t code, T &value) { + if (code != SF_ERROR_OK) { + set_error(name, code, nullptr); + + if (code == SF_ERROR_DOMAIN || code == SF_ERROR_OVERFLOW || code == SF_ERROR_NO_RESULT) { + value = std::numeric_limits::quiet_NaN(); + } + } +} + +template +void set_error_and_nan(const char *name, sf_error_t code, std::complex &value) { + if (code != SF_ERROR_OK) { + set_error(name, code, nullptr); + + if (code == SF_ERROR_DOMAIN || code == SF_ERROR_OVERFLOW || code == SF_ERROR_NO_RESULT) { + value.real(std::numeric_limits::quiet_NaN()); + value.imag(std::numeric_limits::quiet_NaN()); + } + } +} + +} // namespace special + +#endif diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/evalpoly.h b/parrot/lib/python3.10/site-packages/scipy/special/special/evalpoly.h new file mode 100644 index 0000000000000000000000000000000000000000..0fcd162b58878e604185b0d2311816c963e63a0c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/evalpoly.h @@ -0,0 +1,47 @@ +/* Translated from Cython into C++ by SciPy developers in 2024. + * + * Original author: Josh Wilson, 2016. + */ + +/* Evaluate polynomials. + * + * All of the coefficients are stored in reverse order, i.e. if the + * polynomial is + * + * u_n x^n + u_{n - 1} x^{n - 1} + ... + u_0, + * + * then coeffs[0] = u_n, coeffs[1] = u_{n - 1}, ..., coeffs[n] = u_0. 
+ * + * References + * ---------- + * [1] Knuth, "The Art of Computer Programming, Volume II" + */ + +#pragma once + +#include "config.h" + +namespace special { + +SPECFUN_HOST_DEVICE inline std::complex cevalpoly(const double *coeffs, int degree, std::complex z) { + /* Evaluate a polynomial with real coefficients at a complex point. + * + * Uses equation (3) in section 4.6.4 of [1]. Note that it is more + * efficient than Horner's method. + */ + double a = coeffs[0]; + double b = coeffs[1]; + double r = 2 * z.real(); + double s = std::norm(z); + double tmp; + + for (int j = 2; j < degree + 1; j++) { + tmp = b; + b = std::fma(-s, a, coeffs[j]); + a = std::fma(r, a, tmp); + } + + return z * a + b; +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/hyp2f1.h b/parrot/lib/python3.10/site-packages/scipy/special/special/hyp2f1.h new file mode 100644 index 0000000000000000000000000000000000000000..a4ea56513e53bebd2cc7381de34861829f6c35bd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/hyp2f1.h @@ -0,0 +1,694 @@ +/* Implementation of Gauss's hypergeometric function for complex values. + * + * This implementation is based on the Fortran implementation by Shanjie Zhang and + * Jianming Jin included in specfun.f [1]_. Computation of Gauss's hypergeometric + * function involves handling a patchwork of special cases. By default the Zhang and + * Jin implementation has been followed as closely as possible except for situations where + * an improvement was obvious. We've attempted to document the reasons behind decisions + * made by Zhang and Jin and to document the reasons for deviating from their implementation + * when this has been done. References to the NIST Digital Library of Mathematical + * Functions [2]_ have been added where they are appropriate. The review paper by + * Pearson et al [3]_ is an excellent resource for best practices for numerical + * computation of hypergeometric functions. 
We have followed this review paper + * when making improvements to and correcting defects in Zhang and Jin's + * implementation. When Pearson et al propose several competing alternatives for a + * given case, we've used our best judgment to decide on the method to use. + * + * Author: Albert Steppi + * + * Distributed under the same license as Scipy. + * + * References + * ---------- + * .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996 + * .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/, + * Release 1.1.1 of 2021-03-15. F. W. J. Olver, A. B. Olde Daalhuis, + * D. W. Lozier, B. I. Schneider, R. F. Boisvert, C. W. Clark, B. R. Miller, + * B. V. Saunders, H. S. Cohl, and M. A. McClain, eds. + * .. [3] Pearson, J.W., Olver, S. & Porter, M.A. + * "Numerical methods for the computation of the confluent and Gauss + * hypergeometric functions." + * Numer Algor 74, 821-866 (2017). https://doi.org/10.1007/s11075-016-0173-0 + * .. [4] Raimundas Vidunas, "Degenerate Gauss Hypergeometric Functions", + * Kyushu Journal of Mathematics, 2007, Volume 61, Issue 1, Pages 109-135, + * .. [5] López, J.L., Temme, N.M. New series expansions of the Gauss hypergeometric + * function. Adv Comput Math 39, 349-365 (2013). + * https://doi.org/10.1007/s10444-012-9283-y + * """ + */ + +#pragma once + +#include "config.h" +#include "error.h" +#include "tools.h" + +#include "binom.h" +#include "cephes/gamma.h" +#include "cephes/lanczos.h" +#include "cephes/poch.h" +#include "cephes/hyp2f1.h" +#include "digamma.h" + +namespace special { +namespace detail { + constexpr double hyp2f1_EPS = 1e-15; + /* The original implementation in SciPy from Zhang and Jin used 1500 for the + * maximum number of series iterations in some cases and 500 in others. 
+ * Through the empirical results on the test cases in + * scipy/special/_precompute/hyp2f1_data.py, it was determined that these values + * can lead to early termination of series which would have eventually converged + * at a reasonable level of accuracy. We've bumped the iteration limit to 3000, + * and may adjust it again based on further analysis. */ + constexpr std::uint64_t hyp2f1_MAXITER = 3000; + + SPECFUN_HOST_DEVICE inline double four_gammas_lanczos(double u, double v, double w, double x) { + /* Compute ratio of gamma functions using lanczos approximation. + * + * Computes gamma(u)*gamma(v)/(gamma(w)*gamma(x)) + * + * It is assumed that x = u + v - w, but it is left to the user to + * ensure this. + * + * The lanczos approximation takes the form + * + * gamma(x) = factor(x) * lanczos_sum_expg_scaled(x) + * + * where factor(x) = ((x + lanczos_g - 0.5)/e)**(x - 0.5). + * + * The formula above is only valid for x >= 0.5, but can be extended to + * x < 0.5 with the reflection principle. + * + * Using the lanczos approximation when computing this ratio of gamma functions + * allows factors to be combined analytically to avoid underflow and overflow + * and produce a more accurate result. The condition x = u + v - w makes it + * possible to cancel the factors in the expression + * + * factor(u) * factor(v) / (factor(w) * factor(x)) + * + * by taking one factor and absorbing it into the others. Currently, this + * implementation takes the factor corresponding to the argument with largest + * absolute value and absorbs it into the others. + * + * Since this is only called internally by four_gammas. It is assumed that + * |u| >= |v| and |w| >= |x|. + */ + + /* The below implementation may incorrectly return finite results + * at poles of the gamma function. Handle these cases explicitly. */ + if ((u == std::trunc(u) && u <= 0) || (v == std::trunc(v) && v <= 0)) { + /* Return nan if numerator has pole. 
Diverges to +- infinity + * depending on direction so value is undefined. */ + return std::numeric_limits::quiet_NaN(); + } + if ((w == std::trunc(w) && w <= 0) || (x == std::trunc(x) && x <= 0)) { + // Return 0 if denominator has pole but not numerator. + return 0.0; + } + + double result = 1.0; + double ugh, vgh, wgh, xgh, u_prime, v_prime, w_prime, x_prime; + + if (u >= 0.5) { + result *= cephes::lanczos_sum_expg_scaled(u); + ugh = u + cephes::lanczos_g - 0.5; + u_prime = u; + } else { + result /= cephes::lanczos_sum_expg_scaled(1 - u) * std::sin(M_PI * u) * M_1_PI; + ugh = 0.5 - u + cephes::lanczos_g; + u_prime = 1 - u; + } + + if (v >= 0.5) { + result *= cephes::lanczos_sum_expg_scaled(v); + vgh = v + cephes::lanczos_g - 0.5; + v_prime = v; + } else { + result /= cephes::lanczos_sum_expg_scaled(1 - v) * std::sin(M_PI * v) * M_1_PI; + vgh = 0.5 - v + cephes::lanczos_g; + v_prime = 1 - v; + } + + if (w >= 0.5) { + result /= cephes::lanczos_sum_expg_scaled(w); + wgh = w + cephes::lanczos_g - 0.5; + w_prime = w; + } else { + result *= cephes::lanczos_sum_expg_scaled(1 - w) * std::sin(M_PI * w) * M_1_PI; + wgh = 0.5 - w + cephes::lanczos_g; + w_prime = 1 - w; + } + + if (x >= 0.5) { + result /= cephes::lanczos_sum_expg_scaled(x); + xgh = x + cephes::lanczos_g - 0.5; + x_prime = x; + } else { + result *= cephes::lanczos_sum_expg_scaled(1 - x) * std::sin(M_PI * x) * M_1_PI; + xgh = 0.5 - x + cephes::lanczos_g; + x_prime = 1 - x; + } + + if (std::abs(u) >= std::abs(w)) { + // u has greatest absolute value. Absorb ugh into the others. + if (std::abs((v_prime - u_prime) * (v - 0.5)) < 100 * ugh and v > 100) { + /* Special case where base is close to 1. Condition taken from + * Boost's beta function implementation. 
*/ + result *= std::exp((v - 0.5) * std::log1p((v_prime - u_prime) / ugh)); + } else { + result *= std::pow(vgh / ugh, v - 0.5); + } + + if (std::abs((u_prime - w_prime) * (w - 0.5)) < 100 * wgh and u > 100) { + result *= std::exp((w - 0.5) * std::log1p((u_prime - w_prime) / wgh)); + } else { + result *= std::pow(ugh / wgh, w - 0.5); + } + + if (std::abs((u_prime - x_prime) * (x - 0.5)) < 100 * xgh and u > 100) { + result *= std::exp((x - 0.5) * std::log1p((u_prime - x_prime) / xgh)); + } else { + result *= std::pow(ugh / xgh, x - 0.5); + } + } else { + // w has greatest absolute value. Absorb wgh into the others. + if (std::abs((u_prime - w_prime) * (u - 0.5)) < 100 * wgh and u > 100) { + result *= std::exp((u - 0.5) * std::log1p((u_prime - w_prime) / wgh)); + } else { + result *= pow(ugh / wgh, u - 0.5); + } + if (std::abs((v_prime - w_prime) * (v - 0.5)) < 100 * wgh and v > 100) { + result *= std::exp((v - 0.5) * std::log1p((v_prime - w_prime) / wgh)); + } else { + result *= std::pow(vgh / wgh, v - 0.5); + } + if (std::abs((w_prime - x_prime) * (x - 0.5)) < 100 * xgh and x > 100) { + result *= std::exp((x - 0.5) * std::log1p((w_prime - x_prime) / xgh)); + } else { + result *= std::pow(wgh / xgh, x - 0.5); + } + } + // This exhausts all cases because we assume |u| >= |v| and |w| >= |x|. + + return result; + } + + SPECFUN_HOST_DEVICE inline double four_gammas(double u, double v, double w, double x) { + double result; + + // Without loss of generality, assume |u| >= |v| and |w| >= |x|. + if (std::abs(u) > std::abs(v)) { + std::swap(u, v); + } + if (std::abs(x) > std::abs(w)) { + std::swap(x, w); + } + /* Direct ratio tends to be more accurate for arguments in this range. 
Range + * chosen empirically based on the relevant benchmarks in + * scipy/special/_precompute/hyp2f1_data.py */ + if (std::abs(u) <= 100 && std::abs(v) <= 100 && std::abs(w) <= 100 && std::abs(x) <= 100) { + result = cephes::Gamma(u) * cephes::Gamma(v) / (cephes::Gamma(w) * cephes::Gamma(x)); + if (std::isfinite(result) && result != 0.0) { + return result; + } + } + result = four_gammas_lanczos(u, v, w, x); + if (std::isfinite(result) && result != 0.0) { + return result; + } + // If overflow or underflow, try again with logs. + result = std::exp(cephes::lgam(v) - cephes::lgam(x) + cephes::lgam(u) - cephes::lgam(w)); + result *= cephes::gammasgn(u) * cephes::gammasgn(w) * cephes::gammasgn(v) * cephes::gammasgn(x); + return result; + } + + class HypergeometricSeriesGenerator { + /* Maclaurin series for hyp2f1. + * + * Series is convergent for |z| < 1 but is only practical for numerical + * computation when |z| < 0.9. + */ + public: + SPECFUN_HOST_DEVICE HypergeometricSeriesGenerator(double a, double b, double c, std::complex z) + : a_(a), b_(b), c_(c), z_(z), term_(1.0), k_(0) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + std::complex output = term_; + term_ = term_ * (a_ + k_) * (b_ + k_) / ((k_ + 1) * (c_ + k_)) * z_; + ++k_; + return output; + } + + private: + double a_, b_, c_; + std::complex z_, term_; + std::uint64_t k_; + }; + + class Hyp2f1Transform1Generator { + /* 1 -z transformation of standard series.*/ + public: + SPECFUN_HOST_DEVICE Hyp2f1Transform1Generator(double a, double b, double c, std::complex z) + : factor1_(four_gammas(c, c - a - b, c - a, c - b)), + factor2_(four_gammas(c, a + b - c, a, b) * std::pow(1.0 - z, c - a - b)), + generator1_(HypergeometricSeriesGenerator(a, b, a + b - c + 1, 1.0 - z)), + generator2_(HypergeometricSeriesGenerator(c - a, c - b, c - a - b + 1, 1.0 - z)) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + return factor1_ * generator1_() + factor2_ * generator2_(); + } + + private: + std::complex 
factor1_, factor2_; + HypergeometricSeriesGenerator generator1_, generator2_; + }; + + class Hyp2f1Transform1LimitSeriesGenerator { + /* 1 - z transform in limit as c - a - b approaches an integer m. */ + public: + SPECFUN_HOST_DEVICE Hyp2f1Transform1LimitSeriesGenerator(double a, double b, double m, std::complex z) + : d1_(special::digamma(a)), d2_(special::digamma(b)), d3_(special::digamma(1 + m)), + d4_(special::digamma(1.0)), a_(a), b_(b), m_(m), z_(z), log_1_z_(std::log(1.0 - z)), + factor_(1.0 / cephes::Gamma(m + 1)), k_(0) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + std::complex term_ = (d1_ + d2_ - d3_ - d4_ + log_1_z_) * factor_; + // Use digamma(x + 1) = digamma(x) + 1/x + d1_ += 1 / (a_ + k_); // d1 = digamma(a + k) + d2_ += 1 / (b_ + k_); // d2 = digamma(b + k) + d3_ += 1 / (1.0 + m_ + k_); // d3 = digamma(1 + m + k) + d4_ += 1 / (1.0 + k_); // d4 = digamma(1 + k) + factor_ *= (a_ + k_) * (b_ + k_) / ((k_ + 1.0) * (m_ + k_ + 1)) * (1.0 - z_); + ++k_; + return term_; + } + + private: + double d1_, d2_, d3_, d4_, a_, b_, m_; + std::complex z_, log_1_z_, factor_; + int k_; + }; + + class Hyp2f1Transform2Generator { + /* 1/z transformation of standard series.*/ + public: + SPECFUN_HOST_DEVICE Hyp2f1Transform2Generator(double a, double b, double c, std::complex z) + : factor1_(four_gammas(c, b - a, b, c - a) * std::pow(-z, -a)), + factor2_(four_gammas(c, a - b, a, c - b) * std::pow(-z, -b)), + generator1_(HypergeometricSeriesGenerator(a, a - c + 1, a - b + 1, 1.0 / z)), + generator2_(HypergeometricSeriesGenerator(b, b - c + 1, b - a + 1, 1.0 / z)) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + return factor1_ * generator1_() + factor2_ * generator2_(); + } + + private: + std::complex factor1_, factor2_; + HypergeometricSeriesGenerator generator1_, generator2_; + }; + + class Hyp2f1Transform2LimitSeriesGenerator { + /* 1/z transform in limit as a - b approaches a non-negative integer m. 
(Can swap a and b to + * handle the m a negative integer case. */ + public: + SPECFUN_HOST_DEVICE Hyp2f1Transform2LimitSeriesGenerator(double a, double b, double c, double m, + std::complex z) + : d1_(special::digamma(1.0)), d2_(special::digamma(1 + m)), d3_(special::digamma(a)), + d4_(special::digamma(c - a)), a_(a), b_(b), c_(c), m_(m), z_(z), log_neg_z_(std::log(-z)), + factor_(special::cephes::poch(b, m) * special::cephes::poch(1 - c + b, m) / + special::cephes::Gamma(m + 1)), + k_(0) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + std::complex term = (d1_ + d2_ - d3_ - d4_ + log_neg_z_) * factor_; + // Use digamma(x + 1) = digamma(x) + 1/x + d1_ += 1 / (1.0 + k_); // d1 = digamma(1 + k) + d2_ += 1 / (1.0 + m_ + k_); // d2 = digamma(1 + m + k) + d3_ += 1 / (a_ + k_); // d3 = digamma(a + k) + d4_ -= 1 / (c_ - a_ - k_ - 1); // d4 = digamma(c - a - k) + factor_ *= (b_ + m_ + k_) * (1 - c_ + b_ + m_ + k_) / ((k_ + 1) * (m_ + k_ + 1)) / z_; + ++k_; + return term; + } + + private: + double d1_, d2_, d3_, d4_, a_, b_, c_, m_; + std::complex z_, log_neg_z_, factor_; + std::uint64_t k_; + }; + + class Hyp2f1Transform2LimitSeriesCminusAIntGenerator { + /* 1/z transform in limit as a - b approaches a non-negative integer m, and c - a approaches + * a positive integer n. 
*/ + public: + SPECFUN_HOST_DEVICE Hyp2f1Transform2LimitSeriesCminusAIntGenerator(double a, double b, double c, double m, + double n, std::complex z) + : d1_(special::digamma(1.0)), d2_(special::digamma(1 + m)), d3_(special::digamma(a)), + d4_(special::digamma(n)), a_(a), b_(b), c_(c), m_(m), n_(n), z_(z), log_neg_z_(std::log(-z)), + factor_(special::cephes::poch(b, m) * special::cephes::poch(1 - c + b, m) / + special::cephes::Gamma(m + 1)), + k_(0) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + std::complex term; + if (k_ < n_) { + term = (d1_ + d2_ - d3_ - d4_ + log_neg_z_) * factor_; + // Use digamma(x + 1) = digamma(x) + 1/x + d1_ += 1 / (1.0 + k_); // d1 = digamma(1 + k) + d2_ += 1 / (1 + m_ + k_); // d2 = digamma(1 + m + k) + d3_ += 1 / (a_ + k_); // d3 = digamma(a + k) + d4_ -= 1 / (n_ - k_ - 1); // d4 = digamma(c - a - k) + factor_ *= (b_ + m_ + k_) * (1 - c_ + b_ + m_ + k_) / ((k_ + 1) * (m_ + k_ + 1)) / z_; + ++k_; + return term; + } + if (k_ == n_) { + /* When c - a approaches a positive integer and k_ >= c - a = n then + * poch(1 - c + b + m + k) = poch(1 - c + a + k) = approaches zero and + * digamma(c - a - k) approaches a pole. However we can use the limit + * digamma(-n + epsilon) / gamma(-n + epsilon) -> (-1)**(n + 1) * (n+1)! as epsilon -> 0 + * to continue the series. + * + * poch(1 - c + b, m + k) = gamma(1 - c + b + m + k)/gamma(1 - c + b) + * + * If a - b is an integer and c - a is an integer, then a and b must both be integers, so assume + * a and b are integers and take the limit as c approaches an integer. 
+ * + * gamma(1 - c + epsilon + a + k)/gamma(1 - c - epsilon + b) = + * (gamma(c + epsilon - b) / gamma(c + epsilon - a - k)) * + * (sin(pi * (c + epsilon - b)) / sin(pi * (c + epsilon - a - k))) (reflection principle) + * + * In the limit as epsilon goes to zero, the ratio of sines will approach + * (-1)**(a - b + k) = (-1)**(m + k) + * + * We may then replace + * + * poch(1 - c - epsilon + b, m + k)*digamma(c + epsilon - a - k) + * + * with + * + * (-1)**(a - b + k)*gamma(c + epsilon - b) * digamma(c + epsilon - a - k) / gamma(c + epsilon - a - k) + * + * and taking the limit epsilon -> 0 gives + * + * (-1)**(a - b + k) * gamma(c - b) * (-1)**(k + a - c + 1)(k + a - c)! + * = (-1)**(c - b - 1)*Gamma(k + a - c + 1) + */ + factor_ = std::pow(-1, m_ + n_) * special::binom(c_ - 1, b_ - 1) * + special::cephes::poch(c_ - a_ + 1, m_ - 1) / std::pow(z_, static_cast(k_)); + } + term = factor_; + factor_ *= (b_ + m_ + k_) * (k_ + a_ - c_ + 1) / ((k_ + 1) * (m_ + k_ + 1)) / z_; + ++k_; + return term; + } + + private: + double d1_, d2_, d3_, d4_, a_, b_, c_, m_, n_; + std::complex z_, log_neg_z_, factor_; + std::uint64_t k_; + }; + + class Hyp2f1Transform2LimitFinitePartGenerator { + /* Initial finite sum in limit as a - b approaches a non-negative integer m. The limiting series + * for the 1 - z transform also has an initial finite sum, but it is a standard hypergeometric + * series. */ + public: + SPECFUN_HOST_DEVICE Hyp2f1Transform2LimitFinitePartGenerator(double b, double c, double m, + std::complex z) + : b_(b), c_(c), m_(m), z_(z), term_(cephes::Gamma(m) / cephes::Gamma(c - b)), k_(0) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + std::complex output = term_; + term_ = term_ * (b_ + k_) * (c_ - b_ - k_ - 1) / ((k_ + 1) * (m_ - k_ - 1)) / z_; + ++k_; + return output; + } + + private: + double b_, c_, m_; + std::complex z_, term_; + std::uint64_t k_; + }; + + class LopezTemmeSeriesGenerator { + /* Lopez-Temme Series for Gaussian hypergeometric function [4]. 
+ * + * Converges for all z with real(z) < 1, including in the regions surrounding + * the points exp(+- i*pi/3) that are not covered by any of the standard + * transformations. + */ + public: + SPECFUN_HOST_DEVICE LopezTemmeSeriesGenerator(double a, double b, double c, std::complex z) + : n_(0), a_(a), b_(b), c_(c), phi_previous_(1.0), phi_(1 - 2 * b / c), z_(z), Z_(a * z / (z - 2.0)) {} + + SPECFUN_HOST_DEVICE std::complex operator()() { + if (n_ == 0) { + ++n_; + return 1.0; + } + if (n_ > 1) { // Update phi and Z for n>=2 + double new_phi = ((n_ - 1) * phi_previous_ - (2.0 * b_ - c_) * phi_) / (c_ + (n_ - 1)); + phi_previous_ = phi_; + phi_ = new_phi; + Z_ = Z_ * z_ / (z_ - 2.0) * ((a_ + (n_ - 1)) / n_); + } + ++n_; + return Z_ * phi_; + } + + private: + std::uint64_t n_; + double a_, b_, c_, phi_previous_, phi_; + std::complex z_, Z_; + }; + + SPECFUN_HOST_DEVICE std::complex hyp2f1_transform1_limiting_case(double a, double b, double c, double m, + std::complex z) { + /* 1 - z transform in limiting case where c - a - b approaches an integer m. 
*/ + std::complex result = 0.0; + if (m >= 0) { + if (m != 0) { + auto series_generator = HypergeometricSeriesGenerator(a, b, 1 - m, 1.0 - z); + result += four_gammas(m, c, a + m, b + m) * series_eval_fixed_length(series_generator, + std::complex{0.0, 0.0}, + static_cast(m)); + } + std::complex prefactor = std::pow(-1.0, m + 1) * special::cephes::Gamma(c) / + (special::cephes::Gamma(a) * special::cephes::Gamma(b)) * + std::pow(1.0 - z, m); + auto series_generator = Hyp2f1Transform1LimitSeriesGenerator(a + m, b + m, m, z); + result += prefactor * series_eval(series_generator, std::complex{0.0, 0.0}, hyp2f1_EPS, + hyp2f1_MAXITER, "hyp2f1"); + return result; + } else { + result = four_gammas(-m, c, a, b) * std::pow(1.0 - z, m); + auto series_generator1 = HypergeometricSeriesGenerator(a + m, b + m, 1 + m, 1.0 - z); + result *= series_eval_fixed_length(series_generator1, std::complex{0.0, 0.0}, + static_cast(-m)); + double prefactor = std::pow(-1.0, m + 1) * special::cephes::Gamma(c) / + (special::cephes::Gamma(a + m) * special::cephes::Gamma(b + m)); + auto series_generator2 = Hyp2f1Transform1LimitSeriesGenerator(a, b, -m, z); + result += prefactor * series_eval(series_generator2, std::complex{0.0, 0.0}, hyp2f1_EPS, + hyp2f1_MAXITER, "hyp2f1"); + return result; + } + } + + SPECFUN_HOST_DEVICE std::complex hyp2f1_transform2_limiting_case(double a, double b, double c, double m, + std::complex z) { + /* 1 / z transform in limiting case where a - b approaches a non-negative integer m. Negative integer case + * can be handled by swapping a and b. 
*/ + auto series_generator1 = Hyp2f1Transform2LimitFinitePartGenerator(b, c, m, z); + std::complex result = cephes::Gamma(c) / cephes::Gamma(a) * std::pow(-z, -b); + result *= + series_eval_fixed_length(series_generator1, std::complex{0.0, 0.0}, static_cast(m)); + std::complex prefactor = cephes::Gamma(c) / (cephes::Gamma(a) * cephes::Gamma(c - b) * std::pow(-z, a)); + double n = c - a; + if (abs(n - std::round(n)) < hyp2f1_EPS) { + auto series_generator2 = Hyp2f1Transform2LimitSeriesCminusAIntGenerator(a, b, c, m, n, z); + result += prefactor * series_eval(series_generator2, std::complex{0.0, 0.0}, hyp2f1_EPS, + hyp2f1_MAXITER, "hyp2f1"); + return result; + } + auto series_generator2 = Hyp2f1Transform2LimitSeriesGenerator(a, b, c, m, z); + result += prefactor * + series_eval(series_generator2, std::complex{0.0, 0.0}, hyp2f1_EPS, hyp2f1_MAXITER, "hyp2f1"); + return result; + } + +} // namespace detail + +SPECFUN_HOST_DEVICE inline std::complex hyp2f1(double a, double b, double c, std::complex z) { + /* Special Cases + * ----------------------------------------------------------------------- + * Takes constant value 1 when a = 0 or b = 0, even if c is a non-positive + * integer. This follows mpmath. */ + if (a == 0 || b == 0) { + return 1.0; + } + double z_abs = std::abs(z); + // Equals 1 when z i 0, unless c is 0. + if (z_abs == 0) { + if (c != 0) { + return 1.0; + } else { + // Returning real part NAN and imaginary part 0 follows mpmath. + return std::complex{std::numeric_limits::quiet_NaN(), 0}; + } + } + bool a_neg_int = a == std::trunc(a) && a < 0; + bool b_neg_int = b == std::trunc(b) && b < 0; + bool c_non_pos_int = c == std::trunc(c) and c <= 0; + /* Diverges when c is a non-positive integer unless a is an integer with + * c <= a <= 0 or b is an integer with c <= b <= 0, (or z equals 0 with + * c != 0) Cases z = 0, a = 0, or b = 0 have already been handled. 
We follow + * mpmath in handling the degenerate cases where any of a, b, c are + * non-positive integers. See [3] for a treatment of degenerate cases. */ + if (c_non_pos_int && !((a_neg_int && c <= a && a < 0) || (b_neg_int && c <= b && b < 0))) { + return std::complex{std::numeric_limits::infinity(), 0}; + } + /* Reduces to a polynomial when a or b is a negative integer. + * If a and b are both negative integers, we take care to terminate + * the series at a or b of smaller magnitude. This is to ensure proper + * handling of situations like a < c < b <= 0, a, b, c all non-positive + * integers, where terminating at a would lead to a term of the form 0 / 0. */ + std::uint64_t max_degree; + if (a_neg_int || b_neg_int) { + if (a_neg_int && b_neg_int) { + max_degree = a > b ? std::abs(a) : std::abs(b); + } else if (a_neg_int) { + max_degree = std::abs(a); + } else { + max_degree = std::abs(b); + } + if (max_degree <= UINT64_MAX) { + auto series_generator = detail::HypergeometricSeriesGenerator(a, b, c, z); + return detail::series_eval_fixed_length(series_generator, std::complex{0.0, 0.0}, max_degree + 1); + } else { + set_error("hyp2f1", SF_ERROR_NO_RESULT, NULL); + return std::complex{std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()}; + } + } + // Kummer's Theorem for z = -1; c = 1 + a - b (DLMF 15.4.26) + if (std::abs(z + 1.0) < detail::hyp2f1_EPS && std::abs(1 + a - b - c) < detail::hyp2f1_EPS && !c_non_pos_int) { + return detail::four_gammas(a - b + 1, 0.5 * a + 1, a + 1, 0.5 * a - b + 1); + } + std::complex result; + bool c_minus_a_neg_int = c - a == std::trunc(c - a) && c - a < 0; + bool c_minus_b_neg_int = c - b == std::trunc(c - b) && c - b < 0; + /* If one of c - a or c - b is a negative integer, reduces to evaluating + * a polynomial through an Euler hypergeometric transformation. + * (DLMF 15.8.1) */ + if (c_minus_a_neg_int || c_minus_b_neg_int) { + max_degree = c_minus_b_neg_int ? 
std::abs(c - b) : std::abs(c - a); + if (max_degree <= UINT64_MAX) { + result = std::pow(1.0 - z, c - a - b); + auto series_generator = detail::HypergeometricSeriesGenerator(c - a, c - b, c, z); + result *= + detail::series_eval_fixed_length(series_generator, std::complex{0.0, 0.0}, max_degree + 2); + return result; + } else { + set_error("hyp2f1", SF_ERROR_NO_RESULT, NULL); + return std::complex{std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()}; + } + } + /* Diverges as real(z) -> 1 when c <= a + b. + * Todo: Actually check for overflow instead of using a fixed tolerance for + * all parameter combinations like in the Fortran original. */ + if (std::abs(1 - z.real()) < detail::hyp2f1_EPS && z.imag() == 0 && c - a - b <= 0 && !c_non_pos_int) { + return std::complex{std::numeric_limits::infinity(), 0}; + } + // Gauss's Summation Theorem for z = 1; c - a - b > 0 (DLMF 15.4.20). + if (z == 1.0 && c - a - b > 0 && !c_non_pos_int) { + return detail::four_gammas(c, c - a - b, c - a, c - b); + } + /* |z| < 0, z.real() >= 0. Use the Maclaurin Series. + * ----------------------------------------------------------------------- + * Apply Euler Hypergeometric Transformation (DLMF 15.8.1) to reduce + * size of a and b if possible. We follow Zhang and Jin's + * implementation [1] although there is very likely a better heuristic + * to determine when this transformation should be applied. As it + * stands, this hurts precision in some cases. 
*/ + if (z_abs < 0.9 && z.real() >= 0) { + if (c - a < a && c - b < b) { + result = std::pow(1.0 - z, c - a - b); + auto series_generator = detail::HypergeometricSeriesGenerator(c - a, c - b, c, z); + result *= detail::series_eval(series_generator, std::complex{0.0, 0.0}, detail::hyp2f1_EPS, + detail::hyp2f1_MAXITER, "hyp2f1"); + return result; + } + auto series_generator = detail::HypergeometricSeriesGenerator(a, b, c, z); + return detail::series_eval(series_generator, std::complex{0.0, 0.0}, detail::hyp2f1_EPS, + detail::hyp2f1_MAXITER, "hyp2f1"); + } + /* Points near exp(iπ/3), exp(-iπ/3) not handled by any of the standard + * transformations. Use series of López and Temme [5]. These regions + * were not correctly handled by Zhang and Jin's implementation. + * -------------------------------------------------------------------------*/ + if (0.9 <= z_abs && z_abs < 1.1 && std::abs(1.0 - z) >= 0.9 && z.real() >= 0) { + /* This condition for applying Euler Transformation (DLMF 15.8.1) + * was determined empirically to work better for this case than that + * used in Zhang and Jin's implementation for |z| < 0.9, + * real(z) >= 0. */ + if ((c - a <= a && c - b < b) || (c - a < a && c - b <= b)) { + auto series_generator = detail::LopezTemmeSeriesGenerator(c - a, c - b, c, z); + result = std::pow(1.0 - 0.5 * z, a - c); // Lopez-Temme prefactor + result *= detail::series_eval(series_generator, std::complex{0.0, 0.0}, detail::hyp2f1_EPS, + detail::hyp2f1_MAXITER, "hyp2f1"); + return std::pow(1.0 - z, c - a - b) * result; // Euler transform prefactor. + } + auto series_generator = detail::LopezTemmeSeriesGenerator(a, b, c, z); + result = detail::series_eval(series_generator, std::complex{0.0, 0.0}, detail::hyp2f1_EPS, + detail::hyp2f1_MAXITER, "hyp2f1"); + return std::pow(1.0 - 0.5 * z, -a) * result; // Lopez-Temme prefactor. + } + /* z/(z - 1) transformation (DLMF 15.8.1). Avoids cancellation issues that + * occur with Maclaurin series for real(z) < 0. 
+ * -------------------------------------------------------------------------*/ + if (z_abs < 1.1 && z.real() < 0) { + if (0 < b && b < a && a < c) { + std::swap(a, b); + } + auto series_generator = detail::HypergeometricSeriesGenerator(a, c - b, c, z / (z - 1.0)); + return std::pow(1.0 - z, -a) * detail::series_eval(series_generator, std::complex{0.0, 0.0}, + detail::hyp2f1_EPS, detail::hyp2f1_MAXITER, "hyp2f1"); + } + /* 1 - z transformation (DLMF 15.8.4). */ + if (0.9 <= z_abs && z_abs < 1.1) { + if (std::abs(c - a - b - std::round(c - a - b)) < detail::hyp2f1_EPS) { + // Removable singularity when c - a - b is an integer. Need to use limiting formula. + double m = std::round(c - a - b); + return detail::hyp2f1_transform1_limiting_case(a, b, c, m, z); + } + auto series_generator = detail::Hyp2f1Transform1Generator(a, b, c, z); + return detail::series_eval(series_generator, std::complex{0.0, 0.0}, detail::hyp2f1_EPS, + detail::hyp2f1_MAXITER, "hyp2f1"); + } + /* 1/z transformation (DLMF 15.8.2). 
*/ + if (std::abs(a - b - std::round(a - b)) < detail::hyp2f1_EPS) { + if (b > a) { + std::swap(a, b); + } + double m = std::round(a - b); + return detail::hyp2f1_transform2_limiting_case(a, b, c, m, z); + } + auto series_generator = detail::Hyp2f1Transform2Generator(a, b, c, z); + return detail::series_eval(series_generator, std::complex{0.0, 0.0}, detail::hyp2f1_EPS, + detail::hyp2f1_MAXITER, "hyp2f1"); +} + +inline std::complex hyp2f1(float a, float b, float c, std::complex x) { + return static_cast>(hyp2f1(static_cast(a), static_cast(b), + static_cast(c), static_cast>(x))); +} + +inline double hyp2f1(double a, double b, double c, double x) { return cephes::hyp2f1(a, b, c, x); } + +inline float hyp2f1(float a, float b, float c, float x) { + return hyp2f1(static_cast(a), static_cast(b), static_cast(c), static_cast(x)); +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/lambertw.h b/parrot/lib/python3.10/site-packages/scipy/special/special/lambertw.h new file mode 100644 index 0000000000000000000000000000000000000000..813dc16983a4327ed1aa6d81eb67a0ce11e7109c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/lambertw.h @@ -0,0 +1,150 @@ +/* Translated from Cython into C++ by SciPy developers in 2023. + * Original header with Copyright information appears below. + */ + +/* Implementation of the Lambert W function [1]. Based on MPMath + * Implementation [2], and documentation [3]. + * + * Copyright: Yosef Meller, 2009 + * Author email: mellerf@netvision.net.il + * + * Distributed under the same license as SciPy + * + * + * References: + * [1] On the Lambert W function, Adv. Comp. Math. 
5 (1996) 329-359, + * available online: https://web.archive.org/web/20230123211413/https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf + * [2] mpmath source code, + https://github.com/mpmath/mpmath/blob/c5939823669e1bcce151d89261b802fe0d8978b4/mpmath/functions/functions.py#L435-L461 + * [3] + https://web.archive.org/web/20230504171447/https://mpmath.org/doc/current/functions/powers.html#lambert-w-function + * + + * TODO: use a series expansion when extremely close to the branch point + * at `-1/e` and make sure that the proper branch is chosen there. + */ + +#pragma once + +#include "config.h" +#include "error.h" +#include "evalpoly.h" + +namespace special { +constexpr double EXPN1 = 0.36787944117144232159553; // exp(-1) +constexpr double OMEGA = 0.56714329040978387299997; // W(1, 0) + +namespace detail { + SPECFUN_HOST_DEVICE inline std::complex lambertw_branchpt(std::complex z) { + // Series for W(z, 0) around the branch point; see 4.22 in [1]. + double coeffs[] = {-1.0 / 3.0, 1.0, -1.0}; + std::complex p = std::sqrt(2.0 * (M_E * z + 1.0)); + + return cevalpoly(coeffs, 2, p); + } + + SPECFUN_HOST_DEVICE inline std::complex lambertw_pade0(std::complex z) { + // (3, 2) Pade approximation for W(z, 0) around 0. + double num[] = {12.85106382978723404255, 12.34042553191489361902, 1.0}; + double denom[] = {32.53191489361702127660, 14.34042553191489361702, 1.0}; + + /* This only gets evaluated close to 0, so we don't need a more + * careful algorithm that avoids overflow in the numerator for + * large z. */ + return z * cevalpoly(num, 2, z) / cevalpoly(denom, 2, z); + } + + SPECFUN_HOST_DEVICE inline std::complex lambertw_asy(std::complex z, long k) { + /* Compute the W function using the first two terms of the + * asymptotic series. See 4.20 in [1]. 
+ */ + std::complex w = std::log(z) + 2.0 * M_PI * k * std::complex(0, 1); + return w - std::log(w); + } + +} // namespace detail + +SPECFUN_HOST_DEVICE inline std::complex lambertw(std::complex z, long k, double tol) { + double absz; + std::complex w; + std::complex ew, wew, wewz, wn; + + if (std::isnan(z.real()) || std::isnan(z.imag())) { + return z; + } + if (z.real() == std::numeric_limits::infinity()) { + return z + 2.0 * M_PI * k * std::complex(0, 1); + } + if (z.real() == -std::numeric_limits::infinity()) { + return -z + (2.0 * M_PI * k + M_PI) * std::complex(0, 1); + } + if (z == 0.0) { + if (k == 0) { + return z; + } + set_error("lambertw", SF_ERROR_SINGULAR, NULL); + return -std::numeric_limits::infinity(); + } + if (z == 1.0 && k == 0) { + // Split out this case because the asymptotic series blows up + return OMEGA; + } + + absz = std::abs(z); + // Get an initial guess for Halley's method + if (k == 0) { + if (std::abs(z + EXPN1) < 0.3) { + w = detail::lambertw_branchpt(z); + } else if (-1.0 < z.real() && z.real() < 1.5 && std::abs(z.imag()) < 1.0 && + -2.5 * std::abs(z.imag()) - 0.2 < z.real()) { + /* Empirically determined decision boundary where the Pade + * approximation is more accurate. 
*/ + w = detail::lambertw_pade0(z); + } else { + w = detail::lambertw_asy(z, k); + } + } else if (k == -1) { + if (absz <= EXPN1 && z.imag() == 0.0 && z.real() < 0.0) { + w = std::log(-z.real()); + } else { + w = detail::lambertw_asy(z, k); + } + } else { + w = detail::lambertw_asy(z, k); + } + + // Halley's method; see 5.9 in [1] + if (w.real() >= 0) { + // Rearrange the formula to avoid overflow in exp + for (int i = 0; i < 100; i++) { + ew = std::exp(-w); + wewz = w - z * ew; + wn = w - wewz / (w + 1.0 - (w + 2.0) * wewz / (2.0 * w + 2.0)); + if (std::abs(wn - w) <= tol * std::abs(wn)) { + return wn; + } + w = wn; + } + } else { + for (int i = 0; i < 100; i++) { + ew = std::exp(w); + wew = w * ew; + wewz = wew - z; + wn = w - wewz / (wew + ew - (w + 2.0) * wewz / (2.0 * w + 2.0)); + if (std::abs(wn - w) <= tol * std::abs(wn)) { + return wn; + } + w = wn; + } + } + + set_error("lambertw", SF_ERROR_SLOW, "iteration failed to converge: %g + %gj", z.real(), z.imag()); + return {std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN()}; +} + +SPECFUN_HOST_DEVICE inline std::complex lambertw(std::complex z, long k, float tol) { + return static_cast>( + lambertw(static_cast>(z), k, static_cast(tol))); +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/loggamma.h b/parrot/lib/python3.10/site-packages/scipy/special/special/loggamma.h new file mode 100644 index 0000000000000000000000000000000000000000..a74770fb8c28cc37a75d87b4840d9bdc0ed62055 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/loggamma.h @@ -0,0 +1,163 @@ +/* Translated from Cython into C++ by SciPy developers in 2024. + * Original header comment appears below. + */ + +/* An implementation of the principal branch of the logarithm of + * Gamma. Also contains implementations of Gamma and 1/Gamma which are + * easily computed from log-Gamma. + * + * Author: Josh Wilson + * + * Distributed under the same license as Scipy. 
+ * + * References + * ---------- + * [1] Hare, "Computing the Principal Branch of log-Gamma", + * Journal of Algorithms, 1997. + * + * [2] Julia, + * https://github.com/JuliaLang/julia/blob/master/base/special/gamma.jl + */ + +#pragma once + +#include "cephes/gamma.h" +#include "cephes/rgamma.h" +#include "config.h" +#include "error.h" +#include "evalpoly.h" +#include "trig.h" +#include "zlog1.h" + +namespace special { + +namespace detail { + constexpr double loggamma_SMALLX = 7; + constexpr double loggamma_SMALLY = 7; + constexpr double loggamma_HLOG2PI = 0.918938533204672742; // log(2*pi)/2 + constexpr double loggamma_LOGPI = 1.1447298858494001741434262; // log(pi) + constexpr double loggamma_TAYLOR_RADIUS = 0.2; + + SPECFUN_HOST_DEVICE std::complex loggamma_stirling(std::complex z) { + /* Stirling series for log-Gamma + * + * The coefficients are B[2*n]/(2*n*(2*n - 1)) where B[2*n] is the + * (2*n)th Bernoulli number. See (1.1) in [1]. + */ + double coeffs[] = {-2.955065359477124183E-2, 6.4102564102564102564E-3, -1.9175269175269175269E-3, + 8.4175084175084175084E-4, -5.952380952380952381E-4, 7.9365079365079365079E-4, + -2.7777777777777777778E-3, 8.3333333333333333333E-2}; + std::complex rz = 1.0 / z; + std::complex rzz = rz / z; + + return (z - 0.5) * std::log(z) - z + loggamma_HLOG2PI + rz * cevalpoly(coeffs, 7, rzz); + } + + SPECFUN_HOST_DEVICE std::complex loggamma_recurrence(std::complex z) { + /* Backward recurrence relation. + * + * See Proposition 2.2 in [1] and the Julia implementation [2]. + * + */ + int signflips = 0; + int sb = 0; + std::complex shiftprod = z; + + z += 1.0; + int nsb; + while (z.real() <= loggamma_SMALLX) { + shiftprod *= z; + nsb = std::signbit(shiftprod.imag()); + signflips += nsb != 0 && sb == 0 ? 
1 : 0; + sb = nsb; + z += 1.0; + } + return loggamma_stirling(z) - std::log(shiftprod) - signflips * 2 * M_PI * std::complex(0, 1); + } + + SPECFUN_HOST_DEVICE std::complex loggamma_taylor(std::complex z) { + /* Taylor series for log-Gamma around z = 1. + * + * It is + * + * loggamma(z + 1) = -gamma*z + zeta(2)*z**2/2 - zeta(3)*z**3/3 ... + * + * where gamma is the Euler-Mascheroni constant. + */ + + double coeffs[] = { + -4.3478266053040259361E-2, 4.5454556293204669442E-2, -4.7619070330142227991E-2, 5.000004769810169364E-2, + -5.2631679379616660734E-2, 5.5555767627403611102E-2, -5.8823978658684582339E-2, 6.2500955141213040742E-2, + -6.6668705882420468033E-2, 7.1432946295361336059E-2, -7.6932516411352191473E-2, 8.3353840546109004025E-2, + -9.0954017145829042233E-2, 1.0009945751278180853E-1, -1.1133426586956469049E-1, 1.2550966952474304242E-1, + -1.4404989676884611812E-1, 1.6955717699740818995E-1, -2.0738555102867398527E-1, 2.7058080842778454788E-1, + -4.0068563438653142847E-1, 8.2246703342411321824E-1, -5.7721566490153286061E-1}; + + z -= 1.0; + return z * cevalpoly(coeffs, 22, z); + } +} // namespace detail + +SPECFUN_HOST_DEVICE inline double loggamma(double x) { + if (x < 0.0) { + return std::numeric_limits::quiet_NaN(); + } + return cephes::lgam(x); +} + +SPECFUN_HOST_DEVICE inline float loggamma(float x) { return loggamma(static_cast(x)); } + +SPECFUN_HOST_DEVICE inline std::complex loggamma(std::complex z) { + // Compute the principal branch of log-Gamma + + if (std::isnan(z.real()) || std::isnan(z.imag())) { + return {std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN()}; + } + if (z.real() <= 0 and z == std::floor(z.real())) { + set_error("loggamma", SF_ERROR_SINGULAR, NULL); + return {std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN()}; + } + if (z.real() > detail::loggamma_SMALLX || std::abs(z.imag()) > detail::loggamma_SMALLY) { + return detail::loggamma_stirling(z); + } + if (std::abs(z - 1.0) < 
detail::loggamma_TAYLOR_RADIUS) { + return detail::loggamma_taylor(z); + } + if (std::abs(z - 2.0) < detail::loggamma_TAYLOR_RADIUS) { + // Recurrence relation and the Taylor series around 1. + return detail::zlog1(z - 1.0) + detail::loggamma_taylor(z - 1.0); + } + if (z.real() < 0.1) { + // Reflection formula; see Proposition 3.1 in [1] + double tmp = std::copysign(2 * M_PI, z.imag()) * std::floor(0.5 * z.real() + 0.25); + return std::complex(detail::loggamma_LOGPI, tmp) - std::log(sinpi(z)) - loggamma(1.0 - z); + } + if (std::signbit(z.imag()) == 0) { + // z.imag() >= 0 but is not -0.0 + return detail::loggamma_recurrence(z); + } + return std::conj(detail::loggamma_recurrence(std::conj(z))); +} + +SPECFUN_HOST_DEVICE inline std::complex loggamma(std::complex z) { + return static_cast>(loggamma(static_cast>(z))); +} + +SPECFUN_HOST_DEVICE inline double rgamma(double z) { return cephes::rgamma(z); } + +SPECFUN_HOST_DEVICE inline float rgamma(float z) { return rgamma(static_cast(z)); } + +SPECFUN_HOST_DEVICE inline std::complex rgamma(std::complex z) { + // Compute 1/Gamma(z) using loggamma. + if (z.real() <= 0 && z == std::floor(z.real())) { + // Zeros at 0, -1, -2, ... 
+ return 0.0; + } + return std::exp(-loggamma(z)); +} + +SPECFUN_HOST_DEVICE inline std::complex rgamma(std::complex z) { + return static_cast>(rgamma(static_cast>(z))); +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/tools.h b/parrot/lib/python3.10/site-packages/scipy/special/special/tools.h new file mode 100644 index 0000000000000000000000000000000000000000..1b94453cb239765af00be0affd374be6bd211c74 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/tools.h @@ -0,0 +1,269 @@ +/* Building blocks for implementing special functions */ + +#pragma once + +#include "config.h" +#include "error.h" + +namespace special { +namespace detail { + + /* Result type of a "generator", a callable object that produces a value + * each time it is called. + */ + template + using generator_result_t = std::decay_t>; + + /* Used to deduce the type of the numerator/denominator of a fraction. */ + template + struct pair_traits; + + template + struct pair_traits> { + using value_type = T; + }; + + template + using pair_value_t = typename pair_traits::value_type; + + /* Used to extract the "value type" of a complex type. */ + template + struct real_type { + using type = T; + }; + + template + struct real_type> { + using type = T; + }; + + template + using real_type_t = typename real_type::type; + + // Return NaN, handling both real and complex types. + template + SPECFUN_HOST_DEVICE inline std::enable_if_t, T> maybe_complex_NaN() { + return std::numeric_limits::quiet_NaN(); + } + + template + SPECFUN_HOST_DEVICE inline std::enable_if_t, T> maybe_complex_NaN() { + using V = typename T::value_type; + return {std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN()}; + } + + // Series evaluators. + template > + SPECFUN_HOST_DEVICE T series_eval(Generator &g, T init_val, real_type_t tol, std::uint64_t max_terms, + const char *func_name) { + /* Sum an infinite series to a given precision. 
+ * + * g : a generator of terms for the series. + * + * init_val : A starting value that terms are added to. This argument determines the + * type of the result. + * + * tol : relative tolerance for stopping criterion. + * + * max_terms : The maximum number of terms to add before giving up and declaring + * non-convergence. + * + * func_name : The name of the function within SciPy where this call to series_eval + * will ultimately be used. This is needed to pass to set_error in case + * of non-convergence. + */ + T result = init_val; + T term; + for (std::uint64_t i = 0; i < max_terms; ++i) { + term = g(); + result += term; + if (std::abs(term) < std::abs(result) * tol) { + return result; + } + } + // Exceeded max terms without converging. Return NaN. + set_error(func_name, SF_ERROR_NO_RESULT, NULL); + return maybe_complex_NaN(); + } + + template > + SPECFUN_HOST_DEVICE T series_eval_fixed_length(Generator &g, T init_val, std::uint64_t num_terms) { + /* Sum a fixed number of terms from a series. + * + * g : a generator of terms for the series. + * + * init_val : A starting value that terms are added to. This argument determines the + * type of the result. + * + * max_terms : The number of terms from the series to sum. + * + */ + T result = init_val; + for (std::uint64_t i = 0; i < num_terms; ++i) { + result += g(); + } + return result; + } + + /* Performs one step of Kahan summation. */ + template + SPECFUN_HOST_DEVICE void kahan_step(T& sum, T& comp, T x) { + T y = x - comp; + T t = sum + y; + comp = (t - sum) - y; + sum = t; + } + + /* Evaluates an infinite series using Kahan summation. + * + * Denote the series by + * + * S = a[0] + a[1] + a[2] + ... + * + * And for n = 0, 1, 2, ..., denote its n-th partial sum by + * + * S[n] = a[0] + a[1] + ... + a[n] + * + * This function computes S[0], S[1], ... until a[n] is sufficiently + * small or if the maximum number of terms have been evaluated. 
+ * + * Parameters + * ---------- + * g + * Reference to generator that yields the sequence of values a[1], + * a[2], a[3], ... + * + * tol + * Relative tolerance for convergence. Specifically, stop iteration + * as soon as `abs(a[n]) <= tol * abs(S[n])` for some n >= 1. + * + * max_terms + * Maximum number of terms after a[0] to evaluate. It should be set + * large enough such that the convergence criterion is guaranteed + * to have been satisfied within that many terms if there is no + * rounding error. + * + * init_val + * a[0]. Default is zero. The type of this parameter (T) is used + * for intermediary computations as well as the result. + * + * Return Value + * ------------ + * If the convergence criterion is satisfied by some `n <= max_terms`, + * returns `(S[n], n)`. Otherwise, returns `(S[max_terms], 0)`. + */ + template > + SPECFUN_HOST_DEVICE std::pair series_eval_kahan( + Generator &&g, real_type_t tol, std::uint64_t max_terms, T init_val = T(0)) { + + T sum = init_val; + T comp = 0; + for (std::uint64_t i = 0; i < max_terms; ++i) { + T term = g(); + kahan_step(sum, comp, term); + if (std::abs(term) <= tol * std::abs(sum)) { + return {sum, i + 1}; + } + } + return {sum, 0}; + } + + /* Generator that yields the difference of successive convergents of a + * continued fraction. + * + * Let f[n] denote the n-th convergent of a continued fraction: + * + * a[1] a[2] a[n] + * f[n] = b[0] + ------ ------ ... ---- + * b[1] + b[2] + b[n] + * + * with f[0] = b[0]. This generator yields the sequence of values + * f[1]-f[0], f[2]-f[1], f[3]-f[2], ... + * + * Constructor Arguments + * --------------------- + * cf + * Reference to generator that yields the terms of the continued + * fraction as (numerator, denominator) pairs, starting from + * (a[1], b[1]). + * + * `cf` must outlive the ContinuedFractionSeriesGenerator object. + * + * The constructed object always eagerly retrieves the next term + * of the continued fraction. 
Specifically, (a[1], b[1]) is + * retrieved upon construction, and (a[n], b[n]) is retrieved after + * (n-1) calls of `()`. + * + * Type Arguments + * -------------- + * T + * Type in which computations are performed and results are turned. + * + * Remarks + * ------- + * The series is computed using the recurrence relation described in [1]. + * + * No error checking is performed. The caller must ensure that all terms + * are finite and that intermediary computations do not trigger floating + * point exceptions such as overflow. + * + * The numerical stability of this method depends on the characteristics + * of the continued fraction being evaluated. + * + * Reference + * --------- + * [1] Gautschi, W. (1967). “Computational Aspects of Three-Term + * Recurrence Relations.” SIAM Review, 9(1):24-82. + */ + template >> + class ContinuedFractionSeriesGenerator { + + public: + explicit ContinuedFractionSeriesGenerator(Generator &cf) : cf_(cf) { + init(); + } + + double operator()() { + double v = v_; + advance(); + return v; + } + + private: + void init() { + auto [num, denom] = cf_(); + T a = num; + T b = denom; + u_ = T(1); + v_ = a / b; + b_ = b; + } + + void advance() { + auto [num, denom] = cf_(); + T a = num; + T b = denom; + u_ = T(1) / (T(1) + (a * u_) / (b * b_)); + v_ *= (u_ - T(1)); + b_ = b; + } + + Generator& cf_; // reference to continued fraction generator + T v_; // v[n] == f[n] - f[n-1], n >= 1 + T u_; // u[1] = 1, u[n] = v[n]/v[n-1], n >= 2 + T b_; // last denominator, i.e. b[n-1] + }; + + /* Converts a continued fraction into a series whose terms are the + * difference of its successive convergents. + * + * See ContinuedFractionSeriesGenerator for details. 
+ */ + template >> + SPECFUN_HOST_DEVICE ContinuedFractionSeriesGenerator + continued_fraction_series(Generator &cf) { + return ContinuedFractionSeriesGenerator(cf); + } + +} // namespace detail +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/trig.h b/parrot/lib/python3.10/site-packages/scipy/special/special/trig.h new file mode 100644 index 0000000000000000000000000000000000000000..38bc2c5a5c398b87dfe9d6357d8cf954dff05d17 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/trig.h @@ -0,0 +1,111 @@ +/* Translated from Cython into C++ by SciPy developers in 2023. + * + * Original author: Josh Wilson, 2016. + */ + +/* Implement sin(pi*z) and cos(pi*z) for complex z. Since the periods + * of these functions are integral (and thus better representable in + * floating point), it's possible to compute them with greater accuracy + * than sin(z), cos(z). + */ + +#pragma once + +#include "cephes/trig.h" +#include "config.h" +#include "evalpoly.h" + +namespace special { + +template +SPECFUN_HOST_DEVICE T sinpi(T x) { + return cephes::sinpi(x); +} + +template +SPECFUN_HOST_DEVICE std::complex sinpi(std::complex z) { + T x = z.real(); + T piy = M_PI * z.imag(); + T abspiy = std::abs(piy); + T sinpix = cephes::sinpi(x); + T cospix = cephes::cospi(x); + + if (abspiy < 700) { + return {sinpix * std::cosh(piy), cospix * std::sinh(piy)}; + } + + /* Have to be careful--sinh/cosh could overflow while cos/sin are small. + * At this large of values + * + * cosh(y) ~ exp(y)/2 + * sinh(y) ~ sgn(y)*exp(y)/2 + * + * so we can compute exp(y/2), scale by the right factor of sin/cos + * and then multiply by exp(y/2) to avoid overflow. */ + T exphpiy = std::exp(abspiy / 2); + T coshfac; + T sinhfac; + if (exphpiy == std::numeric_limits::infinity()) { + if (sinpix == 0.0) { + // Preserve the sign of zero. 
+ coshfac = std::copysign(0.0, sinpix); + } else { + coshfac = std::copysign(std::numeric_limits::infinity(), sinpix); + } + if (cospix == 0.0) { + // Preserve the sign of zero. + sinhfac = std::copysign(0.0, cospix); + } else { + sinhfac = std::copysign(std::numeric_limits::infinity(), cospix); + } + return {coshfac, sinhfac}; + } + + coshfac = 0.5 * sinpix * exphpiy; + sinhfac = 0.5 * cospix * exphpiy; + return {coshfac * exphpiy, sinhfac * exphpiy}; +} + +template +SPECFUN_HOST_DEVICE T cospi(T x) { + return cephes::cospi(x); +} + +template +SPECFUN_HOST_DEVICE std::complex cospi(std::complex z) { + T x = z.real(); + T piy = M_PI * z.imag(); + T abspiy = std::abs(piy); + T sinpix = cephes::sinpi(x); + T cospix = cephes::cospi(x); + + if (abspiy < 700) { + return {cospix * std::cosh(piy), -sinpix * std::sinh(piy)}; + } + + // See csinpi(z) for an idea of what's going on here. + T exphpiy = std::exp(abspiy / 2); + T coshfac; + T sinhfac; + if (exphpiy == std::numeric_limits::infinity()) { + if (sinpix == 0.0) { + // Preserve the sign of zero. + coshfac = std::copysign(0.0, cospix); + } else { + coshfac = std::copysign(std::numeric_limits::infinity(), cospix); + } + if (cospix == 0.0) { + // Preserve the sign of zero. + sinhfac = std::copysign(0.0, sinpix); + } else { + sinhfac = std::copysign(std::numeric_limits::infinity(), sinpix); + } + return {coshfac, sinhfac}; + } + + coshfac = 0.5 * cospix * exphpiy; + sinhfac = 0.5 * sinpix * exphpiy; + return {coshfac * exphpiy, sinhfac * exphpiy}; +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/wright_bessel.h b/parrot/lib/python3.10/site-packages/scipy/special/special/wright_bessel.h new file mode 100644 index 0000000000000000000000000000000000000000..cc0fffdb9e67f15e59991e99bd7e5a61ef3c08ca --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/wright_bessel.h @@ -0,0 +1,841 @@ +/* Translated from Cython into C++ by SciPy developers in 2023. 
+ * Original header with Copyright information appears below. + */ + +/* Implementation of Wright's generalized Bessel function Phi, see + * https://dlmf.nist.gov/10.46.E1 + * + * Copyright: Christian Lorentzen + * + * Distributed under the same license as SciPy + * + * + * Implementation Overview: + * + * First, different functions are implemented valid for certain domains of the + * three arguments. + * Finally they are put together in wright_bessel. See the docstring of + * that function for more details. + */ + +#pragma once + +#include "cephes/lanczos.h" +#include "cephes/polevl.h" +#include "cephes/rgamma.h" +#include "config.h" +#include "digamma.h" +#include "error.h" + +namespace special { + +namespace detail { + // rgamma_zero: smallest value x for which rgamma(x) == 0 as x gets large + constexpr double rgamma_zero = 178.47241115886637; + + SPECFUN_HOST_DEVICE inline double exp_rgamma(double x, double y) { + /* Compute exp(x) / gamma(y) = exp(x) * rgamma(y). + * + * This helper function avoids overflow by using the lanczos + * approximation of the gamma function. + */ + return std::exp(x + (1 - std::log(y + cephes::lanczos_g - 0.5)) * (y - 0.5)) / + cephes::lanczos_sum_expg_scaled(y); + } + + SPECFUN_HOST_DEVICE inline double wb_series(double a, double b, double x, unsigned int nstart, unsigned int nstop) { + /* 1. Taylor series expansion in x=0 for x <= 1. + * + * Phi(a, b, x) = sum_k x^k / k! / Gamma(a*k + b) + * + * Note that every term, and therefore also Phi(a, b, x) is + * monotone decreasing with increasing a or b. + */ + double xk_k = std::pow(x, nstart) * cephes::rgamma(nstart + 1); // x^k/k! + double res = xk_k * cephes::rgamma(nstart * a + b); + // term k=nstart+1, +2, +3, ... 
+ if (nstop > nstart) { + // series expansion until term k such that a*k+b <= rgamma_zero + unsigned int k_max = std::floor((rgamma_zero - b) / a); + if (nstop > k_max) { + nstop = k_max; + } + for (unsigned int k = nstart + 1; k < nstop; k++) { + xk_k *= x / k; + res += xk_k * cephes::rgamma(a * k + b); + } + } + return res; + } + + template + SPECFUN_HOST_DEVICE inline double wb_large_a(double a, double b, double x, int n) { + /* 2. Taylor series expansion in x=0, for large a. + * + * Phi(a, b, x) = sum_k x^k / k! / Gamma(a*k + b) + * + * Use Stirling's formula to find k=k_max, the maximum term. + * Then use n terms of Taylor series around k_max. + */ + int k_max = static_cast(std::pow(std::pow(a, -a) * x, 1.0 / (1 + a))); + + int nstart = k_max - n / 2; + if (nstart < 0) { + nstart = 0; + } + + double res = 0; + double lnx = std::log(x); + // For numerical stability, we factor out the maximum term exp(..) with k=k_max + // but only if it is larger than 0. + double max_exponent = std::fmax(0, k_max * lnx - cephes::lgam(k_max + 1) - cephes::lgam(a * k_max + b)); + for (int k = nstart; k < nstart + n; k++) { + res += std::exp(k * lnx - cephes::lgam(k + 1) - cephes::lgam(a * k + b) - max_exponent); + } + + if (!log_wb) { + res *= std::exp(max_exponent); + } else { + // logarithm of Wright's function + res = max_exponent + std::log(res); + } + return res; + } + + template + SPECFUN_HOST_DEVICE inline double wb_small_a(double a, double b, double x, int order) { + /* 3. Taylor series in a=0 up to order 5, for tiny a and not too large x + * + * Phi(a, b, x) = exp(x)/Gamma(b) + * (1 - a*x * Psi(b) + a^2/2*x*(1+x) * (Psi(b)^2 - Psi'(b) + + ... ) + + O(a^6)) + * + * where Psi is the digamma function. + * + * Parameter order takes effect only when b > 1e-3 and 2 <= order <= 5, + * otherwise it defaults to 2, or if b <= 1e-3, to 5. The lower order is, + * the fewer polygamma functions have to be computed. 
+ * + * Call: python _precompute/wright_bessel.py 1 + * + * For small b, i.e. b <= 1e-3, cancellation of poles of digamma(b)/Gamma(b) + * and polygamma needs to be carried out => series expansion in a=0 to order 5 + * and in b=0 to order 4. + * Call: python _precompute/wright_bessel.py 2 + */ + double A[6]; // coefficients of a^k (1, -x * Psi(b), ...) + double B[6]; // powers of b^k/k! or terms in polygamma functions + constexpr double C[5] = { // coefficients of a^k1 * b^k2 + 1.0000000000000000, // C[0] + 1.1544313298030657, // C[1] + -3.9352684291215233, // C[2] + -1.0080632408182857, // C[3] + 19.984633365874979, // C[4] + }; + double X[6] = { // polynomials in x; + 1, // X[0] + x, // X[1] + x * (x + 1), // X[2] + x * (x * (x + 3) + 1), // X[3] + x * (x * (x * (x + 6) + 7) + 1), // X[4] + x * (x * (x * (x * (x + 10) + 25) + 15) + 1), // X[5] + }; + double res; + + if (b <= 1E-3) { + /* Series expansion of both a and b up to order 5: + * M_PI = pi + * M_EG = Euler Gamma aka Euler Mascheroni constant + * M_Z3 = zeta(3) + * C[0] = 1 + * C[1] = 2*M_EG + * C[2] = 3*M_EG^2 - M_PI^2/2 + * C[3] = 4*M_EG^3 - 2*M_EG*M_PI^2 + 8*M_Z3 + * C[4] = 5*M_EG^4 - 5*M_EG^2*M_PI^2 + 40*M_EG*M_Z3 + M_PI^4/12 + */ + B[0] = 1.; + for (int k = 1; k < 5; k++) { + B[k] = b / k * B[k - 1]; + } + // Note that polevl assumes inverse ordering => A[5] = 0th term + A[5] = cephes::rgamma(b); + A[4] = X[1] * (C[0] + C[1] * b + C[2] * B[2] + C[3] * B[3] + C[4] * B[4]); + A[3] = X[2] / 2. * (C[1] + C[2] * b + C[3] * B[2] + C[4] * B[3]); + A[2] = X[3] / 6. * (C[2] + C[3] * b + C[4] * B[2]); + A[1] = X[4] / 24. * (C[3] + C[4] * b); + A[0] = X[5] / 120. * C[4]; + // res = exp(x) * (A[5] + A[4] * a + A[3] * a^2 + A[2] * a^3 + ...) + if (!log_wb) { + res = exp(x) * cephes::polevl(a, A, 5); + } else { + // logarithm of Wright's function + res = x + std::log(cephes::polevl(a, A, 5)); + } + } else { + /* Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5) + * A[n] = a^n/n! 
+ * But here, we repurpose A[n] = X[n] * B[n] / n! + * Note that polevl assumes inverse ordering => A[order] = 0th term */ + double dg = digamma(b); + // pg1 = polygamma(1, b) + double pg1 = cephes::zeta(2, b); + if (order <= 2) { + res = 1 + a * x * (-dg + 0.5 * a * (1 + x) * (dg * dg - pg1)); + } else { + if (order > 5) { + order = 5; + } + // pg2 = polygamma(2, b) + double pg2 = -2 * cephes::zeta(3, b); + B[0] = 1; + B[1] = -dg; + B[2] = dg * dg - pg1; + B[3] = (-dg * dg + 3 * pg1) * dg - pg2; + A[order] = 1; + A[order - 1] = X[1] * B[1]; + A[order - 2] = X[2] * B[2] / 2.; + A[order - 3] = X[3] * B[3] / 6.; + if (order >= 4) { + // double pg3 = polygamma(3, b) + double pg3 = 6 * cephes::zeta(4, b); + B[4] = ((dg * dg - 6 * pg1) * dg + 4 * pg2) * dg + 3 * pg1 * pg1 - pg3; + A[order - 4] = X[4] * B[4] / 24.; + if (order >= 5) { + // pg4 = polygamma(4, b) + double pg4 = -24 * cephes::zeta(5, b); + B[5] = + ((((-dg * dg + 10 * pg1) * dg - 10 * pg2) * dg - 15 * pg1 * pg1 + 5 * pg3) * dg + + 10 * pg1 * pg2 - pg4); + A[order - 5] = X[5] * B[5] / 120.; + } + } + res = cephes::polevl(a, A, order); + } + // res *= exp(x) * rgamma(b) + if (!log_wb) { + res *= exp_rgamma(x, b); + } else { + // logarithm of Wright's function + res = x - cephes::lgam(b) + std::log(res); + } + } + return res; + } + + template + SPECFUN_HOST_DEVICE inline double wb_asymptotic(double a, double b, double x) { + /* 4. Asymptotic expansion for large x up to order 8 + * + * Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k + * + * with Z = (a*x)^(1/(1+a)). 
+ * Call: python _precompute/wright_bessel.py 3 + */ + double A[15]; // powers of a + double B[17]; // powers of b + double Ap1[9]; // powers of (1+a) + double C[9]; // coefficients of asymptotic series a_k + + A[0] = 1.; + B[0] = 1.; + Ap1[0] = 1.; + for (int k = 1; k < 15; k++) { + A[k] = A[k - 1] * a; + } + for (int k = 1; k < 17; k++) { + B[k] = B[k - 1] * b; + } + for (int k = 1; k < 9; k++) { + Ap1[k] = Ap1[k - 1] * (1 + a); + } + + C[0] = 1. / std::sqrt(2. * M_PI * Ap1[1]); + + C[1] = C[0] / (24 * Ap1[1]); + C[1] *= (2 * a + 1) * (2 + a) - 12 * b * (1 + a - b); + + C[2] = C[0] / (1152 * Ap1[2]); + C[2] *= + (144 * B[4] - 96 * B[3] * (5 * a + 1) + 24 * B[2] * (20 * A[2] + 5 * a - 4) - + 24 * b * Ap1[1] * (6 * A[2] - 7 * a - 2) + (a + 2) * (2 * a + 1) * (2 * A[2] - 19 * a + 2)); + + C[3] = C[0] / (414720 * Ap1[3]); + C[3] *= + (8640 * B[6] - 8640 * B[5] * (7 * a - 1) + 10800 * B[4] * (14 * A[2] - 7 * a - 2) - + 1440 * B[3] * (112 * A[3] - 147 * A[2] - 63 * a + 8) + + 180 * B[2] * (364 * A[4] - 1288 * A[3] - 567 * A[2] + 392 * a + 76) - + 180 * b * Ap1[1] * (20 * A[4] - 516 * A[3] + 417 * A[2] + 172 * a - 12) - + (a + 2) * (2 * a + 1) * (556 * A[4] + 1628 * A[3] - 9093 * A[2] + 1628 * a + 556)); + + C[4] = C[0] / (39813120 * Ap1[4]); + C[4] *= + (103680 * B[8] - 414720 * B[7] * (3 * a - 1) + 725760 * B[6] * a * (8 * a - 7) - + 48384 * B[5] * (274 * A[3] - 489 * A[2] + 39 * a + 26) + + 30240 * B[4] * (500 * A[4] - 1740 * A[3] + 495 * A[2] + 340 * a - 12) - + 2880 * B[3] * (2588 * A[5] - 19780 * A[4] + 14453 * A[3] + 9697 * A[2] - 1892 * a - 404) + + 48 * B[2] * + (11488 * A[6] - 547836 * A[5] + 1007484 * A[4] + 593353 * A[3] - 411276 * A[2] - 114396 * a + 4288) + + 48 * b * Ap1[1] * + (7784 * A[6] + 48180 * A[5] - 491202 * A[4] + 336347 * A[3] + 163734 * A[2] - 28908 * a - 5560) - + (a + 2) * (2 * a + 1) * + (4568 * A[6] - 226668 * A[5] - 465702 * A[4] + 2013479 * A[3] - 465702 * A[2] - 226668 * a + 4568)); + + C[5] = C[0] / (6688604160. 
* Ap1[5]); + C[5] *= + (1741824 * B[10] - 2903040 * B[9] * (11 * a - 5) + 2177280 * B[8] * (110 * A[2] - 121 * a + 14) - + 580608 * B[7] * (1628 * A[3] - 3333 * A[2] + 1023 * a + 52) + + 169344 * B[6] * (12364 * A[4] - 43648 * A[3] + 26763 * A[2] + 1232 * a - 788) - + 24192 * B[5] * (104852 * A[5] - 646624 * A[4] + 721391 * A[3] - 16841 * A[2] - 74096 * a + 148) + + 2016 * B[4] * + (710248 * A[6] - 8878716 * A[5] + 17928834 * A[4] - 3333407 * A[3] - 4339566 * A[2] + 287364 * a + + 89128) - + 1344 * B[3] * + (87824 * A[7] - 7150220 * A[6] + 29202756 * A[5] - 15113527 * A[4] - 14223011 * A[3] + 3462492 * A[2] + + 1137092 * a - 18896) - + 84 * B[2] * + (1690480 * A[8] + 14139136 * A[7] - 232575464 * A[6] + 296712592 * A[5] + 215856619 * A[4] - + 152181392 * A[3] - 47718440 * A[2] + 5813632 * a + 943216) + + 84 * b * Ap1[1] * + (82224 * A[8] - 5628896 * A[7] - 26466520 * A[6] + 168779208 * A[5] - 104808005 * A[4] - + 56259736 * A[3] + 15879912 * A[2] + 4020640 * a - 63952) + + (a + 2) * (2 * a + 1) * + (2622064 * A[8] + 12598624 * A[7] - 167685080 * A[6] - 302008904 * A[5] + 1115235367. * A[4] - + 302008904 * A[3] - 167685080 * A[2] + 12598624 * a + 2622064)); + + C[6] = C[0] / (4815794995200. 
* Ap1[6]); + C[6] *= + (104509440 * B[12] - 209018880 * B[11] * (13 * a - 7) + 574801920 * B[10] * (52 * A[2] - 65 * a + 12) - + 63866880 * B[9] * (2834 * A[3] - 6279 * A[2] + 2769 * a - 134) + + 23950080 * B[8] * (27404 * A[4] - 98228 * A[3] + 78663 * A[2] - 10868 * a - 1012) - + 13685760 * B[7] * (105612 * A[5] - 599196 * A[4] + 791843 * A[3] - 224913 * A[2] - 27612 * a + 4540) + + 2661120 * B[6] * + (693680 * A[6] - 6473532 * A[5] + 13736424 * A[4] - 7047469 * A[3] - 723840 * A[2] + 471588 * a + 7376 + ) - + 2661120 * B[5] * + (432536 * A[7] - 7850804 * A[6] + 27531114 * A[5] - 24234457 * A[4] - 703001 * A[3] + 3633474 * A[2] - + 36244 * a - 45128) + + 166320 * B[4] * + (548912 * A[8] - 75660832 * A[7] + 502902712 * A[6] - 764807992 * A[5] + 91248287 * A[4] + + 217811464 * A[3] - 20365384 * A[2] - 9776416 * a + 37936) + + 10080 * B[3] * + (18759728 * A[9] + 165932208 * A[8] - 4710418440. * A[7] + 13686052536. * A[6] - 5456818809. * A[5] - + 6834514245. * A[4] + 1919299512. * A[3] + 752176152 * A[2] - 45661200 * a - 8616848) - + 360 * B[2] * + (32743360 * A[10] - 3381871792. * A[9] - 21488827776. * A[8] + 200389923864. * A[7] - + 198708005340. * A[6] - 171633799779. * A[5] + 123124874028. * A[4] + 40072774872. * A[3] - + 9137993280. * A[2] - 1895843248. * a + 18929728) - + 360 * b * Ap1[1] * + (57685408 * A[10] + 406929456 * A[9] - 6125375760. * A[8] - 27094918920. * A[7] + + 128752249410. * A[6] - 74866710561. * A[5] - 42917416470. * A[4] + 16256951352. * A[3] + + 4375268400. * A[2] - 316500688 * a - 47197152) + + (a + 2) * (2 * a + 1) * + (167898208 * A[10] - 22774946512. * A[9] - 88280004528. * A[8] + 611863976472. * A[7] + + 1041430242126. * A[6] - 3446851131657. * A[5] + 1041430242126. * A[4] + 611863976472. * A[3] - + 88280004528. * A[2] - 22774946512. * a + 167898208)); + + C[7] = C[0] / (115579079884800. * Ap1[7]); + C[7] *= + (179159040 * B[14] - 1254113280. * B[13] * (5 * a - 3) + 1358622720. 
* B[12] * (70 * A[2] - 95 * a + 22) - + 905748480 * B[11] * (904 * A[3] - 2109 * A[2] + 1119 * a - 112) + + 1245404160. * B[10] * (3532 * A[4] - 12824 * A[3] + 11829 * A[2] - 2824 * a + 44) - + 59304960 * B[9] * (256820 * A[5] - 1397680 * A[4] + 2025545 * A[3] - 869495 * A[2] + 52000 * a + 8788) + + 14826240 * B[8] * + (2274536 * A[6] - 18601572 * A[5] + 40698318 * A[4] - 28230079 * A[3] + 3916398 * A[2] + 832668 * a - + 65176) - + 59304960 * B[7] * + (760224 * A[7] - 9849164 * A[6] + 32495784 * A[5] - 34813869 * A[4] + 9175207 * A[3] + 1898688 * A[2] - + 469788 * a - 13184) + + 25945920 * B[6] * + (1167504 * A[8] - 28779840 * A[7] + 149752856 * A[6] - 246026112 * A[5] + 111944073 * A[4] + + 18341600 * A[3] - 12131496 * A[2] - 274368 * a + 102800) - + 157248 * B[5] * + (12341872 * A[9] - 3122991216. * A[8] + 29900054232. * A[7] - 78024816720. * A[6] + + 58914656739. * A[5] + 4637150811. * A[4] - 11523402480. * A[3] + 236218968 * A[2] + 337923216 * a + + 1592048) - + 28080 * B[4] * + (265154912 * A[10] + 2276098704. * A[9] - 105569461008. * A[8] + 496560666360. * A[7] - + 627891462858. * A[6] + 41935358025. * A[5] + 203913875814. * A[4] - 23984801544. * A[3] - + 13869306000. * A[2] + 372786832 * a + 103532640) + + 1440 * B[3] * + (310292864 * A[11] - 55169117872. * A[10] - 358957020112. * A[9] + 5714152556088. * A[8] - + 13241597459352. * A[7] + 4220720097141. * A[6] + 6845418090249. * A[5] - 2129559215808. * A[4] - + 909225098472. * A[3] + 107518582576. * A[2] + 25619444368. * a - 113832704) + + 12 * B[2] * + (135319651136. * A[12] + 1119107842176. * A[11] - 22193518174320. * A[10] - 133421793595520. * A[9] + + 860103051087996. * A[8] - 703353374803080. * A[7] - 704240127687381. * A[6] + + 513111704637960. * A[5] + 166909061348316. * A[4] - 57671564069120. * A[3] - 12453426246000. * A[2] + + 695901207936. * a + 93786157376.) - + 12 * b * Ap1[1] * + (4365353408. * A[12] - 720248637504. * A[11] - 4222331152560. * A[10] + 29413934270560. * A[9] + + 132123980710980. 
* A[8] - 511247376962820. * A[7] + 283403639131779. * A[6] + + 170415792320940. * A[5] - 79274388426588. * A[4] - 21009953050400. * A[3] + 3284035340880. * A[2] + + 589294339776. * a - 3693760576.) - + (a + 2) * (2 * a + 1) * + (34221025984. * A[12] + 226022948160. * A[11] - 5067505612464. * A[10] - 18868361443936. * A[9] + + 86215425028308. * A[8] + 143500920544692. * A[7] - 437682618704613. * A[6] + 143500920544692. * A[5] + + 86215425028308. * A[4] - 18868361443936. * A[3] - 5067505612464. * A[2] + 226022948160. * a + + 34221025984.)); + + C[8] = C[0] / (22191183337881600. * Ap1[8]); + C[8] *= + (2149908480. * B[16] - 5733089280. * B[15] * (17 * a - 11) + + 7166361600. * B[14] * (272 * A[2] - 391 * a + 104) - + 3344302080. * B[13] * (6766 * A[3] - 16371 * A[2] + 9741 * a - 1306) + + 1811496960. * B[12] * (93092 * A[4] - 341564 * A[3] + 344199 * A[2] - 104924 * a + 6308) - + 517570560 * B[11] * + (1626220 * A[5] - 8641508 * A[4] + 13274773 * A[3] - 6952303 * A[2] + 1007420 * a + 5564) + + 284663808 * B[10] * + (9979136 * A[6] - 75766892 * A[5] + 169256148 * A[4] - 136824959 * A[3] + 35714348 * A[2] - + 463692 * a - 293664) - + 1423319040. * B[9] * + (4466648 * A[7] - 49231116 * A[6] + 157507414 * A[5] - 187114257 * A[4] + 78372295 * A[3] - + 4470082 * A[2] - 1913996 * a + 82424) + + 266872320 * B[8] * + (33133136 * A[8] - 564264544 * A[7] + 2618606424. * A[6] - 4491310104. * A[5] + 2853943765. * A[4] - + 374694552 * A[3] - 135365288 * A[2] + 17623968 * a + 696912) - + 2156544 * B[7] * + (2914256144. * A[9] - 93491712432. * A[8] + 664876176984. * A[7] - 1661362937880. * A[6] + + 1563719627313. * A[5] - 382840842843. * A[4] - 115399415640. * A[3] + 34565562936. * A[2] + + 1609337232. * a - 217321904) + + 179712 * B[6] * + (1266018560. * A[10] - 789261834512. * A[9] + 10186841596896. * A[8] - 38877799073352. * A[7] + + 54334425968952. * A[6] - 22529574889533. * A[5] - 5132942328000. * A[4] + 3438377465592. * A[3] + + 84287641248. * A[2] - 72493479440. 
* a - 807415936) + + 13824 * B[5] * + (156356794976. * A[11] + 1180898077328. * A[10] - 90615270907936. * A[9] + 609258947056248. * A[8] - + 1312655191366722. * A[7] + 885900509321745. * A[6] + 112162151855265. * A[5] - + 212803071513258. * A[4] + 6805217831352. * A[3] + 10051742651296. * A[2] - 55035924848. * a - + 52946379296.) - + 576 * B[4] * + (143943926464. * A[12] - 60115486481856. * A[11] - 376366989757200. * A[10] + + 9534223075576160. * A[9] - 35603777465262396. * A[8] + 39375990156664980. * A[7] - + 868175004137259. * A[6] - 14279180718355020. * A[5] + 1985747535239364. * A[4] + + 1264001337603680. * A[3] - 75972792514320. * A[2] - 23855850572736. * a - 4996648256.) - + 384 * B[3] * + (2038525473856. * A[13] + 16057322146112. * A[12] - 502133360559024. * A[11] - + 2985686417468080. * A[10] + 32418922182093292. * A[9] - 63665380623022452. * A[8] + + 16481208821092575. * A[7] + 34161547357596099. * A[6] - 11490298497454932. * A[5] - + 5117272758337156. * A[4] + 933703210750480. * A[3] + 234855186762000. * A[2] - 7860524600000. * a - + 1226607567040.) + + 96 * B[2] * + (324439754752. * A[14] - 77231415197120. * A[13] - 539102931841856. * A[12] + + 4618258299956336. * A[11] + 28588485529469792. * A[10] - 141383982651179428. * A[9] + + 98783147840417772. * A[8] + 112831723492305801. * A[7] - 83329761150975036. * A[6] - + 26553582937192900. * A[5] + 12469117738765952. * A[4] + 2587165396642160. * A[3] - + 340406368038080. * A[2] - 53659641606080. * a + 219671272960.) + + 96 * b * Ap1[1] * + (1026630779520. * A[14] + 8781958472768. * A[13] - 210659786204384. * A[12] - + 1222283505284208. * A[11] + 5064251967491416. * A[10] + 24013052207628140. * A[9] - + 79710880160087370. * A[8] + 42596558293213227. * A[7] + 26570293386695790. * A[6] - + 14407831324576884. * A[5] - 3617322833922440. * A[4] + 950664948554384. * A[3] + + 172358006894496. * A[2] - 7430887938496. * a - 889746675584.) - + (a + 2) * (2 * a + 1) * + (573840801152. * A[14] - 156998277198784. 
* A[13] - 898376974770592. * A[12] + + 8622589006459984. * A[11] + 32874204024803560. * A[10] - 111492707520083828. * A[9] - + 184768503480287646. * A[8] + 528612016938984183. * A[7] - 184768503480287646. * A[6] - + 111492707520083828. * A[5] + 32874204024803560. * A[4] + 8622589006459984. * A[3] - + 898376974770592. * A[2] - 156998277198784. * a + 573840801152.)); + + double Z = std::pow(a * x, 1 / Ap1[1]); + double Zp = 1.; + double res = C[0]; + for (int k = 1; k < 9; k++) { + Zp /= Z; + res += (k % 2 == 0 ? 1 : -1) * C[k] * Zp; + } + if (!log_wb) { + res *= std::pow(Z, 0.5 - b) * std::exp(Ap1[1] / a * Z); + } else { + // logarithm of Wright's function + res = std::log(Z) * (0.5 - b) + Ap1[1] / a * Z + std::log(res); + } + return res; + } + + SPECFUN_HOST_DEVICE inline double wb_Kmod(double exp_term, double eps, double a, double b, double x, double r) { + /* Compute integrand Kmod(eps, a, b, x, r) for Gauss-Laguerre quadrature. + * + * K(a, b, x, r+eps) = exp(-r-eps) * Kmod(eps, a, b, x, r) + * + * Kmod(eps, a, b, x, r) = exp(x * (r+eps)^(-a) * cos(pi*a)) * (r+eps)^(-b) + * * sin(x * (r+eps)^(-a) * sin(pi*a) + pi * b) + * + * Note that we additionally factor out exp(exp_term) which helps with large + * terms in the exponent of exp(...) + */ + double x_r_a = x * std::pow(r + eps, -a); + return std::exp(x_r_a * cephes::cospi(a) + exp_term) * std::pow(r + eps, -b) * + std::sin(x_r_a * cephes::sinpi(a) + M_PI * b); + } + + SPECFUN_HOST_DEVICE inline double wb_P(double exp_term, double eps, double a, double b, double x, double phi) { + /* Compute integrand P for Gauss-Legendre quadrature. + * + * P(eps, a, b, x, phi) = exp(eps * cos(phi) + x * eps^(-a) * cos(a*phi)) + * * cos(eps * sin(phi) - x * eps^(-a) * sin(a*phi) + * + (1-b)*phi) + * + * Note that we additionally factor out exp(exp_term) which helps with large + * terms in the exponent of exp(...) 
+ */ + double x_eps_a = x * std::pow(eps, -a); + return std::exp(eps * std::cos(phi) + x_eps_a * std::cos(a * phi) + exp_term) * + std::cos(eps * std::sin(phi) - x_eps_a * std::sin(a * phi) + (1 - b) * phi); + } + + /* roots of laguerre polynomial of order 50 + * scipy.special.roots_laguerre(50)[0] or + * sympy.integrals.quadrature.import gauss_laguerre(50, 16)[0] */ + constexpr double wb_x_laguerre[] = { + 0.02863051833937908, 0.1508829356769337, 0.3709487815348964, 0.6890906998810479, 1.105625023539913, + 1.620961751102501, 2.23561037591518, 2.950183366641835, 3.765399774405782, 4.682089387559285, + 5.70119757478489, 6.823790909794551, 8.051063669390792, 9.384345308258407, 10.82510903154915, + 12.37498160875746, 14.03575459982991, 15.80939719784467, 17.69807093335025, 19.70414653546156, + 21.83022330657825, 24.0791514444115, 26.45405784125298, 28.95837601193738, 31.59588095662286, + 34.37072996309045, 37.28751061055049, 40.35129757358607, 43.56772026999502, 46.94304399160304, + 50.48426796312992, 54.19924488016862, 58.09682801724853, 62.18705417568891, 66.48137387844482, + 70.99294482661949, 75.73701154772731, 80.73140480247769, 85.99721113646323, 91.55969041253388, + 97.44956561485056, 103.7048912366923, 110.3738588076403, 117.5191982031112, 125.2254701334734, + 133.6120279227287, 142.8583254892541, 153.2603719726036, 165.3856433166825, 180.6983437092145 + }; + /* weights for laguerre polynomial of order 50 + * sympy.integrals.quadrature.import gauss_laguerre(50, 16)[1] */ + constexpr double wb_w_laguerre[] = { + 0.07140472613518988, 0.1471486069645884, 0.1856716275748313, 0.1843853825273539, + 0.1542011686063556, 0.1116853699022688, 0.07105288549019586, 0.04002027691150833, + 0.02005062308007171, 0.008960851203646281, 0.00357811241531566, 0.00127761715678905, + 0.0004080302449837189, 0.0001165288322309724, 2.974170493694165e-5, 6.777842526542028e-6, + 1.37747950317136e-6, 2.492886181720092e-7, 4.010354350427827e-8, 5.723331748141425e-9, + 7.229434249182665e-10, 
8.061710142281779e-11, 7.913393099943723e-12, 6.81573661767678e-13, + 5.13242671658949e-14, 3.365624762437814e-15, 1.913476326965035e-16, 9.385589781827253e-18, + 3.950069964503411e-19, 1.417749517827512e-20, 4.309970276292175e-22, 1.101257519845548e-23, + 2.344617755608987e-25, 4.11854415463823e-27, 5.902246763596448e-29, 6.812008916553065e-31, + 6.237449498812102e-33, 4.452440579683377e-35, 2.426862352250487e-37, 9.852971481049686e-40, + 2.891078872318428e-42, 5.906162708112361e-45, 8.01287459750397e-48, 6.789575424396417e-51, + 3.308173010849252e-54, 8.250964876440456e-58, 8.848728128298018e-62, 3.064894889844417e-66, + 1.988708229330752e-71, 6.049567152238783e-78 + }; + /* roots of legendre polynomial of order 50 + * sympy.integrals.quadrature.import gauss_legendre(50, 16)[0] */ + constexpr double wb_x_legendre[] = { + -0.998866404420071, -0.9940319694320907, -0.9853540840480059, -0.9728643851066921, -0.9566109552428079, + -0.9366566189448779, -0.9130785566557919, -0.885967979523613, -0.8554297694299461, -0.8215820708593359, + -0.7845558329003993, -0.7444943022260685, -0.7015524687068223, -0.6558964656854394, -0.6077029271849502, + -0.5571583045146501, -0.5044581449074642, -0.4498063349740388, -0.3934143118975651, -0.3355002454194374, + -0.276288193779532, -0.2160072368760418, -0.1548905899981459, -0.09317470156008614, -0.03109833832718888, + 0.03109833832718888, 0.09317470156008614, 0.1548905899981459, 0.2160072368760418, 0.276288193779532, + 0.3355002454194374, 0.3934143118975651, 0.4498063349740388, 0.5044581449074642, 0.5571583045146501, + 0.6077029271849502, 0.6558964656854394, 0.7015524687068223, 0.7444943022260685, 0.7845558329003993, + 0.8215820708593359, 0.8554297694299461, 0.885967979523613, 0.9130785566557919, 0.9366566189448779, + 0.9566109552428079, 0.9728643851066921, 0.9853540840480059, 0.9940319694320907, 0.998866404420071 + }; + /* weights for legendre polynomial of order 50 + * sympy.integrals.quadrature.import gauss_legendre(50, 16)[1] */ + 
constexpr double wb_w_legendre[] = { + 0.002908622553155141, 0.006759799195745401, 0.01059054838365097, 0.01438082276148557, 0.01811556071348939, + 0.02178024317012479, 0.02536067357001239, 0.0288429935805352, 0.03221372822357802, 0.03545983561514615, + 0.03856875661258768, 0.0415284630901477, 0.04432750433880328, 0.04695505130394843, 0.04940093844946632, + 0.05165570306958114, 0.05371062188899625, 0.05555774480621252, 0.05718992564772838, 0.05860084981322245, + 0.05978505870426546, 0.06073797084177022, 0.06145589959031666, 0.06193606742068324, 0.06217661665534726, + 0.06217661665534726, 0.06193606742068324, 0.06145589959031666, 0.06073797084177022, 0.05978505870426546, + 0.05860084981322245, 0.05718992564772838, 0.05555774480621252, 0.05371062188899625, 0.05165570306958114, + 0.04940093844946632, 0.04695505130394843, 0.04432750433880328, 0.0415284630901477, 0.03856875661258768, + 0.03545983561514615, 0.03221372822357802, 0.0288429935805352, 0.02536067357001239, 0.02178024317012479, + 0.01811556071348939, 0.01438082276148557, 0.01059054838365097, 0.006759799195745401, 0.002908622553155141 + }; + /* Fitted parameters for optimal choice of eps + * Call: python _precompute/wright_bessel.py 4 */ + constexpr double wb_A[] = {0.41037, 0.30833, 6.9952, 18.382, -2.8566, 2.1122}; + + template + SPECFUN_HOST_DEVICE inline double wright_bessel_integral(double a, double b, double x) { + /* 5. Integral representation + * + * K(a, b, x, r) = exp(-r + x * r^(-a) * cos(pi*a)) * r^(-b) + * * sin(x * r^(-a) * sin(pi*a) + pi * b) + * P(eps, a, b, x, phi) = exp(eps * cos(phi) + x * eps^(-a) * cos(a*phi)) + * * cos(eps * sin(phi) - x * eps^(-a) * sin(a*phi) + * + (1-b)*phi) + * + * Phi(a, b, x) = 1/pi * int_eps^inf K(a, b, x, r) * dr + * + eps^(1-b)/pi * int_0^pi P(eps, a, b, x, phi) * dphi + * + * for any eps > 0. + * + * Note that P has a misprint in Luchko (2008) Eq. 
9, the cos(phi(beta-1)) at + * the end of the first line should be removed and the −sin(phi(beta−1)) at + * the end of the second line should read +(1-b)*phi. + * This integral representation introduced the free parameter eps (from the + * radius of complex contour integration). We try to choose eps such that + * the integrand behaves smoothly. Note that this is quite diffrent from how + * Luchko (2008) deals with eps: he is either looking for the limit eps -> 0 + * or he sets (silently) eps=1. But having the freedom to set eps is much more + * powerful for numerical evaluation. + * + * As K has a leading exp(-r), we factor this out and apply Gauss-Laguerre + * quadrature rule: + * + * int_0^inf K(a, b, x, r+eps) dr = exp(-eps) int_0^inf exp(-r) Kmod(.., r) dr + * + * Note the shift r -> r+eps to have integation from 0 to infinity. + * The integral over P is done via a Gauss-Legendre quadrature rule. + * + * Note: Hardest argument range is large z, large b and small eps. + */ + + /* We use the free choice of eps to make the integral better behaved. + * 1. Concern is oscillatory behaviour of P. Therefore, we'd like to + * make the change in the argument of cosine small, i.e. make arc length + * int_0^phi sqrt(1 + f'(phi)^2) dphi small, with + * f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a*phi) + (1-b)*phi + * Proxy, make |f'(phi)| small. + * 2. Concern is int_0 K ~ int_0 (r+eps)^(-b) .. dr + * This is difficult as r -> 0 for large b. It behaves better for larger + * values of eps. + */ + + // Minimize oscillatory behavoir of P + double eps = + (wb_A[0] * b * std::exp(-0.5 * a) + + std::exp( + wb_A[1] + 1 / (1 + a) * std::log(x) - wb_A[2] * std::exp(-wb_A[3] * a) + + wb_A[4] / (1 + std::exp(wb_A[5] * a)) + )); + + if (a >= 4 && x >= 100) { + eps += 1; // This part is hard to fit + } + + // Large b + if (b >= 8) { + /* Make P small compared to K by setting eps large enough. + * int K ~ exp(-eps) and int P ~ eps^(1-b) */ + eps = std::fmax(eps, std::pow(b, -b / (1. 
- b)) + 0.1 * b); + } + + // safeguard, higher better for larger a, lower better for tiny a. + eps = std::fmin(eps, 150.); + eps = std::fmax(eps, 3.); // 3 seems to be a pretty good choice in general. + + // We factor out exp(-exp_term) from wb_Kmod and wb_P to avoid overflow of + // exp(..). + double exp_term = 0; + // From the exponent of K: + double r = wb_x_laguerre[50-1]; // largest value of x used in wb_Kmod + double x_r_a = x * std::pow(r + eps, -a); + exp_term = std::fmax(exp_term, x_r_a * cephes::cospi(a)); + // From the exponent of P: + double x_eps_a = x * std::pow(eps, -a); + // phi = 0 => cos(phi) = cos(a * phi) = 1 + exp_term = std::fmax(exp_term, eps + x_eps_a); + // phi = pi => cos(phi) = -1 + exp_term = std::fmax(exp_term, -eps + x_eps_a * cephes::cospi(a)); + + double res1 = 0; + double res2 = 0; + + double y; + for (int k = 0; k < 50; k++) { + res1 += wb_w_laguerre[k] * wb_Kmod(-exp_term, eps, a, b, x, wb_x_laguerre[k]); + // y = (b-a)*(x+1)/2.0 + a for integration from a=0 to b=pi + y = M_PI * (wb_x_legendre[k] + 1) / 2.0; + res2 += wb_w_legendre[k] * wb_P(-exp_term, eps, a, b, x, y); + } + res1 *= std::exp(-eps); + // (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1) + res2 *= M_PI / 2.0; + res2 *= std::pow(eps, 1 - b); + + if (!log_wb) { + // Remember the factored out exp_term from wb_Kmod and wb_P + return std::exp(exp_term) / M_PI * (res1 + res2); + } else { + // logarithm of Wright's function + return exp_term + std::log((res1 + res2) / M_PI); + } + } +} // namespace detail + +template +SPECFUN_HOST_DEVICE inline double wright_bessel_t(double a, double b, double x) { + /* Compute Wright's generalized Bessel function for scalar arguments. + * + * According to [1], it is an entire function defined as + * + * .. math:: \Phi(a, b; x) = \sum_{k=0}^\infty \frac{x^k}{k! \Gamma(a k + b)} + * + * So far, only non-negative values of rho=a, beta=b and z=x are implemented. 
+ * There are 5 different approaches depending on the ranges of the arguments: + * + * 1. Taylor series expansion in x=0 [1], for x <= 1. + * Involves gamma funtions in each term. + * 2. Taylor series expansion in x=0 [2], for large a. + * 3. Taylor series in a=0, for tiny a and not too large x. + * 4. Asymptotic expansion for large x [3, 4]. + * Suitable for large x while still small a and b. + * 5. Integral representation [5], in principle for all arguments. + * + * References + * ---------- + * [1] https://dlmf.nist.gov/10.46.E1 + * [2] P. K. Dunn, G. K. Smyth (2005), Series evaluation of Tweedie exponential + * dispersion model densities. Statistics and Computing 15 (2005): 267-280. + * [3] E. M. Wright (1935), The asymptotic expansion of the generalized Bessel + * function. Proc. London Math. Soc. (2) 38, pp. 257-270. + * https://doi.org/10.1112/plms/s2-38.1.257 + * [4] R. B. Paris (2017), The asymptotics of the generalised Bessel function, + * Mathematica Aeterna, Vol. 7, 2017, no. 4, 381 - 406, + * https://arxiv.org/abs/1711.03006 + * [5] Y. F. 
Luchko (2008), Algorithms for Evaluation of the Wright Function for + * the Real Arguments' Values, Fractional Calculus and Applied Analysis 11(1) + * http://sci-gems.math.bas.bg/jspui/bitstream/10525/1298/1/fcaa-vol11-num1-2008-57p-75p.pdf + */ + if (std::isnan(a) || std::isnan(b) || std::isnan(x)) { + return std::numeric_limits::quiet_NaN(); + } + if (a < 0 || b < 0 || x < 0) { + set_error("wright_bessel", SF_ERROR_DOMAIN, NULL); + return std::numeric_limits::quiet_NaN(); + } + if (std::isinf(x)) { + if (std::isinf(a) || std::isinf(b)) { + return std::numeric_limits::quiet_NaN(); + } + return std::numeric_limits::infinity(); + } + if (std::isinf(a) || std::isinf(b)) { + return std::numeric_limits::quiet_NaN(); // or 0 + } + if (a >= detail::rgamma_zero || b >= detail::rgamma_zero) { + set_error("wright_bessel", SF_ERROR_OVERFLOW, NULL); + return std::numeric_limits::quiet_NaN(); + } + if (x == 0) { + // return rgamma(b) + if (!log_wb) { + return cephes::rgamma(b); + } else { + // logarithm of Wright's function + return -cephes::lgam(b); + } + } + if (a == 0) { + // return exp(x) * rgamma(b) + if (!log_wb) { + return detail::exp_rgamma(x, b); + } else { + // logarithm of Wright's function + return x - cephes::lgam(b); + } + } + + constexpr double exp_inf = 709.78271289338403; + int order; + if ((a <= 1e-3 && b <= 50 && x <= 9) || (a <= 1e-4 && b <= 70 && x <= 100) || + (a <= 1e-5 && b <= 170 && (x < exp_inf || (log_wb && x <= 1e3)))) { + /* Taylor Series expansion in a=0 to order=order => precision <= 1e-11 + * If beta is also small => precision <= 1e-11. 
+ * max order = 5 */ + if (a <= 1e-5) { + if (x <= 1) { + order = 2; + } else if (x <= 10) { + order = 3; + } else if (x <= 100) { + order = 4; + } else { // x < exp_inf + order = 5; + } + } else if (a <= 1e-4) { + if (x <= 1e-2) { + order = 2; + } else if (x <= 1) { + order = 3; + } else if (x <= 10) { + order = 4; + } else { // x <= 100 + order = 5; + } + } else { // a <= 1e-3 + if (x <= 1e-5) { + order = 2; + } else if (x <= 1e-1) { + order = 3; + } else if (x <= 1) { + order = 4; + } else { // x <= 9 + order = 5; + } + } + + return detail::wb_small_a(a, b, x, order); + } + + if (x <= 1) { + // 18 term Taylor Series => error mostly smaller 5e-14 + double res = detail::wb_series(a, b, x, 0, 18); + if (log_wb) res = std::log(res); + return res; + } + if (x <= 2) { + // 20 term Taylor Series => error mostly smaller 1e-12 to 1e-13 + return detail::wb_series(a, b, x, 0, 20); + } + if (a >= 5) { + /* Taylor series around the approximate maximum term. + * Set number of terms=order. */ + if (a >= 10) { + if (x <= 1e11) { + order = 6; + } else { + order = static_cast(std::fmin(std::log10(x) - 5 + b / 10, 30)); + } + } else { + if (x <= 1e4) { + order = 6; + } else if (x <= 1e8) { + order = static_cast(2 * std::log10(x)); + } else if (x <= 1e10) { + order = static_cast(4 * std::log10(x) - 16); + } else { + order = static_cast(std::fmin(6 * std::log10(x) - 36, 100)); + } + } + return detail::wb_large_a(a, b, x, order); + } + if (std::pow(a * x, 1 / (1. + a)) >= 14 + b * b / (2 * (1 + a))) { + /* Asymptotic expansion in Z = (a*x)^(1/(1+a)) up to 8th term 1/Z^8. + * For 1/Z^k, the highest term in b is b^(2*k) * a0 / (2^k k! (1+a)^k). + * As a0 is a common factor to all orders, this explains a bit the + * domain of good convergence set above. + * => precision ~ 1e-11 but can go down to ~1e-8 or 1e-7 + * Note: We ensured a <= 5 as this is a bad approximation for large a. 
*/ + return detail::wb_asymptotic(a, b, x); + } + if (0.5 <= a && a <= 1.8 && 100 <= b && 1e5 <= x) { + // This is a very hard domain. This condition is placed after wb_asymptotic. + // TODO: Explore ways to cover this domain. + return std::numeric_limits::quiet_NaN(); + } + return detail::wright_bessel_integral(a, b, x); +} + + +SPECFUN_HOST_DEVICE inline double wright_bessel(double a, double b, double x) { + return wright_bessel_t(a, b, x); +} + +SPECFUN_HOST_DEVICE inline float wright_bessel(float a, float b, float x) { + return wright_bessel(static_cast(a), static_cast(b), static_cast(x)); +} + +SPECFUN_HOST_DEVICE inline double log_wright_bessel(double a, double b, double x) { + return wright_bessel_t(a, b, x); +} + +SPECFUN_HOST_DEVICE inline float log_wright_bessel(float a, float b, float x) { + return log_wright_bessel(static_cast(a), static_cast(b), static_cast(x)); +} + +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/special/zlog1.h b/parrot/lib/python3.10/site-packages/scipy/special/special/zlog1.h new file mode 100644 index 0000000000000000000000000000000000000000..37a23f1387babe4dbbf6963cb348dec6a9b9c4f4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/special/zlog1.h @@ -0,0 +1,35 @@ +/* Translated from Cython into C++ by SciPy developers in 2023. + * + * Original author: Josh Wilson, 2016. + */ + +#pragma once + +#include "config.h" + +namespace special { +namespace detail { + + SPECFUN_HOST_DEVICE inline std::complex zlog1(std::complex z) { + /* Compute log, paying special attention to accuracy around 1. We + * implement this ourselves because some systems (most notably the + * Travis CI machines) are weak in this regime. 
*/ + std::complex coeff = -1.0; + std::complex res = 0.0; + + if (std::abs(z - 1.0) > 0.1) { + return std::log(z); + } + + z -= 1.0; + for (int n = 1; n < 17; n++) { + coeff *= -z; + res += coeff / static_cast(n); + if (std::abs(res / coeff) < std::numeric_limits::epsilon()) { + break; + } + } + return res; + } +} // namespace detail +} // namespace special diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ce0e9b923a5fb17c2df71e508ac670546fd5086 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcd4fffca2fe4897445ecb94cc8380ae9f4dee14 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..278542049cbbb09413fa763e7af9dbc11c507e6a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3e75b00a84eba5d444e752467180674f9547f31e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec417ada63eb937d0cff89b327ef161206bb765a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d6f9ab9988bd3dc7bb01ff1a8ac1904c223d92e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b3a405c0911f95c595e7d1fc9a8a001bb000c87 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spence.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00b98259776fed113aab6046eed4b39844b19cbf Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spence.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ufunc_signatures.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ufunc_signatures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6f49307c2d5e070420e730d11586310d6caf757 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ufunc_signatures.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/data/__init__.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/data/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8000474296204b9a6035848b232cc8f60a21c7b7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..08c97dad3fff673386c480057c109d30cfd39f27 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include 
+#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h new file mode 100644 index 0000000000000000000000000000000000000000..34db094ca6ef02628a1b518b66b2bf302ace05a1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () +inline void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..f64c6ff2844685285187c0904ae29ee5e5774fdc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface.h @@ -0,0 
+1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) +inline ::std::tuple _weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim=0) { + return at::_ops::_weight_norm_interface::call(v, g, dim); +} + +// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple _weight_norm_interface_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) { + return at::_ops::_weight_norm_interface_out::call(v, g, dim, out0, out1); +} +// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple _weight_norm_interface_outf(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_weight_norm_interface_out::call(v, g, dim, out0, out1); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..adc2ea9f1dc3375395900d7061b0e529643d218c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the 
operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h new file mode 100644 index 0000000000000000000000000000000000000000..facda915992f5826525c556e7ee9725960d269e2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::arctanh(Tensor self) -> Tensor +inline at::Tensor arctanh(const at::Tensor & self) { + return at::_ops::arctanh::call(self); +} + +// aten::arctanh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & arctanh_(at::Tensor & self) { + return at::_ops::arctanh_::call(self); +} + +// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & arctanh_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::arctanh_out::call(self, out); +} +// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & arctanh_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::arctanh_out::call(self, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..67bf8e6c780f89f6609ead91b4675ccf97af1ed1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_ops.h @@ -0,0 +1,105 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API bitwise_right_shift_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API bitwise_right_shift__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + 
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API bitwise_right_shift_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API bitwise_right_shift_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API bitwise_right_shift__Tensor_Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for 
windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API bitwise_right_shift_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API bitwise_right_shift_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor") + static at::Tensor call(const at::Scalar & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other); +}; + +struct TORCH_API bitwise_right_shift_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..90e05ba52039c823736375263b44bd6106bddf98 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); +TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out); +TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8c26720867c36b21bdca8b0012c2e5f8ec8c5f07 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hypot_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API hypot { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API hypot_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d79393b23964c8a0760ef11d5bfcc9760f7dc52d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_inv(const at::Tensor & A); +TORCH_API at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A); +TORCH_API at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_backward_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a525c8561321c00085445f2bcfca2f288f60b1c2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..3f6ad69ad11ce08e9112b547e09f3899b36226fc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? 
bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +inline ::std::tuple native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask); + } +} + +// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? 
bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +inline ::std::tuple native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); + } +} + +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2); + } +} + +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2); + } +} + +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_layer_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2); + } +} + +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_layer_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2); + } +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a81b814bde4a3c77f00dde1923e620230f02865 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & pixel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t upscale_factor); +TORCH_API at::Tensor & pixel_shuffle_outf(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6672ec0739c14e88e7e7eb2cbf94c98dc6f0ef86 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API quantized_rnn_relu_cell { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantized_rnn_relu_cell") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor") + static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_native.h 
b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_native.h new file mode 100644 index 0000000000000000000000000000000000000000..95125dce90a3cd9d06840c40653e2ea150eff7a8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor slice(const at::Tensor & self, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f013747c084c3db2399f9e117554b5319bebca87 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor special_erfinv(const at::Tensor & self); +TORCH_API at::Tensor & special_erfinv_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_erfinv_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split.h new file mode 100644 index 0000000000000000000000000000000000000000..887106b0d5f56ed39bb0ee1ee92f76768d56dbc0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split.h @@ -0,0 +1,69 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector split(const at::Tensor & self, int64_t split_size, int64_t dim=0) { + return at::_ops::split_Tensor::call(self, split_size, dim); +} +namespace symint { + template ::value>> + ::std::vector split(const at::Tensor & self, int64_t split_size, int64_t dim=0) { + return at::_ops::split_Tensor::call(self, split_size, dim); + } +} + +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector split_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) { + return at::_ops::split_Tensor::call(self, split_size, dim); +} +namespace symint { + template ::value>> + ::std::vector split(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) { + return at::_ops::split_Tensor::call(self, split_size, dim); + } +} + +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector split(const 
at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) { + return at::_ops::split_sizes::call(self, c10::fromIntArrayRefSlow(split_size), dim); +} +namespace symint { + template ::value>> + ::std::vector split(const at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) { + return at::_ops::split_sizes::call(self, c10::fromIntArrayRefSlow(split_size), dim); + } +} + +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector split_symint(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim=0) { + return at::_ops::split_sizes::call(self, split_size, dim); +} +namespace symint { + template ::value>> + ::std::vector split(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim=0) { + return at::_ops::split_sizes::call(self, split_size, dim); + } +} + +} diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63641fa4f129213526f36388a12cdc94852c0744 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/__main__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f5a3c41eb318792965e021c83d245e0ca94c49b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/__main__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/cli.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2a96d948709b19da32c28d79b6536fbffbe23aa Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/cli.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/ipython.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/ipython.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee122f05ed846d6a02819450e77b2b3ddb3adfaa Binary files /dev/null and b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/ipython.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/main.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39e269ef0b95dfe2bed30d2be5d89ed3dd3f9435 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/main.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/parser.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84b4bfde90c6c6043a5676c3af68ebf5d662d4f6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/parser.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/variables.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/variables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..815ccc1cdc86cba1d0a9e424276c2496dee1bd43 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/variables.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/dotenv/__pycache__/version.cpython-310.pyc b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18e0a279f694595e294a860d80a861bb063b8110 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/dotenv/__pycache__/version.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/multiprocess/__info__.py b/vllm/lib/python3.10/site-packages/multiprocess/__info__.py new file mode 100644 index 0000000000000000000000000000000000000000..ecb75d49504fab7a43ef9bb21cf637dd7a7cd5ef --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/__info__.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE +''' +----------------------------------------------------------------- +multiprocess: better multiprocessing and multithreading in Python +----------------------------------------------------------------- + +About Multiprocess +================== + +``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6. + +``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing. +``multiprocess`` is in active development, so any user feedback, bug reports, comments, +or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. 
+ + +Major Features +============== + +``multiprocess`` enables: + + - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues + - objects to be shared between processes using a server process or (for simple data) shared memory + +``multiprocess`` provides: + + - equivalents of all the synchronization primitives in ``threading`` + - a ``Pool`` class to facilitate submitting tasks to worker processes + - enhanced serialization, using ``dill`` + + +Current Release +=============== + +The latest released version of ``multiprocess`` is available from: + + https://pypi.org/project/multiprocess + +``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``. + + +Development Version +=================== + +You can get the latest development version with all the shiny new features at: + + https://github.com/uqfoundation + +If you have a new contribution, please submit a pull request. + + +Installation +============ + +``multiprocess`` can be installed with ``pip``:: + + $ pip install multiprocess + +For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler. + + +Requirements +============ + +``multiprocess`` requires: + + - ``python`` (or ``pypy``), **>=3.8** + - ``setuptools``, **>=42** + - ``dill``, **>=0.3.8** + + +Basic Usage +=========== + +The ``multiprocess.Process`` class follows the API of ``threading.Thread``. 
+For example :: + + from multiprocess import Process, Queue + + def f(q): + q.put('hello world') + + if __name__ == '__main__': + q = Queue() + p = Process(target=f, args=[q]) + p.start() + print (q.get()) + p.join() + +Synchronization primitives like locks, semaphores and conditions are +available, for example :: + + >>> from multiprocess import Condition + >>> c = Condition() + >>> print (c) + ), 0> + >>> c.acquire() + True + >>> print (c) + ), 0> + +One can also use a manager to create shared objects either in shared +memory or in a server process, for example :: + + >>> from multiprocess import Manager + >>> manager = Manager() + >>> l = manager.list(range(10)) + >>> l.reverse() + >>> print (l) + [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + >>> print (repr(l)) + + +Tasks can be offloaded to a pool of worker processes in various ways, +for example :: + + >>> from multiprocess import Pool + >>> def f(x): return x*x + ... + >>> p = Pool(4) + >>> result = p.map_async(f, range(10)) + >>> print (result.get(timeout=1)) + [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] + +When ``dill`` is installed, serialization is extended to most objects, +for example :: + + >>> from multiprocess import Pool + >>> p = Pool(4) + >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, xrange(10))) + [0, 2, 6, 12, 20, 30, 42, 56, 72, 90] + + +More Information +================ + +Probably the best way to get started is to look at the documentation at +http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that +demonstrate how ``multiprocess`` can be used to leverge multiple processes +to execute Python in parallel. You can run the test suite with +``python -m multiprocess.tests``. As ``multiprocess`` conforms to the +``multiprocessing`` interface, the examples and documentation found at +http://docs.python.org/library/multiprocessing.html also apply to +``multiprocess`` if one will ``import multiprocessing as multiprocess``. 
+See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples +for a set of examples that demonstrate some basic use cases and benchmarking +for running Python code in parallel. Please feel free to submit a ticket on +github, or ask a question on stackoverflow (**@Mike McKerns**). If you would +like to share how you use ``multiprocess`` in your work, please send an email +(to **mmckerns at uqfoundation dot org**). + + +Citation +======== + +If you use ``multiprocess`` to do research that leads to publication, we ask that you +acknowledge use of ``multiprocess`` by citing the following in your publication:: + + M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, + "Building a framework for predictive science", Proceedings of + the 10th Python in Science Conference, 2011; + http://arxiv.org/pdf/1202.1056 + + Michael McKerns and Michael Aivazis, + "pathos: a framework for heterogeneous computing", 2010- ; + https://uqfoundation.github.io/project/pathos + +Please see https://uqfoundation.github.io/project/pathos or +http://arxiv.org/pdf/1202.1056 for further information. + +''' + +__all__ = [] +__version__ = '0.70.16' +__author__ = 'Mike McKerns' + +__license__ = ''' +Copyright (c) 2008-2016 California Institute of Technology. +Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +All rights reserved. + +This software forks the python package "multiprocessing". Licence and +copyright information for multiprocessing can be found in "COPYING". + +This software is available subject to the conditions and terms laid +out below. By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' diff --git a/vllm/lib/python3.10/site-packages/multiprocess/connection.py b/vllm/lib/python3.10/site-packages/multiprocess/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..aa23c04ac0ac8cd3264e28b7721360a7fb20516e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/connection.py @@ -0,0 +1,976 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
+# + +__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] + +import io +import os +import sys +import socket +import struct +import time +import tempfile +import itertools + +try: + import _multiprocess as _multiprocessing +except ImportError: + import _multiprocessing + +from . import util + +from . import AuthenticationError, BufferTooShort +from .context import reduction +_ForkingPickler = reduction.ForkingPickler + +try: + import _winapi + from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +# +# +# + +BUFSIZE = 8192 +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. + +_mmap_counter = itertools.count() + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return getattr(time,'monotonic',time.time)() + timeout + +def _check_timeout(t): + return getattr(time,'monotonic',time.time)() > t + +# +# +# + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) + elif family == 'AF_PIPE': + return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % + (os.getpid(), next(_mmap_counter)), dir="") + else: + raise ValueError('unrecognized family') + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' 
% family) + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str or util.is_abstract_socket_namespace(address): + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? + + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def send_bytes(self, 
buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + # HACK for byte-indexing of non-bytewise buffers (e.g. array.array) + if m.itemsize > 1: + m = memoryview(bytes(m)) + n = len(m) + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(_ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable bytes-like object. + Return the number of bytes read. 
+ """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto(m[offset // itemsize : + (offset + size) // itemsize]) + return size + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return _ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. + Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. 
+ """ + _got_empty_message = False + + def _close(self, _CloseHandle=_winapi.CloseHandle): + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nwritten, err = ov.GetOverlappedResult(True) + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return f + elif err == _winapi.ERROR_MORE_DATA: + return self._get_more_data(ov, maxsize) + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + 
Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). + """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): + _close(self._handle) + _write = os.write + _read = os.read + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + n = write(self._handle, buf) + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + chunk = read(handle, remaining) + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + n = len(buf) + if n > 0x7fffffff: + pre_header = struct.pack("!i", -1) + header = struct.pack("!Q", n) + self._send(pre_header) + self._send(header) + self._send(buf) + else: + # For wire compatibility with 3.7 and lower + header = struct.pack("!i", n) + if n > 16384: + # The payload is large so Nagle's algorithm won't be triggered + # and we'd better avoid the cost of concatenation. + self._send(header) + self._send(buf) + else: + # Issue #20540: concatenate before sending, to avoid delays due + # to Nagle's algorithm on a TCP socket. + # Also note we want to avoid sending a 0-length buffer separately, + # to avoid "broken pipe" errors if the other end closed the pipe. 
+ self._send(header + buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if size == -1: + buf = self._recv(8) + size, = struct.unpack("!Q", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = family or (address and address_type(address)) \ + or default_family + address = address or arbitrary_address(family) + + _validate_family(family) + if family == 'AF_PIPE': + self._listener = PipeListener(address, backlog) + else: + self._listener = SocketListener(address, family, backlog) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + c = self._listener.accept() + if self._authkey: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. 
+ ''' + listener = self._listener + if listener is not None: + self._listener = None + listener.close() + + @property + def address(self): + return self._listener._address + + @property + def last_accepted(self): + return self._listener._last_accepted + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(True) + s2.setblocking(True) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + address = arbitrary_address('AF_PIPE') + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, + # default security descriptor: the handle cannot be 
inherited + _winapi.NULL + ) + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) + + return c1, c2 + +# +# Definitions for connections based on sockets +# + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). + if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): + # Linux abstract socket namespaces do not need to be explicitly unlinked + self._unlink = util.Finalize( + self, os.unlink, args=(address,), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + s, self._last_accepted = self._socket.accept() + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + try: + self._socket.close() + finally: + unlink = self._unlink + if unlink is not None: + self._unlink = None + unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket( getattr(socket, family) ) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# 
+# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + util.sub_debug('listener created with address=%r', self._address) + self.close = util.Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
+ else: + try: + res = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + util.sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 20 + +CHALLENGE = b'#CHALLENGE#' +WELCOME = b'#WELCOME#' +FAILURE = b'#FAILURE#' + +def deliver_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = os.urandom(MESSAGE_LENGTH) + connection.send_bytes(CHALLENGE + message) + digest = hmac.new(authkey, message, 'md5').digest() + response = connection.recv_bytes(256) # reject large message + if response == digest: + connection.send_bytes(WELCOME) + else: + connection.send_bytes(FAILURE) + raise AuthenticationError('digest received was wrong') + +def answer_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = connection.recv_bytes(256) # reject large message + assert 
message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message + message = message[len(CHALLENGE):] + digest = hmac.new(authkey, message, 'md5').digest() + connection.send_bytes(digest) + response = connection.recv_bytes(256) # reject large message + if response != WELCOME: + raise AuthenticationError('digest sent was rejected') + +# +# Support for using xmlrpclib for serialization +# + +class ConnectionWrapper(object): + def __init__(self, conn, dumps, loads): + self._conn = conn + self._dumps = dumps + self._loads = loads + for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): + obj = getattr(conn, attr) + setattr(self, attr, obj) + def send(self, obj): + s = self._dumps(obj) + self._conn.send_bytes(s) + def recv(self): + s = self._conn.recv_bytes() + return self._loads(s) + +def _xml_dumps(obj): + return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') + +def _xml_loads(s): + (obj,), method = xmlrpclib.loads(s.decode('utf-8')) + return obj + +class XmlListener(Listener): + def accept(self): + global xmlrpclib + import xmlrpc.client as xmlrpclib + obj = Listener.accept(self) + return ConnectionWrapper(obj, _xml_dumps, _xml_loads) + +def XmlClient(*args, **kwds): + global xmlrpclib + import xmlrpc.client as xmlrpclib + return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) + +# +# Wait +# + +if sys.platform == 'win32': + + def _exhaustive_wait(handles, timeout): + # Return ALL handles which are currently signalled. (Only + # returning the first signalled might create starvation issues.) 
+ L = list(handles) + ready = [] + while L: + res = _winapi.WaitForMultipleObjects(L, False, timeout) + if res == WAIT_TIMEOUT: + break + elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): + res -= WAIT_OBJECT_0 + elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): + res -= WAIT_ABANDONED_0 + else: + raise RuntimeError('Should not get here') + ready.append(L[res]) + L = L[res+1:] + timeout = 0 + return ready + + _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + ov, err = None, e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed... + if ov and sys.getwindowsversion()[:2] >= (6, 2): + # ... except on Windows 8 and later, where + # the message HAS been consumed. 
+ try: + _, err = ov.GetOverlappedResult(False) + except OSError as e: + err = e.winerror + if not err and hasattr(o, '_got_empty_message'): + o._got_empty_message = True + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [o for o in object_list if o in ready_objects] + +else: + + import selectors + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _WaitSelector = selectors.PollSelector + else: + _WaitSelector = selectors.SelectSelector + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. 
+ ''' + with _WaitSelector() as selector: + for obj in object_list: + selector.register(obj, selectors.EVENT_READ) + + if timeout is not None: + deadline = getattr(time,'monotonic',time.time)() + timeout + + while True: + ready = selector.select(timeout) + if ready: + return [key.fileobj for (key, events) in ready] + else: + if timeout is not None: + timeout = deadline - getattr(time,'monotonic',time.time)() + if timeout < 0: + return ready + +# +# Make connection and socket objects sharable if possible +# + +if sys.platform == 'win32': + def reduce_connection(conn): + handle = conn.fileno() + with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: + from . import resource_sharer + ds = resource_sharer.DupSocket(s) + return rebuild_connection, (ds, conn.readable, conn.writable) + def rebuild_connection(ds, readable, writable): + sock = ds.detach() + return Connection(sock.detach(), readable, writable) + reduction.register(Connection, reduce_connection) + + def reduce_pipe_connection(conn): + access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | + (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) + dh = reduction.DupHandle(conn.fileno(), access) + return rebuild_pipe_connection, (dh, conn.readable, conn.writable) + def rebuild_pipe_connection(dh, readable, writable): + handle = dh.detach() + return PipeConnection(handle, readable, writable) + reduction.register(PipeConnection, reduce_pipe_connection) + +else: + def reduce_connection(conn): + df = reduction.DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + reduction.register(Connection, reduce_connection) diff --git a/vllm/lib/python3.10/site-packages/multiprocess/forkserver.py b/vllm/lib/python3.10/site-packages/multiprocess/forkserver.py new file mode 100644 index 
'''Forkserver implementation (multiprocess fork of multiprocessing.forkserver).

A single long-lived "fork server" process is spawned once; clients ask it
to fork children on their behalf, which keeps child startup cheap and the
forked state clean (no inherited threads/locks from arbitrary callers).
'''
import errno
import os
import selectors
import signal
import socket
import struct
import sys
import threading
import warnings

from . import connection
from . import process
from .context import reduction
from . import resource_tracker
from . import spawn
from . import util

__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
           'set_forkserver_preload']

#
#
#

MAXFDS_TO_SEND = 256
SIGNED_STRUCT = struct.Struct('q')     # large enough for pid_t

#
# Forkserver class
#

class ForkServer(object):
    '''Client-side handle to the (lazily started) fork server process.'''

    def __init__(self):
        self._forkserver_address = None    # AF_UNIX address of the server
        self._forkserver_alive_fd = None   # write end of the "alive" pipe
        self._forkserver_pid = None        # pid of the server process
        self._inherited_fds = None         # set only inside forked children
        self._lock = threading.Lock()
        self._preload_modules = ['__main__']

    def _stop(self):
        # Method used by unit tests to stop the server
        with self._lock:
            self._stop_unlocked()

    def _stop_unlocked(self):
        '''Shut the server down and reap it; caller must hold self._lock.'''
        if self._forkserver_pid is None:
            return

        # close the "alive" file descriptor asks the server to stop
        os.close(self._forkserver_alive_fd)
        self._forkserver_alive_fd = None

        os.waitpid(self._forkserver_pid, 0)
        self._forkserver_pid = None

        if not util.is_abstract_socket_namespace(self._forkserver_address):
            os.unlink(self._forkserver_address)
        self._forkserver_address = None

    def set_forkserver_preload(self, modules_names):
        '''Set list of module names to try to load in forkserver process.

        Raises TypeError if any entry is not a string.
        '''
        # Bug fix: validate the incoming *modules_names*, not the previously
        # stored self._preload_modules (which is always valid, so the check
        # could never reject bad input).  Matches the upstream CPython fix.
        if not all(type(mod) is str for mod in modules_names):
            raise TypeError('module_names must be a list of strings')
        self._preload_modules = modules_names

    def get_inherited_fds(self):
        '''Return list of fds inherited from parent process.

        This returns None if the current process was not started by fork
        server.
        '''
        return self._inherited_fds

    def connect_to_new_process(self, fds):
        '''Request forkserver to create a child process.

        Returns a pair of fds (status_r, data_w).  The calling process can read
        the child process's pid and (eventually) its returncode from status_r.
        The calling process should write to data_w the pickled preparation and
        process data.
        '''
        self.ensure_running()
        # +4 accounts for child_r, child_w, alive fd and resource-tracker fd
        if len(fds) + 4 >= MAXFDS_TO_SEND:
            raise ValueError('too many fds')
        with socket.socket(socket.AF_UNIX) as client:
            client.connect(self._forkserver_address)
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            allfds = [child_r, child_w, self._forkserver_alive_fd,
                      resource_tracker.getfd()]
            allfds += fds
            try:
                reduction.sendfds(client, allfds)
                return parent_r, parent_w
            except:
                os.close(parent_r)
                os.close(parent_w)
                raise
            finally:
                # our copies of the child ends are no longer needed
                os.close(child_r)
                os.close(child_w)

    def ensure_running(self):
        '''Make sure that a fork server is running.

        This can be called from any process.  Note that usually a child
        process will just reuse the forkserver started by its parent, so
        ensure_running() will do nothing.
        '''
        with self._lock:
            resource_tracker.ensure_running()
            if self._forkserver_pid is not None:
                # forkserver was launched before, is it still running?
                pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)
                if not pid:
                    # still alive
                    return
                # dead, launch it again
                os.close(self._forkserver_alive_fd)
                self._forkserver_address = None
                self._forkserver_alive_fd = None
                self._forkserver_pid = None

            # command line executed by the spawned server (note: this package
            # is the 'multiprocess' fork, hence the module path below)
            cmd = ('from multiprocess.forkserver import main; ' +
                   'main(%d, %d, %r, **%r)')

            if self._preload_modules:
                desired_keys = {'main_path', 'sys_path'}
                data = spawn.get_preparation_data('ignore')
                data = {x: y for x, y in data.items() if x in desired_keys}
            else:
                data = {}

            with socket.socket(socket.AF_UNIX) as listener:
                address = connection.arbitrary_address('AF_UNIX')
                listener.bind(address)
                if not util.is_abstract_socket_namespace(address):
                    os.chmod(address, 0o600)
                listener.listen()

                # all client processes own the write end of the "alive" pipe;
                # when they all terminate the read end becomes ready.
                alive_r, alive_w = os.pipe()
                try:
                    fds_to_pass = [listener.fileno(), alive_r]
                    cmd %= (listener.fileno(), alive_r, self._preload_modules,
                            data)
                    exe = spawn.get_executable()
                    args = [exe] + util._args_from_interpreter_flags()
                    args += ['-c', cmd]
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                except:
                    os.close(alive_w)
                    raise
                finally:
                    # the server keeps its own copy of alive_r; ours is closed
                    os.close(alive_r)
                self._forkserver_address = address
                self._forkserver_alive_fd = alive_w
                self._forkserver_pid = pid

#
#
#

def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    sig_r, sig_w = os.pipe()
    os.set_blocking(sig_r, False)
    os.set_blocking(sig_w, False)

    def sigchld_handler(*_unused):
        # Dummy signal handler, doesn't do anything
        pass

    handlers = {
        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
        signal.SIGCHLD: sigchld_handler,
        # protect the process from ^C
        signal.SIGINT: signal.SIG_IGN,
    }
    old_handlers = {sig: signal.signal(sig, val)
                    for (sig, val) in handlers.items()}

    # calling os.write() in the Python signal handler is racy
    signal.set_wakeup_fd(sig_w)

    # map child pids to client fds
    pid_to_fd = {}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
         selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)
        selector.register(sig_r, selectors.EVENT_READ)

        while True:
            try:
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
                    raise SystemExit

                if sig_r in rfds:
                    # Got SIGCHLD
                    os.read(sig_r, 65536)  # exhaust
                    while True:
                        # Scan for child processes
                        try:
                            pid, sts = os.waitpid(-1, os.WNOHANG)
                        except ChildProcessError:
                            break
                        if pid == 0:
                            break
                        child_w = pid_to_fd.pop(pid, None)
                        if child_w is not None:
                            returncode = os.waitstatus_to_exitcode(sts)
                            # Send exit code to client process
                            try:
                                write_signed(child_w, returncode)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            os.close(child_w)
                        else:
                            # This shouldn't happen really
                            warnings.warn('forkserver: waitpid returned '
                                          'unexpected pid %d' % pid)

                if listener in rfds:
                    # Incoming fork request
                    with listener.accept()[0] as s:
                        # Receive fds from client
                        fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
                        if len(fds) > MAXFDS_TO_SEND:
                            raise RuntimeError(
                                "Too many ({0:n}) fds to send".format(
                                    len(fds)))
                        child_r, child_w, *fds = fds
                        s.close()
                        pid = os.fork()
                        if pid == 0:
                            # Child
                            code = 1
                            try:
                                listener.close()
                                selector.close()
                                unused_fds = [alive_r, child_w, sig_r, sig_w]
                                unused_fds.extend(pid_to_fd.values())
                                code = _serve_one(child_r, fds,
                                                  unused_fds,
                                                  old_handlers)
                            except Exception:
                                sys.excepthook(*sys.exc_info())
                                sys.stderr.flush()
                            finally:
                                os._exit(code)
                        else:
                            # Send pid to client process
                            try:
                                write_signed(child_w, pid)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            pid_to_fd[pid] = child_w
                            os.close(child_r)
                            for fd in fds:
                                os.close(fd)

            except OSError as e:
                if e.errno != errno.ECONNABORTED:
                    raise


def _serve_one(child_r, fds, unused_fds, handlers):
    '''Run one forked child: restore signals, close fds, hand off to spawn.'''
    # close unnecessary stuff and reset signal handlers
    signal.set_wakeup_fd(-1)
    for sig, val in handlers.items():
        signal.signal(sig, val)
    for fd in unused_fds:
        os.close(fd)

    (_forkserver._forkserver_alive_fd,
     resource_tracker._resource_tracker._fd,
     *_forkserver._inherited_fds) = fds

    # Run process object received over pipe
    parent_sentinel = os.dup(child_r)
    code = spawn._main(child_r, parent_sentinel)

    return code


#
# Read and write signed numbers
#

def read_signed(fd):
    '''Read one SIGNED_STRUCT-encoded integer from *fd* (blocking).'''
    data = b''
    length = SIGNED_STRUCT.size
    while len(data) < length:
        s = os.read(fd, length - len(data))
        if not s:
            raise EOFError('unexpected EOF')
        data += s
    return SIGNED_STRUCT.unpack(data)[0]

def write_signed(fd, n):
    '''Write integer *n* to *fd* in SIGNED_STRUCT encoding, handling short writes.'''
    msg = SIGNED_STRUCT.pack(n)
    while msg:
        nbytes = os.write(fd, msg)
        if nbytes == 0:
            raise RuntimeError('should not get here')
        msg = msg[nbytes:]

#
#
#

_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import bisect
from collections import defaultdict
import mmap
import os
import sys
import tempfile
import threading

from .context import reduction, assert_spawning
from . import util

__all__ = ['BufferWrapper']

#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#

if sys.platform == 'win32':

    import _winapi

    class Arena(object):
        """
        A shared memory area backed by anonymous memory (Windows).
        """

        # Generates unique-ish tag names for the anonymous mmap.
        _rand = tempfile._RandomNameSequence()

        def __init__(self, size):
            # Try up to 100 candidate tag names; GetLastError() == 0 means the
            # name was fresh (otherwise we reopened someone else's mapping).
            self.size = size
            for i in range(100):
                name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
                buf = mmap.mmap(-1, size, tagname=name)
                if _winapi.GetLastError() == 0:
                    break
                # We have reopened a preexisting mmap.
                buf.close()
            else:
                raise FileExistsError('Cannot find name for new mmap')
            self.name = name
            self.buffer = buf
            self._state = (self.size, self.name)

        def __getstate__(self):
            # Only picklable while spawning a child process.
            assert_spawning(self)
            return self._state

        def __setstate__(self, state):
            self.size, self.name = self._state = state
            # Reopen existing mmap
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            # XXX Temporarily preventing buildbot failures while determining
            # XXX the correct long-term fix. See issue 23060
            #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS

else:

    class Arena(object):
        """
        A shared memory area backed by a temporary file (POSIX).
        """

        if sys.platform == 'linux':
            _dir_candidates = ['/dev/shm']
        else:
            _dir_candidates = []

        def __init__(self, size, fd=-1):
            self.size = size
            self.fd = fd
            if fd == -1:
                # Arena is created anew (if fd != -1, it means we're coming
                # from rebuild_arena() below)
                self.fd, name = tempfile.mkstemp(
                     prefix='pym-%d-'%os.getpid(),
                     dir=self._choose_dir(size))
                os.unlink(name)
                util.Finalize(self, os.close, (self.fd,))
            os.ftruncate(self.fd, size)
            self.buffer = mmap.mmap(self.fd, self.size)

        def _choose_dir(self, size):
            # Choose a non-storage backed directory if possible,
            # to improve performance
            for d in self._dir_candidates:
                st = os.statvfs(d)
                if st.f_bavail * st.f_frsize >= size:  # enough free space?
                    return d
            return util.get_temp_dir()

    def reduce_arena(a):
        # Pickle support: ship the fd via DupFd so the child can reopen it.
        if a.fd == -1:
            raise ValueError('Arena is unpicklable because '
                             'forking was enabled when it was created')
        return rebuild_arena, (a.size, reduction.DupFd(a.fd))

    def rebuild_arena(size, dupfd):
        return Arena(size, dupfd.detach())

    reduction.register(Arena, reduce_arena)

#
# Class allowing allocation of chunks of memory from arenas
#

class Heap(object):
    # A simple first-fit allocator over a growing list of Arena mmaps.
    # Free blocks are coalesced with adjacent free neighbours on free().

    # Minimum malloc() alignment
    _alignment = 8

    _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2  # 4 MB
    _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2

    def __init__(self, size=mmap.PAGESIZE):
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        # Current arena allocation size
        self._size = size
        # A sorted list of available block sizes in arenas
        self._lengths = []

        # Free block management:
        # - map each block size to a list of `(Arena, start, stop)` blocks
        self._len_to_seq = {}
        # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block
        #   starting at that offset
        self._start_to_block = {}
        # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block
        #   ending at that offset
        self._stop_to_block = {}

        # Map arenas to their `(Arena, start, stop)` blocks in use
        self._allocated_blocks = defaultdict(set)
        self._arenas = []

        # List of pending blocks to free - see comment in free() below
        self._pending_free_blocks = []

        # Statistics
        self._n_mallocs = 0
        self._n_frees = 0

    @staticmethod
    def _roundup(n, alignment):
        # alignment must be a power of 2
        mask = alignment - 1
        return (n + mask) & ~mask

    def _new_arena(self, size):
        # Create a new arena with at least the given *size*
        length = self._roundup(max(self._size, size), mmap.PAGESIZE)
        # We carve larger and larger arenas, for efficiency, until we
        # reach a large-ish size (roughly L3 cache-sized)
        if self._size < self._DOUBLE_ARENA_SIZE_UNTIL:
            self._size *= 2
        util.info('allocating a new mmap of length %d', length)
        arena = Arena(length)
        self._arenas.append(arena)
        # the whole new arena is a single free block
        return (arena, 0, length)

    def _discard_arena(self, arena):
        # Possibly delete the given (unused) arena
        length = arena.size
        # Reusing an existing arena is faster than creating a new one, so
        # we only reclaim space if it's large enough.
        if length < self._DISCARD_FREE_SPACE_LARGER_THAN:
            return
        blocks = self._allocated_blocks.pop(arena)
        assert not blocks
        del self._start_to_block[(arena, 0)]
        del self._stop_to_block[(arena, length)]
        self._arenas.remove(arena)
        seq = self._len_to_seq[length]
        seq.remove((arena, 0, length))
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            # no free block is big enough: carve a fresh arena
            return self._new_arena(size)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        # block is no longer free: drop it from the boundary indexes
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block

    def _add_free_block(self, block):
        # make block available and try to merge with its neighbours in the arena
        (arena, start, stop) = block

        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            start, _ = self._absorb(prev_block)

        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            _, stop = self._absorb(next_block)

        block = (arena, start, stop)
        length = stop - start

        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)

        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block

    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]

        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

        return start, stop

    def _remove_allocated_block(self, block):
        # drop an in-use record; may trigger arena reclamation
        arena, start, stop = block
        blocks = self._allocated_blocks[arena]
        blocks.remove((start, stop))
        if not blocks:
            # Arena is entirely free, discard it from this process
            self._discard_arena(arena)

    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held.
        while True:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._add_free_block(block)
            self._remove_allocated_block(block)

    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
        # trylock is used instead, and if the lock can't be acquired
        # immediately, the block is added to a list of blocks to be freed
        # synchronously sometimes later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under CPython it's atomic thanks to the GIL).
        if os.getpid() != self._lastpid:
            raise ValueError(
                "My pid ({0:n}) is not last pid {1:n}".format(
                    os.getpid(),self._lastpid))
        if not self._lock.acquire(False):
            # can't acquire the lock right now, add the block to the list of
            # pending blocks to free
            self._pending_free_blocks.append(block)
        else:
            # we hold the lock
            try:
                self._n_frees += 1
                self._free_pending_blocks()
                self._add_free_block(block)
                self._remove_allocated_block(block)
            finally:
                self._lock.release()

    def malloc(self, size):
        # return a block of right size (possibly rounded up)
        if size < 0:
            raise ValueError("Size {0:n} out of range".format(size))
        if sys.maxsize <= size:
            raise OverflowError("Size {0:n} too large".format(size))
        if os.getpid() != self._lastpid:
            self.__init__()                     # reinitialize after fork
        with self._lock:
            self._n_mallocs += 1
            # allow pending blocks to be marked available
            self._free_pending_blocks()
            size = self._roundup(max(size, 1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            real_stop = start + size
            if real_stop < stop:
                # if the returned block is larger than necessary, mark
                # the remainder available
                self._add_free_block((arena, real_stop, stop))
            self._allocated_blocks[arena].add((start, real_stop))
            return (arena, start, real_stop)

#
# Class wrapping a block allocated out of a Heap -- can be inherited by child process
#

class BufferWrapper(object):

    # One process-wide heap shared by all BufferWrapper instances.
    _heap = Heap()

    def __init__(self, size):
        if size < 0:
            raise ValueError("Size {0:n} out of range".format(size))
        if sys.maxsize <= size:
            raise OverflowError("Size {0:n} too large".format(size))
        block = BufferWrapper._heap.malloc(size)
        self._state = (block, size)
        # block is returned to the heap when this wrapper is finalized
        util.Finalize(self, BufferWrapper._heap.free, args=(block,))

    def create_memoryview(self):
        # View of exactly the requested *size* bytes within the arena mmap.
        (arena, start, stop), size = self._state
        return memoryview(arena.buffer)[start:start+size]
import os
import signal

from . import util

__all__ = ['Popen']

#
# Start child process using fork
#

class Popen(object):
    # Process launcher for the 'fork' start method (POSIX only).
    method = 'fork'

    def __init__(self, process_obj):
        util._flush_std_streams()
        self.returncode = None
        self.finalizer = None
        self._launch(process_obj)

    def duplicate_for_child(self, fd):
        # With fork the child inherits fds directly; nothing to duplicate.
        return fd

    def poll(self, flag=os.WNOHANG):
        # Non-blocking (by default) check of the child's status via waitpid.
        if self.returncode is None:
            try:
                pid, sts = os.waitpid(self.pid, flag)
            except OSError:
                # Child process not yet created. See #1731717
                # e.errno == errno.ECHILD == 10
                return None
            if pid == self.pid:
                self.returncode = os.waitstatus_to_exitcode(sts)
        return self.returncode

    def wait(self, timeout=None):
        # Wait (optionally bounded) for the child via its sentinel pipe.
        if self.returncode is None:
            if timeout is not None:
                from multiprocess.connection import wait
                if not wait([self.sentinel], timeout):
                    return None
            # This shouldn't block if wait() returned successfully.
            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
        return self.returncode

    def _send_signal(self, sig):
        # Deliver *sig* to the child, tolerating an already-reaped child.
        if self.returncode is None:
            try:
                os.kill(self.pid, sig)
            except ProcessLookupError:
                pass
            except OSError:
                if self.wait(timeout=0.1) is None:
                    raise

    def terminate(self):
        self._send_signal(signal.SIGTERM)

    def kill(self):
        self._send_signal(signal.SIGKILL)

    def _launch(self, process_obj):
        # Fork; the child runs _bootstrap and _exits, the parent keeps the
        # read ends of the two pipes (parent_r doubles as the sentinel).
        code = 1
        parent_r, child_w = os.pipe()
        child_r, parent_w = os.pipe()
        self.pid = os.fork()
        if self.pid == 0:
            try:
                os.close(parent_r)
                os.close(parent_w)
                code = process_obj._bootstrap(parent_sentinel=child_r)
            finally:
                os._exit(code)
        else:
            os.close(child_w)
            os.close(child_r)
            self.finalizer = util.Finalize(self, util.close_fds,
                                           (parent_r, parent_w,))
            self.sentinel = parent_r

    def close(self):
        if self.finalizer is not None:
            self.finalizer()
import os
import msvcrt
import signal
import sys
import _winapi

from .context import reduction, get_spawning_popen, set_spawning_popen
from . import spawn
from . import util

__all__ = ['Popen']

#
#
#

TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")


def _path_eq(p1, p2):
    # Case-insensitive path comparison (Windows filesystem semantics).
    return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)

WINENV = not _path_eq(sys.executable, sys._base_executable)


def _close_handles(*handles):
    for handle in handles:
        _winapi.CloseHandle(handle)


#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#

class Popen(object):
    '''
    Start a subprocess to run the code of a process object
    '''
    method = 'spawn'

    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(process_obj._name)

        # read end of pipe will be duplicated by the child process
        # -- see spawn_main() in spawn.py.
        #
        # bpo-33929: Previously, the read end of pipe was "stolen" by the child
        # process, but it leaked a handle if the child process had been
        # terminated before it could steal the handle from the parent process.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        wfd = msvcrt.open_osfhandle(whandle, 0)
        cmd = spawn.get_command_line(parent_pid=os.getpid(),
                                     pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)

        python_exe = spawn.get_executable()

        # bpo-35797: When running in a venv, we bypass the redirect
        # executor and launch our base Python.
        if WINENV and _path_eq(python_exe, sys.executable):
            python_exe = sys._base_executable
            env = os.environ.copy()
            env["__PYVENV_LAUNCHER__"] = sys.executable
        else:
            env = None

        with open(wfd, 'wb', closefd=True) as to_child:
            # start process
            try:
                hp, ht, pid, tid = _winapi.CreateProcess(
                    python_exe, cmd,
                    None, None, False, 0, env, None, None)
                _winapi.CloseHandle(ht)
            except:
                _winapi.CloseHandle(rhandle)
                raise

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)
            self.finalizer = util.Finalize(self, _close_handles,
                                           (self.sentinel, int(rhandle)))

            # send information to child
            set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                set_spawning_popen(None)

    def duplicate_for_child(self, handle):
        # Only valid while this Popen is the one being pickled for spawn.
        assert self is get_spawning_popen()
        return reduction.duplicate(handle, self.sentinel)

    def wait(self, timeout=None):
        # Block (up to *timeout* seconds) on the process handle.
        if self.returncode is None:
            if timeout is None:
                msecs = _winapi.INFINITE
            else:
                msecs = max(0, int(timeout * 1000 + 0.5))

            res = _winapi.WaitForSingleObject(int(self._handle), msecs)
            if res == _winapi.WAIT_OBJECT_0:
                code = _winapi.GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    # map our TerminateProcess exit code back to -SIGTERM
                    code = -signal.SIGTERM
                self.returncode = code

        return self.returncode

    def poll(self):
        return self.wait(timeout=0)

    def terminate(self):
        if self.returncode is None:
            try:
                _winapi.TerminateProcess(int(self._handle), TERMINATE)
            except OSError:
                if self.wait(timeout=1.0) is None:
                    raise

    # Windows has no SIGKILL equivalent; kill is an alias for terminate.
    kill = terminate

    def close(self):
        self.finalizer()
+1,438 @@ +# +# Module providing the `Process` class which emulates `threading.Thread` +# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['BaseProcess', 'current_process', 'active_children', + 'parent_process'] + +# +# Imports +# + +import os +import sys +import signal +import itertools +import threading +from _weakrefset import WeakSet + +# +# +# + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +# +# Public functions +# + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + +def active_children(): + ''' + Return list of process objects corresponding to live child processes + ''' + _cleanup() + return list(_children) + + +def parent_process(): + ''' + Return process object representing the parent process + ''' + return _parent_process + +# +# +# + +def _cleanup(): + # check for processes which have finished + for p in list(_children): + if p._popen.poll() is not None: + _children.discard(p) + +# +# The `Process` class +# + +class BaseProcess(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analogous to `threading.Thread` + ''' + def _Popen(self): + raise NotImplementedError + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, + *, daemon=None): + assert group is None, 'group argument must be None for now' + count = next(_process_counter) + self._identity = _current_process._identity + (count,) + self._config = _current_process._config.copy() + self._parent_pid = os.getpid() + self._parent_name = _current_process.name + self._popen = None + self._closed = False + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) + self._name = name or type(self).__name__ + '-' + \ + ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon + 
_dangling.add(self) + + def _check_closed(self): + if self._closed: + raise ValueError("process object is closed") + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: + self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + self._check_closed() + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + assert not _current_process._config.get('daemon'), \ + 'daemonic processes are not allowed to have children' + _cleanup() + self._popen = self._Popen(self) + self._sentinel = self._popen.sentinel + # Avoid a refcycle if the target function holds an indirect + # reference to the process object (see bpo-30775) + del self._target, self._args, self._kwargs + _children.add(self) + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.terminate() + + def kill(self): + ''' + Terminate process; sends SIGKILL signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.kill() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + self._check_closed() + assert self._parent_pid == os.getpid(), 'can only join a child process' + assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + self._check_closed() + if self is _current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + + if self._popen is None: + return False + + returncode = self._popen.poll() + if returncode is None: + return True + else: + _children.discard(self) + return False + + def close(self): + ''' + Close the Process object. 
+ + This method releases resources held by the Process object. It is + an error to call this method if the child process is still running. + ''' + if self._popen is not None: + if self._popen.poll() is None: + raise ValueError("Cannot close a process while it is still running. " + "You should first call join() or terminate().") + self._popen.close() + self._popen = None + del self._sentinel + _children.discard(self) + self._closed = True + + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + assert isinstance(name, str), 'name must be a string' + self._name = name + + @property + def daemon(self): + ''' + Return whether process is a daemon + ''' + return self._config.get('daemon', False) + + @daemon.setter + def daemon(self, daemonic): + ''' + Set whether process is a daemon + ''' + assert self._popen is None, 'process has already started' + self._config['daemon'] = daemonic + + @property + def authkey(self): + return self._config['authkey'] + + @authkey.setter + def authkey(self, authkey): + ''' + Set authorization key of process + ''' + self._config['authkey'] = AuthenticationString(authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + self._check_closed() + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + self._check_closed() + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. 
+ ''' + self._check_closed() + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") from None + + def __repr__(self): + exitcode = None + if self is _current_process: + status = 'started' + elif self._closed: + status = 'closed' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + exitcode = self._popen.poll() + if exitcode is not None: + status = 'stopped' + else: + status = 'started' + + info = [type(self).__name__, 'name=%r' % self._name] + if self._popen is not None: + info.append('pid=%s' % self._popen.pid) + info.append('parent=%s' % self._parent_pid) + info.append(status) + if exitcode is not None: + exitcode = _exitcode_to_name.get(exitcode, exitcode) + info.append('exitcode=%s' % exitcode) + if self.daemon: + info.append('daemon') + return '<%s>' % ' '.join(info) + + ## + + def _bootstrap(self, parent_sentinel=None): + from . import util, context + global _current_process, _parent_process, _process_counter, _children + + try: + if self._start_method is not None: + context._force_start_method(self._start_method) + _process_counter = itertools.count(1) + _children = set() + util._close_stdin() + old_process = _current_process + _current_process = self + _parent_process = _ParentProcess( + self._parent_name, self._parent_pid, parent_sentinel) + if threading._HAVE_THREAD_NATIVE_ID: + threading.main_thread()._set_native_id() + try: + self._after_fork() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process calling self.run()') + try: + self.run() + exitcode = 0 + finally: + util._exit_function() + except SystemExit as e: + if e.code is None: + exitcode = 0 + elif isinstance(e.code, int): + exitcode = e.code + else: + sys.stderr.write(str(e.code) + '\n') + exitcode = 1 + except: + exitcode = 1 + import traceback + sys.stderr.write('Process %s:\n' % 
self.name) + traceback.print_exc() + finally: + threading._shutdown() + util.info('process exiting with exitcode %d' % exitcode) + util._flush_std_streams() + + return exitcode + + @staticmethod + def _after_fork(): + from . import util + util._finalizer_registry.clear() + util._run_after_forkers() + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + +class AuthenticationString(bytes): + def __reduce__(self): + from .context import get_spawning_popen + if get_spawning_popen() is None: + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons' + ) + return AuthenticationString, (bytes(self),) + + +# +# Create object representing the parent process +# + +class _ParentProcess(BaseProcess): + + def __init__(self, name, pid, sentinel): + self._identity = () + self._name = name + self._pid = pid + self._parent_pid = None + self._popen = None + self._closed = False + self._sentinel = sentinel + self._config = {} + + def is_alive(self): + from multiprocess.connection import wait + return not wait([self._sentinel], timeout=0) + + @property + def ident(self): + return self._pid + + def join(self, timeout=None): + ''' + Wait until parent process terminates + ''' + from multiprocess.connection import wait + wait([self._sentinel], timeout=timeout) + + pid = ident + +# +# Create object representing the main process +# + +class _MainProcess(BaseProcess): + + def __init__(self): + self._identity = () + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._closed = False + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': '/mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + # + # On MacOSX in a sandbox it may be necessary to use a + # different prefix -- see #19478. 
+ # + # Everything in self._config will be inherited by descendant + # processes. + + def close(self): + pass + + +_parent_process = None +_current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in list(signal.__dict__.items()): + if name[:3]=='SIG' and '_' not in name: + _exitcode_to_name[-signum] = f'-{name}' + +# For debug and leak testing +_dangling = WeakSet() diff --git a/vllm/lib/python3.10/site-packages/multiprocess/reduction.py b/vllm/lib/python3.10/site-packages/multiprocess/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..39b132c5e17067a215184866a25654b70b7bea1f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/reduction.py @@ -0,0 +1,284 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from abc import ABCMeta +import copyreg +import functools +import io +import os +try: + import dill as pickle +except ImportError: + import pickle +import socket +import sys + +from . 
import context + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocess.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args, **kwds): + super().__init__(*args, **kwds) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None, *args, **kwds): + buf = io.BytesIO() + cls(buf, protocol, *args, **kwds).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + +def dump(obj, file, protocol=None, *args, **kwds): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol, *args, **kwds).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False, + *, source_process=None): + '''Duplicate a handle. 
(target_process is a handle not a pid!)''' + current_process = _winapi.GetCurrentProcess() + if source_process is None: + source_process = current_process + if target_process is None: + target_process = current_process + return _winapi.DuplicateHandle( + source_process, handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. + pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. 
+ proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + # On MacOSX we should acknowledge receipt of fds -- see Issue14669 + ACKNOWLEDGE = sys.platform == 'darwin' + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if ACKNOWLEDGE and sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + if ACKNOWLEDGE: + sock.send(b'A') + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + if len(a) % 256 != msg[0]: + raise AssertionError( + "Len is {0:n} but msg[0] is {1!r}".format( + len(a), msg[0])) + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def 
DupFd(fd): + '''Return a wrapper for an fd.''' + popen_obj = context.get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from . import resource_sharer + return resource_sharer.DupFd(fd) + else: + raise ValueError('SCM_RIGHTS appears not to be available') + +# +# Try making some callable types picklable +# + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + def _reduce_socket(s): + from .resource_sharer import DupSocket + return _rebuild_socket, (DupSocket(s),) + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) + + +class AbstractReducer(metaclass=ABCMeta): + '''Abstract base class for use in implementing a Reduction class + suitable for use in replacing the standard reduction mechanism + used in multiprocess.''' + ForkingPickler = ForkingPickler + register = register + dump = dump + send_handle = send_handle + recv_handle = recv_handle + + if sys.platform == 'win32': + steal_handle = steal_handle + 
duplicate = duplicate + DupHandle = DupHandle + else: + sendfds = sendfds + recvfds = recvfds + DupFd = DupFd + + _reduce_method = _reduce_method + _reduce_method_descriptor = _reduce_method_descriptor + _rebuild_partial = _rebuild_partial + _reduce_socket = _reduce_socket + _rebuild_socket = _rebuild_socket + + def __init__(self, *args): + register(type(_C().f), _reduce_method) + register(type(list.append), _reduce_method_descriptor) + register(type(int.__add__), _reduce_method_descriptor) + register(functools.partial, _reduce_partial) + register(socket.socket, _reduce_socket) diff --git a/vllm/lib/python3.10/site-packages/multiprocess/resource_sharer.py b/vllm/lib/python3.10/site-packages/multiprocess/resource_sharer.py new file mode 100644 index 0000000000000000000000000000000000000000..66076509a1202e7a1b4d8a481f64621a4bfbbf3e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/resource_sharer.py @@ -0,0 +1,154 @@ +# +# We use a background thread for sharing fds on Unix, and for sharing sockets on +# Windows. +# +# A client which wants to pickle a resource registers it with the resource +# sharer and gets an identifier in return. The unpickling process will connect +# to the resource sharer, sends the identifier and its pid, and then receives +# the resource. +# + +import os +import signal +import socket +import sys +import threading + +from . import process +from .context import reduction +from . import util + +__all__ = ['stop'] + + +if sys.platform == 'win32': + __all__ += ['DupSocket'] + + class DupSocket(object): + '''Picklable wrapper for a socket.''' + def __init__(self, sock): + new_sock = sock.dup() + def send(conn, pid): + share = new_sock.share(pid) + conn.send_bytes(share) + self._id = _resource_sharer.register(send, new_sock.close) + + def detach(self): + '''Get the socket. 
This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + share = conn.recv_bytes() + return socket.fromshare(share) + +else: + __all__ += ['DupFd'] + + class DupFd(object): + '''Wrapper for fd which can be used at any time.''' + def __init__(self, fd): + new_fd = os.dup(fd) + def send(conn, pid): + reduction.send_handle(conn, new_fd, pid) + def close(): + os.close(new_fd) + self._id = _resource_sharer.register(send, close) + + def detach(self): + '''Get the fd. This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + return reduction.recv_handle(conn) + + +class _ResourceSharer(object): + '''Manager for resources using background thread.''' + def __init__(self): + self._key = 0 + self._cache = {} + self._lock = threading.Lock() + self._listener = None + self._address = None + self._thread = None + util.register_after_fork(self, _ResourceSharer._afterfork) + + def register(self, send, close): + '''Register resource, returning an identifier.''' + with self._lock: + if self._address is None: + self._start() + self._key += 1 + self._cache[self._key] = (send, close) + return (self._address, self._key) + + @staticmethod + def get_connection(ident): + '''Return connection from which to receive identified resource.''' + from .connection import Client + address, key = ident + c = Client(address, authkey=process.current_process().authkey) + c.send((key, os.getpid())) + return c + + def stop(self, timeout=None): + '''Stop the background thread and clear registered resources.''' + from .connection import Client + with self._lock: + if self._address is not None: + c = Client(self._address, + authkey=process.current_process().authkey) + c.send(None) + c.close() + self._thread.join(timeout) + if self._thread.is_alive(): + util.sub_warning('_ResourceSharer thread did ' + 'not stop when asked') + self._listener.close() + self._thread = None + self._address = None + self._listener = None + for key, (send, 
close) in self._cache.items(): + close() + self._cache.clear() + + def _afterfork(self): + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + self._lock._at_fork_reinit() + if self._listener is not None: + self._listener.close() + self._listener = None + self._address = None + self._thread = None + + def _start(self): + from .connection import Listener + assert self._listener is None, "Already have Listener" + util.debug('starting listener and thread for sending handles') + self._listener = Listener(authkey=process.current_process().authkey) + self._address = self._listener.address + t = threading.Thread(target=self._serve) + t.daemon = True + t.start() + self._thread = t + + def _serve(self): + if hasattr(signal, 'pthread_sigmask'): + signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) + while 1: + try: + with self._listener.accept() as conn: + msg = conn.recv() + if msg is None: + break + key, destination_pid = msg + send, close = self._cache.pop(key) + try: + send(conn, destination_pid) + finally: + close() + except: + if not util.is_exiting(): + sys.excepthook(*sys.exc_info()) + + +_resource_sharer = _ResourceSharer() +stop = _resource_sharer.stop diff --git a/vllm/lib/python3.10/site-packages/multiprocess/shared_memory.py b/vllm/lib/python3.10/site-packages/multiprocess/shared_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1e5aa17b87a232a831b5d250b8e0c2f15f574d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/shared_memory.py @@ -0,0 +1,534 @@ +"""Provides shared memory for direct access across processes. + +The API of this package is currently provisional. Refer to the +documentation for details. 
+""" + + +__all__ = [ 'SharedMemory', 'ShareableList' ] + + +from functools import partial +import mmap +import os +import errno +import struct +import secrets +import types + +if os.name == "nt": + import _winapi + _USE_POSIX = False +else: + import _posixshmem + _USE_POSIX = True + +from . import resource_tracker + +_O_CREX = os.O_CREAT | os.O_EXCL + +# FreeBSD (and perhaps other BSDs) limit names to 14 characters. +_SHM_SAFE_NAME_LENGTH = 14 + +# Shared memory block name prefix +if _USE_POSIX: + _SHM_NAME_PREFIX = '/psm_' +else: + _SHM_NAME_PREFIX = 'wnsm_' + + +def _make_filename(): + "Create a random filename for the shared memory object." + # number of random bytes to use for name + nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 + assert nbytes >= 2, '_SHM_NAME_PREFIX too long' + name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) + assert len(name) <= _SHM_SAFE_NAME_LENGTH + return name + + +class SharedMemory: + """Creates a new shared memory block or attaches to an existing + shared memory block. + + Every shared memory block is assigned a unique name. This enables + one process to create a shared memory block with a particular name + so that a different process can attach to that same shared memory + block using that same name. + + As a resource for sharing data across processes, shared memory blocks + may outlive the original process that created them. When one process + no longer needs access to a shared memory block that might still be + needed by other processes, the close() method should be called. + When a shared memory block is no longer needed by any process, the + unlink() method should be called to ensure proper cleanup.""" + + # Defaults; enables close() and unlink() to run without errors. 
+ _name = None + _fd = -1 + _mmap = None + _buf = None + _flags = os.O_RDWR + _mode = 0o600 + _prepend_leading_slash = True if _USE_POSIX else False + + def __init__(self, name=None, create=False, size=0): + if not size >= 0: + raise ValueError("'size' must be a positive integer") + if create: + self._flags = _O_CREX | os.O_RDWR + if size == 0: + raise ValueError("'size' must be a positive number different from zero") + if name is None and not self._flags & os.O_EXCL: + raise ValueError("'name' can only be None if create=True") + + if _USE_POSIX: + + # POSIX Shared Memory + + if name is None: + while True: + name = _make_filename() + try: + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + except FileExistsError: + continue + self._name = name + break + else: + name = "/" + name if self._prepend_leading_slash else name + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + self._name = name + try: + if create and size: + os.ftruncate(self._fd, size) + stats = os.fstat(self._fd) + size = stats.st_size + self._mmap = mmap.mmap(self._fd, size) + except OSError: + self.unlink() + raise + + resource_tracker.register(self._name, "shared_memory") + + else: + + # Windows Named Shared Memory + + if create: + while True: + temp_name = _make_filename() if name is None else name + # Create and reserve shared memory block with this name + # until it can be attached to by mmap. 
+ h_map = _winapi.CreateFileMapping( + _winapi.INVALID_HANDLE_VALUE, + _winapi.NULL, + _winapi.PAGE_READWRITE, + (size >> 32) & 0xFFFFFFFF, + size & 0xFFFFFFFF, + temp_name + ) + try: + last_error_code = _winapi.GetLastError() + if last_error_code == _winapi.ERROR_ALREADY_EXISTS: + if name is not None: + raise FileExistsError( + errno.EEXIST, + os.strerror(errno.EEXIST), + name, + _winapi.ERROR_ALREADY_EXISTS + ) + else: + continue + self._mmap = mmap.mmap(-1, size, tagname=temp_name) + finally: + _winapi.CloseHandle(h_map) + self._name = temp_name + break + + else: + self._name = name + # Dynamically determine the existing named shared memory + # block's size which is likely a multiple of mmap.PAGESIZE. + h_map = _winapi.OpenFileMapping( + _winapi.FILE_MAP_READ, + False, + name + ) + try: + p_buf = _winapi.MapViewOfFile( + h_map, + _winapi.FILE_MAP_READ, + 0, + 0, + 0 + ) + finally: + _winapi.CloseHandle(h_map) + try: + size = _winapi.VirtualQuerySize(p_buf) + finally: + _winapi.UnmapViewOfFile(p_buf) + self._mmap = mmap.mmap(-1, size, tagname=name) + + self._size = size + self._buf = memoryview(self._mmap) + + def __del__(self): + try: + self.close() + except OSError: + pass + + def __reduce__(self): + return ( + self.__class__, + ( + self.name, + False, + self.size, + ), + ) + + def __repr__(self): + return f'{self.__class__.__name__}({self.name!r}, size={self.size})' + + @property + def buf(self): + "A memoryview of contents of the shared memory block." + return self._buf + + @property + def name(self): + "Unique name that identifies the shared memory block." + reported_name = self._name + if _USE_POSIX and self._prepend_leading_slash: + if self._name.startswith("/"): + reported_name = self._name[1:] + return reported_name + + @property + def size(self): + "Size in bytes." 
+ return self._size + + def close(self): + """Closes access to the shared memory from this instance but does + not destroy the shared memory block.""" + if self._buf is not None: + self._buf.release() + self._buf = None + if self._mmap is not None: + self._mmap.close() + self._mmap = None + if _USE_POSIX and self._fd >= 0: + os.close(self._fd) + self._fd = -1 + + def unlink(self): + """Requests that the underlying shared memory block be destroyed. + + In order to ensure proper cleanup of resources, unlink should be + called once (and only once) across all processes which have access + to the shared memory block.""" + if _USE_POSIX and self._name: + _posixshmem.shm_unlink(self._name) + resource_tracker.unregister(self._name, "shared_memory") + + +_encoding = "utf8" + +class ShareableList: + """Pattern for a mutable list-like object shareable via a shared + memory block. It differs from the built-in list type in that these + lists can not change their overall length (i.e. no append, insert, + etc.) 
+ + Because values are packed into a memoryview as bytes, the struct + packing format for any storable value must require no more than 8 + characters to describe its format.""" + + # The shared memory area is organized as follows: + # - 8 bytes: number of items (N) as a 64-bit integer + # - (N + 1) * 8 bytes: offsets of each element from the start of the + # data area + # - K bytes: the data area storing item values (with encoding and size + # depending on their respective types) + # - N * 8 bytes: `struct` format string for each element + # - N bytes: index into _back_transforms_mapping for each element + # (for reconstructing the corresponding Python value) + _types_mapping = { + int: "q", + float: "d", + bool: "xxxxxxx?", + str: "%ds", + bytes: "%ds", + None.__class__: "xxxxxx?x", + } + _alignment = 8 + _back_transforms_mapping = { + 0: lambda value: value, # int, float, bool + 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str + 2: lambda value: value.rstrip(b'\x00'), # bytes + 3: lambda _value: None, # None + } + + @staticmethod + def _extract_recreation_code(value): + """Used in concert with _back_transforms_mapping to convert values + into the appropriate Python objects when retrieving them from + the list as well as when storing them.""" + if not isinstance(value, (str, bytes, None.__class__)): + return 0 + elif isinstance(value, str): + return 1 + elif isinstance(value, bytes): + return 2 + else: + return 3 # NoneType + + def __init__(self, sequence=None, *, name=None): + if name is None or sequence is not None: + sequence = sequence or () + _formats = [ + self._types_mapping[type(item)] + if not isinstance(item, (str, bytes)) + else self._types_mapping[type(item)] % ( + self._alignment * (len(item) // self._alignment + 1), + ) + for item in sequence + ] + self._list_len = len(_formats) + assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len + offset = 0 + # The offsets of each list element into the shared memory's + # data area (0 
meaning the start of the data area, not the start + # of the shared memory area). + self._allocated_offsets = [0] + for fmt in _formats: + offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) + self._allocated_offsets.append(offset) + _recreation_codes = [ + self._extract_recreation_code(item) for item in sequence + ] + requested_size = struct.calcsize( + "q" + self._format_size_metainfo + + "".join(_formats) + + self._format_packing_metainfo + + self._format_back_transform_codes + ) + + self.shm = SharedMemory(name, create=True, size=requested_size) + else: + self.shm = SharedMemory(name) + + if sequence is not None: + _enc = _encoding + struct.pack_into( + "q" + self._format_size_metainfo, + self.shm.buf, + 0, + self._list_len, + *(self._allocated_offsets) + ) + struct.pack_into( + "".join(_formats), + self.shm.buf, + self._offset_data_start, + *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) + ) + struct.pack_into( + self._format_packing_metainfo, + self.shm.buf, + self._offset_packing_formats, + *(v.encode(_enc) for v in _formats) + ) + struct.pack_into( + self._format_back_transform_codes, + self.shm.buf, + self._offset_back_transform_codes, + *(_recreation_codes) + ) + + else: + self._list_len = len(self) # Obtains size from offset 0 in buffer. + self._allocated_offsets = list( + struct.unpack_from( + self._format_size_metainfo, + self.shm.buf, + 1 * 8 + ) + ) + + def _get_packing_format(self, position): + "Gets the packing format for a single value stored in the list." 
+ position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + v = struct.unpack_from( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8 + )[0] + fmt = v.rstrip(b'\x00') + fmt_as_str = fmt.decode(_encoding) + + return fmt_as_str + + def _get_back_transform(self, position): + "Gets the back transformation function for a single value." + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + transform_code = struct.unpack_from( + "b", + self.shm.buf, + self._offset_back_transform_codes + position + )[0] + transform_function = self._back_transforms_mapping[transform_code] + + return transform_function + + def _set_packing_format_and_transform(self, position, fmt_as_str, value): + """Sets the packing format and back transformation code for a + single value in the list at the specified position.""" + + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + struct.pack_into( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8, + fmt_as_str.encode(_encoding) + ) + + transform_code = self._extract_recreation_code(value) + struct.pack_into( + "b", + self.shm.buf, + self._offset_back_transform_codes + position, + transform_code + ) + + def __getitem__(self, position): + position = position if position >= 0 else position + self._list_len + try: + offset = self._offset_data_start + self._allocated_offsets[position] + (v,) = struct.unpack_from( + self._get_packing_format(position), + self.shm.buf, + offset + ) + except IndexError: + raise IndexError("index out of range") + + back_transform = self._get_back_transform(position) + v = back_transform(v) + + return v + + def __setitem__(self, position, value): + position = position if position >= 0 else position + self._list_len + try: + item_offset = 
self._allocated_offsets[position] + offset = self._offset_data_start + item_offset + current_format = self._get_packing_format(position) + except IndexError: + raise IndexError("assignment index out of range") + + if not isinstance(value, (str, bytes)): + new_format = self._types_mapping[type(value)] + encoded_value = value + else: + allocated_length = self._allocated_offsets[position + 1] - item_offset + + encoded_value = (value.encode(_encoding) + if isinstance(value, str) else value) + if len(encoded_value) > allocated_length: + raise ValueError("bytes/str item exceeds available storage") + if current_format[-1] == "s": + new_format = current_format + else: + new_format = self._types_mapping[str] % ( + allocated_length, + ) + + self._set_packing_format_and_transform( + position, + new_format, + value + ) + struct.pack_into(new_format, self.shm.buf, offset, encoded_value) + + def __reduce__(self): + return partial(self.__class__, name=self.shm.name), () + + def __len__(self): + return struct.unpack_from("q", self.shm.buf, 0)[0] + + def __repr__(self): + return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' + + @property + def format(self): + "The struct packing format used by all currently stored items." + return "".join( + self._get_packing_format(i) for i in range(self._list_len) + ) + + @property + def _format_size_metainfo(self): + "The struct packing format used for the items' storage offsets." + return "q" * (self._list_len + 1) + + @property + def _format_packing_metainfo(self): + "The struct packing format used for the items' packing formats." + return "8s" * self._list_len + + @property + def _format_back_transform_codes(self): + "The struct packing format used for the items' back transforms." 
+ return "b" * self._list_len + + @property + def _offset_data_start(self): + # - 8 bytes for the list length + # - (N + 1) * 8 bytes for the element offsets + return (self._list_len + 2) * 8 + + @property + def _offset_packing_formats(self): + return self._offset_data_start + self._allocated_offsets[-1] + + @property + def _offset_back_transform_codes(self): + return self._offset_packing_formats + self._list_len * 8 + + def count(self, value): + "L.count(value) -> integer -- return number of occurrences of value." + + return sum(value == entry for entry in self) + + def index(self, value): + """L.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present.""" + + for position, entry in enumerate(self): + if value == entry: + return position + else: + raise ValueError(f"{value!r} not in this container") + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/__main__.py b/vllm/lib/python3.10/site-packages/multiprocess/tests/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0de127e06a38f895225e79aa06678dc8e58e731 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/tests/__main__.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE + +import glob +import os +import sys +import subprocess as sp +python = sys.executable +try: + import pox + python = pox.which_python(version=True) or python +except ImportError: + pass +shell = sys.platform[:3] == 'win' + +suite = os.path.dirname(__file__) or os.path.curdir +tests = glob.glob(suite + os.path.sep + 'test_*.py') +tests = glob.glob(suite + os.path.sep + '__init__.py') + \ + [i for i in tests if 'main' not in i] + + +if __name__ == '__main__': + + failed = 0 + for test in tests: + p = sp.Popen([python, test], shell=shell).wait() + if p: + failed = 1 + print('') + exit(failed) diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91cbdc973fc73ee0ff0e9a7416bfa9028c4367f6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/__main__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44cea6b6c649db3466e42c3ce3083c92449ed853 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/mp_preload.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0ce33ed6a91351c8c5ec6b463f58dcb7580eb35 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_fork.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..580188f6ceb8665779f2b239dbc6e35f3e98043b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ea15628078828e9d438ea650fb8e51b6d038115 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/multiprocess/tests/__pycache__/test_multiprocessing_spawn.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py b/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py new file mode 100644 index 0000000000000000000000000000000000000000..f331e381613626c6b406f782bb2e4afb850c658c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_fork.py @@ -0,0 +1,19 @@ +import unittest +from multiprocess.tests import install_tests_in_module_dict + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("fork is not available on Windows") + +if sys.platform == 'darwin': + raise unittest.SkipTest("test may crash on macOS (bpo-33725)") + +install_tests_in_module_dict(globals(), 'fork') + +if __name__ == '__main__': + 
unittest.main() diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py b/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..b3251208c2060ee3ccc3fc30f4904534e98058f2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_forkserver.py @@ -0,0 +1,16 @@ +import unittest +from multiprocess.tests import install_tests_in_module_dict + +import sys +from test import support + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +if sys.platform == "win32": + raise unittest.SkipTest("forkserver is not available on Windows") + +install_tests_in_module_dict(globals(), 'forkserver') + +if __name__ == '__main__': + unittest.main() diff --git a/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py b/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..ab88d2261b3c1fa6c2b7ae7065cbc270ac2be676 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/tests/test_multiprocessing_main_handling.py @@ -0,0 +1,303 @@ +# tests __main__ module handling in multiprocessing +from test import support +from test.support import import_helper +# Skip tests if _multiprocessing wasn't built. 
+import_helper.import_module('_multiprocessing') + +import importlib +import importlib.machinery +import unittest +import sys +import os +import os.path +import py_compile + +from test.support import os_helper +from test.support.script_helper import ( + make_pkg, make_script, make_zip_pkg, make_zip_script, + assert_python_ok) + +if support.PGO: + raise unittest.SkipTest("test is not helpful for PGO") + +# Look up which start methods are available to test +import multiprocess as multiprocessing +AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) + +# Issue #22332: Skip tests if sem_open implementation is broken. +import_helper.import_module('multiprocess.synchronize') + +verbose = support.verbose + +test_source = """\ +# multiprocessing includes all sorts of shenanigans to make __main__ +# attributes accessible in the subprocess in a pickle compatible way. + +# We run the "doesn't work in the interactive interpreter" example from +# the docs to make sure it *does* work from an executed __main__, +# regardless of the invocation mechanism + +import sys +import time +sys.path.extend({0}) +from multiprocess import Pool, set_start_method + +# We use this __main__ defined function in the map call below in order to +# check that multiprocessing in correctly running the unguarded +# code in child processes and then making it available as __main__ +def f(x): + return x*x + +# Check explicit relative imports +if "check_sibling" in __file__: + # We're inside a package and not in a __main__.py file + # so make sure explicit relative imports work correctly + from . 
import sibling + +if __name__ == '__main__': + start_method = sys.argv[1] + set_start_method(start_method) + results = [] + with Pool(5) as pool: + pool.map_async(f, [1, 2, 3], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + + results.sort() + print(start_method, "->", results) + + pool.join() +""".format(sys.path) + +test_source_main_skipped_in_children = """\ +# __main__.py files have an implied "if __name__ == '__main__'" so +# multiprocessing should always skip running them in child processes + +# This means we can't use __main__ defined functions in child processes, +# so we just use "int" as a passthrough operation below + +if __name__ != "__main__": + raise RuntimeError("Should only be called as __main__!") + +import sys +import time +sys.path.extend({0}) +from multiprocess import Pool, set_start_method + +start_method = sys.argv[1] +set_start_method(start_method) +results = [] +with Pool(5) as pool: + pool.map_async(int, [1, 4, 9], callback=results.extend) + start_time = getattr(time,'monotonic',time.time)() + while not results: + time.sleep(0.05) + # up to 1 min to report the results + dt = getattr(time,'monotonic',time.time)() - start_time + if dt > 60.0: + raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) + +results.sort() +print(start_method, "->", results) + +pool.join() +""".format(sys.path) + +# These helpers were copied from test_cmd_line_script & tweaked a bit... 
+ +def _make_test_script(script_dir, script_basename, + source=test_source, omit_suffix=False): + to_return = make_script(script_dir, script_basename, + source, omit_suffix) + # Hack to check explicit relative imports + if script_basename == "check_sibling": + make_script(script_dir, "sibling", "") + importlib.invalidate_caches() + return to_return + +def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source=test_source, depth=1): + to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, + source, depth) + importlib.invalidate_caches() + return to_return + +# There's no easy way to pass the script directory in to get +# -m to work (avoiding that is the whole point of making +# directories and zipfiles executable!) +# So we fake it for testing purposes with a custom launch script +launch_source = """\ +import sys, os.path, runpy +sys.path.insert(0, %s) +runpy._run_module_as_main(%r) +""" + +def _make_launch_script(script_dir, script_basename, module_name, path=None): + if path is None: + path = "os.path.dirname(__file__)" + else: + path = repr(path) + source = launch_source % (path, module_name) + to_return = make_script(script_dir, script_basename, source) + importlib.invalidate_caches() + return to_return + +class MultiProcessingCmdLineMixin(): + maxDiff = None # Show full tracebacks on subprocess failure + + def setUp(self): + if self.start_method not in AVAILABLE_START_METHODS: + self.skipTest("%r start method not available" % self.start_method) + + def _check_output(self, script_name, exit_code, out, err): + if verbose > 1: + print("Output from test script %r:" % script_name) + print(repr(out)) + self.assertEqual(exit_code, 0) + self.assertEqual(err.decode('utf-8'), '') + expected_results = "%s -> [1, 4, 9]" % self.start_method + self.assertEqual(out.decode('utf-8').strip(), expected_results) + + def _check_script(self, script_name, *cmd_line_switches): + if not __debug__: + cmd_line_switches += ('-' + 'O' * 
sys.flags.optimize,) + run_args = cmd_line_switches + (script_name, self.start_method) + rc, out, err = assert_python_ok(*run_args, __isolated=False) + self._check_output(script_name, rc, out, err) + + def test_basic_script(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + self._check_script(script_name) + + def test_basic_script_no_suffix(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script', + omit_suffix=True) + self._check_script(script_name) + + def test_ipython_workaround(self): + # Some versions of the IPython launch script are missing the + # __name__ = "__main__" guard, and multiprocessing has long had + # a workaround for that case + # See https://github.com/ipython/ipython/issues/4698 + source = test_source_main_skipped_in_children + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'ipython', + source=source) + self._check_script(script_name) + script_no_suffix = _make_test_script(script_dir, 'ipython', + source=source, + omit_suffix=True) + self._check_script(script_no_suffix) + + def test_script_compiled(self): + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, 'script') + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + self._check_script(pyc_file) + + def test_directory(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + self._check_script(script_dir) + + def test_directory_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + 
self._check_script(script_dir) + + def test_zipfile(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) + self._check_script(zip_name) + + def test_zipfile_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + script_name = _make_test_script(script_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) + self._check_script(zip_name) + + def test_module_in_package(self): + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, 'check_sibling') + launch_name = _make_launch_script(script_dir, 'launch', + 'test_pkg.check_sibling') + self._check_script(launch_name) + + def test_module_in_package_in_zipfile(self): + with os_helper.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_module_in_subpackage_in_zipfile(self): + with os_helper.temp_dir() as script_dir: + zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) + self._check_script(launch_name) + + def test_package(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + 
self._check_script(launch_name) + + def test_package_compiled(self): + source = self.main_in_children_source + with os_helper.temp_dir() as script_dir: + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir) + script_name = _make_test_script(pkg_dir, '__main__', + source=source) + compiled_name = py_compile.compile(script_name, doraise=True) + os.remove(script_name) + pyc_file = import_helper.make_legacy_pyc(script_name) + launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') + self._check_script(launch_name) + +# Test all supported start methods (setupClass skips as appropriate) + +class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'spawn' + main_in_children_source = test_source_main_skipped_in_children + +class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'fork' + main_in_children_source = test_source + +class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): + start_method = 'forkserver' + main_in_children_source = test_source_main_skipped_in_children + +def tearDownModule(): + support.reap_children() + +if __name__ == '__main__': + unittest.main() diff --git a/vllm/lib/python3.10/site-packages/multiprocess/util.py b/vllm/lib/python3.10/site-packages/multiprocess/util.py new file mode 100644 index 0000000000000000000000000000000000000000..1f248d0957c10b69da4767b37bafa8bb92419d10 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/multiprocess/util.py @@ -0,0 +1,489 @@ +# +# Module providing various facilities to other parts of the package +# +# multiprocessing/util.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import itertools +import sys +import weakref +import atexit +import threading # we want threading to install it's + # cleanup function before multiprocessing does +from subprocess import _args_from_interpreter_flags + +from . 
import process + +__all__ = [ + 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', + 'log_to_stderr', 'get_temp_dir', 'register_after_fork', + 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', + 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', + ] + +# +# Logging +# + +NOTSET = 0 +SUBDEBUG = 5 +DEBUG = 10 +INFO = 20 +SUBWARNING = 25 + +LOGGER_NAME = 'multiprocess' +DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' + +_logger = None +_log_to_stderr = False + +def sub_debug(msg, *args): + if _logger: + _logger.log(SUBDEBUG, msg, *args) + +def debug(msg, *args): + if _logger: + _logger.log(DEBUG, msg, *args) + +def info(msg, *args): + if _logger: + _logger.log(INFO, msg, *args) + +def sub_warning(msg, *args): + if _logger: + _logger.log(SUBWARNING, msg, *args) + +def get_logger(): + ''' + Returns logger used by multiprocess + ''' + global _logger + import logging + + logging._acquireLock() + try: + if not _logger: + + _logger = logging.getLogger(LOGGER_NAME) + _logger.propagate = 0 + + # XXX multiprocessing should cleanup before logging + if hasattr(atexit, 'unregister'): + atexit.unregister(_exit_function) + atexit.register(_exit_function) + else: + atexit._exithandlers.remove((_exit_function, (), {})) + atexit._exithandlers.append((_exit_function, (), {})) + + finally: + logging._releaseLock() + + return _logger + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + global _log_to_stderr + import logging + + logger = get_logger() + formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + if level: + logger.setLevel(level) + _log_to_stderr = True + return _logger + + +# Abstract socket support + +def _platform_supports_abstract_sockets(): + if sys.platform == "linux": + return True + if hasattr(sys, 'getandroidapilevel'): + return True + return False + + +def 
is_abstract_socket_namespace(address): + if not address: + return False + if isinstance(address, bytes): + return address[0] == 0 + elif isinstance(address, str): + return address[0] == "\0" + raise TypeError(f'address type of {address!r} unrecognized') + + +abstract_sockets_supported = _platform_supports_abstract_sockets() + +# +# Function returning a temp directory which will be removed on exit +# + +def _remove_temp_dir(rmtree, tempdir): + rmtree(tempdir) + + current_process = process.current_process() + # current_process() can be None if the finalizer is called + # late during Python finalization + if current_process is not None: + current_process._config['tempdir'] = None + +def get_temp_dir(): + # get name of a temp directory which will be automatically cleaned up + tempdir = process.current_process()._config.get('tempdir') + if tempdir is None: + import shutil, tempfile + tempdir = tempfile.mkdtemp(prefix='pymp-') + info('created temp directory %s', tempdir) + # keep a strong reference to shutil.rmtree(), since the finalizer + # can be called late during Python shutdown + Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), + exitpriority=-100) + process.current_process()._config['tempdir'] = tempdir + return tempdir + +# +# Support for reinitialization of objects when bootstrapping a child process +# + +_afterfork_registry = weakref.WeakValueDictionary() +_afterfork_counter = itertools.count() + +def _run_after_forkers(): + items = list(_afterfork_registry.items()) + items.sort() + for (index, ident, func), obj in items: + try: + func(obj) + except Exception as e: + info('after forker raised exception %s', e) + +def register_after_fork(obj, func): + _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj + +# +# Finalization using weakrefs +# + +_finalizer_registry = {} +_finalizer_counter = itertools.count() + + +class Finalize(object): + ''' + Class which supports object finalization using weakrefs + ''' + def __init__(self, obj, 
callback, args=(), kwargs=None, exitpriority=None): + if (exitpriority is not None) and not isinstance(exitpriority,int): + raise TypeError( + "Exitpriority ({0!r}) must be None or int, not {1!s}".format( + exitpriority, type(exitpriority))) + + if obj is not None: + self._weakref = weakref.ref(obj, self) + elif exitpriority is None: + raise ValueError("Without object, exitpriority cannot be None") + + self._callback = callback + self._args = args + self._kwargs = kwargs or {} + self._key = (exitpriority, next(_finalizer_counter)) + self._pid = os.getpid() + + _finalizer_registry[self._key] = self + + def __call__(self, wr=None, + # Need to bind these locally because the globals can have + # been cleared at shutdown + _finalizer_registry=_finalizer_registry, + sub_debug=sub_debug, getpid=os.getpid): + ''' + Run the callback unless it has already been called or cancelled + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + sub_debug('finalizer no longer registered') + else: + if self._pid != getpid(): + sub_debug('finalizer ignored because different process') + res = None + else: + sub_debug('finalizer calling %s with args %s and kwargs %s', + self._callback, self._args, self._kwargs) + res = self._callback(*self._args, **self._kwargs) + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + return res + + def cancel(self): + ''' + Cancel finalization of the object + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + pass + else: + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + + def still_active(self): + ''' + Return whether this finalizer is still waiting to invoke callback + ''' + return self._key in _finalizer_registry + + def __repr__(self): + try: + obj = self._weakref() + except (AttributeError, TypeError): + obj = None + + if obj is None: + return '<%s object, dead>' % self.__class__.__name__ + + x = '<%s object, callback=%s' % ( + self.__class__.__name__, + 
getattr(self._callback, '__name__', self._callback)) + if self._args: + x += ', args=' + str(self._args) + if self._kwargs: + x += ', kwargs=' + str(self._kwargs) + if self._key[0] is not None: + x += ', exitpriority=' + str(self._key[0]) + return x + '>' + + +def _run_finalizers(minpriority=None): + ''' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. + ''' + if _finalizer_registry is None: + # This function may be called after this module's globals are + # destroyed. See the _exit_function function in this module for more + # notes. + return + + if minpriority is None: + f = lambda p : p[0] is not None + else: + f = lambda p : p[0] is not None and p[0] >= minpriority + + # Careful: _finalizer_registry may be mutated while this function + # is running (either by a GC run or by another thread). + + # list(_finalizer_registry) should be atomic, while + # list(_finalizer_registry.items()) is not. + keys = [key for key in list(_finalizer_registry) if f(key)] + keys.sort(reverse=True) + + for key in keys: + finalizer = _finalizer_registry.get(key) + # key may have been removed from the registry + if finalizer is not None: + sub_debug('calling %s', finalizer) + try: + finalizer() + except Exception: + import traceback + traceback.print_exc() + + if minpriority is None: + _finalizer_registry.clear() + +# +# Clean up on exit +# + +def is_exiting(): + ''' + Returns true if the process is shutting down + ''' + return _exiting or _exiting is None + +_exiting = False + +def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, + active_children=process.active_children, + current_process=process.current_process): + # We hold on to references to functions in the arglist due to the + # situation described below, where this function is called after this + # module's globals are destroyed. 
+ + global _exiting + + if not _exiting: + _exiting = True + + info('process shutting down') + debug('running all "atexit" finalizers with priority >= 0') + _run_finalizers(0) + + if current_process() is not None: + # We check if the current process is None here because if + # it's None, any call to ``active_children()`` will raise + # an AttributeError (active_children winds up trying to + # get attributes from util._current_process). One + # situation where this can happen is if someone has + # manipulated sys.modules, causing this module to be + # garbage collected. The destructor for the module type + # then replaces all values in the module dict with None. + # For instance, after setuptools runs a test it replaces + # sys.modules with a copy created earlier. See issues + # #9775 and #15881. Also related: #4106, #9205, and + # #9207. + + for p in active_children(): + if p.daemon: + info('calling terminate() for daemon %s', p.name) + p._popen.terminate() + + for p in active_children(): + info('calling join() for process %s', p.name) + p.join() + + debug('running the remaining "atexit" finalizers') + _run_finalizers() + +atexit.register(_exit_function) + +# +# Some fork aware types +# + +class ForkAwareThreadLock(object): + def __init__(self): + self._lock = threading.Lock() + self.acquire = self._lock.acquire + self.release = self._lock.release + register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) + + def _at_fork_reinit(self): + self._lock._at_fork_reinit() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + +class ForkAwareLocal(threading.local): + def __init__(self): + register_after_fork(self, lambda obj : obj.__dict__.clear()) + def __reduce__(self): + return type(self), () + +# +# Close fds except those specified +# + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + +def close_all_fds_except(fds): + fds = list(fds) + [-1, MAXFD] + fds.sort() + 
assert fds[-1] == MAXFD, 'fd too large' + for i in range(len(fds) - 1): + os.closerange(fds[i]+1, fds[i+1]) +# +# Close sys.stdin and replace stdin with os.devnull +# + +def _close_stdin(): + if sys.stdin is None: + return + + try: + sys.stdin.close() + except (OSError, ValueError): + pass + + try: + fd = os.open(os.devnull, os.O_RDONLY) + try: + sys.stdin = open(fd, encoding="utf-8", closefd=False) + except: + os.close(fd) + raise + except (OSError, ValueError): + pass + +# +# Flush standard streams, if any +# + +def _flush_std_streams(): + try: + sys.stdout.flush() + except (AttributeError, ValueError): + pass + try: + sys.stderr.flush() + except (AttributeError, ValueError): + pass + +# +# Start a program with only specified fds kept open +# + +def spawnv_passfds(path, args, passfds): + import _posixsubprocess + passfds = tuple(sorted(map(int, passfds))) + errpipe_read, errpipe_write = os.pipe() + try: + return _posixsubprocess.fork_exec( + args, [os.fsencode(path)], True, passfds, None, None, + -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, + False, False, None, None, None, -1, None) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + + +def close_fds(*fds): + """Close each file descriptor given as an argument""" + for fd in fds: + os.close(fd) + + +def _cleanup_tests(): + """Cleanup multiprocessing resources when multiprocessing tests + completed.""" + + from test import support + + # cleanup multiprocessing + process._cleanup() + + # Stop the ForkServer process if it's running + from multiprocess import forkserver + forkserver._forkserver._stop() + + # Stop the ResourceTracker process if it's running + from multiprocess import resource_tracker + resource_tracker._resource_tracker._stop() + + # bpo-37421: Explicitly call _run_finalizers() to remove immediately + # temporary directories created by multiprocessing.util.get_temp_dir(). 
+ _run_finalizers() + support.gc_collect() + + support.reap_children() diff --git a/vllm/lib/python3.10/site-packages/py/__pycache__/_builtin.cpython-310.pyc b/vllm/lib/python3.10/site-packages/py/__pycache__/_builtin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88c07ca73a14a1edd80644286364a9e85736f389 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/py/__pycache__/_builtin.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/py/__pycache__/_version.cpython-310.pyc b/vllm/lib/python3.10/site-packages/py/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f2cdb384a534475c2677779fb29acc9827871ff Binary files /dev/null and b/vllm/lib/python3.10/site-packages/py/__pycache__/_version.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/py/__pycache__/_xmlgen.cpython-310.pyc b/vllm/lib/python3.10/site-packages/py/__pycache__/_xmlgen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c7c6e17353b65d8552678a7660af3fc17670f9e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/py/__pycache__/_xmlgen.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/py/_path/common.py b/vllm/lib/python3.10/site-packages/py/_path/common.py new file mode 100644 index 0000000000000000000000000000000000000000..2364e5fef504a6b20000efc9b2d47bff1defef27 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/py/_path/common.py @@ -0,0 +1,459 @@ +""" +""" +import warnings +import os +import sys +import posixpath +import fnmatch +import py + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt') + +try: + # FileNotFoundError might happen in py34, and is not available with py27. 
+ import_errors = (ImportError, FileNotFoundError) +except NameError: + import_errors = (ImportError,) + +try: + from os import fspath +except ImportError: + def fspath(path): + """ + Return the string representation of the path. + If str or bytes is passed in, it is returned unchanged. + This code comes from PEP 519, modified to support earlier versions of + python. + + This is required for python < 3.6. + """ + if isinstance(path, (py.builtin.text, py.builtin.bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. + path_type = type(path) + try: + return path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + try: + import pathlib + except import_errors: + pass + else: + if isinstance(path, pathlib.PurePath): + return py.builtin.text(path) + + raise TypeError("expected str, bytes or os.PathLike object, not " + + path_type.__name__) + +class Checkers: + _depend_on_existence = 'exists', 'link', 'dir', 'file' + + def __init__(self, path): + self.path = path + + def dir(self): + raise NotImplementedError + + def file(self): + raise NotImplementedError + + def dotfile(self): + return self.path.basename.startswith('.') + + def ext(self, arg): + if not arg.startswith('.'): + arg = '.' 
+ arg + return self.path.ext == arg + + def exists(self): + raise NotImplementedError + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == 'not': + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError( + "no %r checker available for %r" % (name, self.path)) + try: + if py.code.getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = 'not' + name + if name in kw: + if not kw.get(name): + return False + return True + +class NeverRaised(Exception): + pass + +class PathBase(object): + """ shared implementation for filesystem path objects.""" + Checkers = Checkers + + def __div__(self, other): + return self.join(fspath(other)) + __truediv__ = __div__ # py3k + + def basename(self): + """ basename part of path. """ + return self._getbyspec('basename')[0] + basename = property(basename, None, None, basename.__doc__) + + def dirname(self): + """ dirname part of path. 
""" + return self._getbyspec('dirname')[0] + dirname = property(dirname, None, None, dirname.__doc__) + + def purebasename(self): + """ pure base name of the path.""" + return self._getbyspec('purebasename')[0] + purebasename = property(purebasename, None, None, purebasename.__doc__) + + def ext(self): + """ extension of the path (including the '.').""" + return self._getbyspec('ext')[0] + ext = property(ext, None, None, ext.__doc__) + + def dirpath(self, *args, **kwargs): + """ return the directory path joined with any given path arguments. """ + return self.new(basename='').join(*args, **kwargs) + + def read_binary(self): + """ read and return a bytestring from reading the path. """ + with self.open('rb') as f: + return f.read() + + def read_text(self, encoding): + """ read and return a Unicode string from reading the path. """ + with self.open("r", encoding=encoding) as f: + return f.read() + + + def read(self, mode='r'): + """ read and return a bytestring from reading the path. """ + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """ read and return a list of lines from the path. if cr is False, the +newline will be removed from the end of each line. """ + if sys.version_info < (3, ): + mode = 'rU' + else: # python 3 deprecates mode "U" in favor of "newline" option + mode = 'r' + + if not cr: + content = self.read(mode) + return content.split('\n') + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """ (deprecated) return object unpickled from self.read() """ + f = self.open('rb') + try: + import pickle + return py.error.checked_call(pickle.load, f) + finally: + f.close() + + def move(self, target): + """ move this path to target. 
""" + if target.relto(self): + raise py.error.EINVAL( + target, + "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except py.error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def __repr__(self): + """ return a string representation of this path. """ + return repr(str(self)) + + def check(self, **kw): + """ check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. + + valid checkers:: + + file=1 # is a file + file=0 # is not a file (may not even exist) + dir=1 # is a dir + link=1 # is a link + exists=1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + kw = {'exists': 1} + return self.Checkers(self)._evaluate(kw) + + def fnmatch(self, pattern): + """return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """ return a string which is the relative part of the path + to the given 'relpath'. 
+ """ + if not isinstance(relpath, (str, PathBase)): + raise TypeError("%r: not a string or path object" %(relpath,)) + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + #assert strrelpath[-1] == self.sep + #assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, '_name', None) == 'nt': + if os.path.normcase(strself).startswith( + os.path.normcase(strrelpath)): + return strself[len(strrelpath):] + elif strself.startswith(strrelpath): + return strself[len(strrelpath):] + return "" + + def ensure_dir(self, *args): + """ ensure the path joined with args is a directory. """ + return self.ensure(*args, **{"dir": True}) + + def bestrelpath(self, dest): + """ return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + l = [os.pardir] * n + if reldest: + l.append(reldest) + target = dest.sep.join(l) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """ return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + l = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + l.append(current) + if not reverse: + l.reverse() + return l + + def common(self, other): + """ return the common part shared with the other path + or None if there is no common part. 
+ """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """ return new path object with 'other' added to the basename""" + return self.new(basename=self.basename+str(other)) + + def __cmp__(self, other): + """ return sort value (-1, 0, +1). """ + try: + return cmp(self.strpath, other.strpath) + except AttributeError: + return cmp(str(self), str(other)) # self.path, other.path) + + def __lt__(self, other): + try: + return self.strpath < other.strpath + except AttributeError: + return str(self) < str(other) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """ yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. + """ + for x in Visitor(fil, rec, ignore, bf, sort).gen(self): + yield x + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, '__call__'): + warnings.warn(DeprecationWarning( + "listdir(sort=callable) is deprecated and breaks on python3" + ), stacklevel=3) + res.sort(sort) + else: + res.sort() + + def samefile(self, other): + """ return True if other refers to the same stat object as self. 
""" + return self.strpath == str(other) + + def __fspath__(self): + return self.strpath + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, py.builtin._basestring): + fil = FNMatcher(fil) + if isinstance(rec, py.builtin._basestring): + self.rec = FNMatcher(rec) + elif not hasattr(rec, '__call__') and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = sort and sorted or (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort([p for p in entries + if p.check(dir=1) and (rec is None or rec(p))]) + if not self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if (pattern.find(path.sep) == -1 and + iswin32 and + pattern.find(posixpath.sep) != -1): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? 
+ if not os.path.isabs(pattern): + pattern = '*' + path.sep + pattern + return fnmatch.fnmatch(name, pattern) diff --git a/vllm/lib/python3.10/site-packages/vllm-0.7.3.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/vllm-0.7.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/vllm-0.7.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/vllm-0.7.3.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/vllm-0.7.3.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/vllm-0.7.3.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.