ZTWHHH committed on
Commit
53aee92
·
verified ·
1 Parent(s): 75bebda

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. parrot/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so +3 -0
  3. parrot/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so +3 -0
  4. parrot/lib/python3.10/site-packages/scipy/special/special/binom.h +89 -0
  5. parrot/lib/python3.10/site-packages/scipy/special/special/cephes/besselpoly.h +51 -0
  6. parrot/lib/python3.10/site-packages/scipy/special/special/cephes/igami.h +313 -0
  7. parrot/lib/python3.10/site-packages/scipy/special/special/config.h +226 -0
  8. parrot/lib/python3.10/site-packages/scipy/special/special/digamma.h +204 -0
  9. parrot/lib/python3.10/site-packages/scipy/special/special/error.h +64 -0
  10. parrot/lib/python3.10/site-packages/scipy/special/special/evalpoly.h +47 -0
  11. parrot/lib/python3.10/site-packages/scipy/special/special/hyp2f1.h +694 -0
  12. parrot/lib/python3.10/site-packages/scipy/special/special/lambertw.h +150 -0
  13. parrot/lib/python3.10/site-packages/scipy/special/special/loggamma.h +163 -0
  14. parrot/lib/python3.10/site-packages/scipy/special/special/tools.h +269 -0
  15. parrot/lib/python3.10/site-packages/scipy/special/special/trig.h +111 -0
  16. parrot/lib/python3.10/site-packages/scipy/special/special/wright_bessel.h +841 -0
  17. parrot/lib/python3.10/site-packages/scipy/special/special/zlog1.h +35 -0
  18. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spence.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ufunc_signatures.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/scipy/special/tests/data/__init__.py +0 -0
  28. parrot/lib/python3.10/site-packages/scipy/special/tests/data/__pycache__/__init__.cpython-310.pyc +0 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h +23 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h +30 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface.h +39 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_compositeimplicitautograd_dispatch.h +26 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h +44 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_ops.h +105 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h +26 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h +50 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h +25 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_backward_cuda_dispatch.h +23 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h +91 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h +24 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_ops.h +28 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_native.h +21 -0
  43. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h +25 -0
  44. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split.h +69 -0
  45. vllm/lib/python3.10/site-packages/dotenv/__pycache__/__init__.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/dotenv/__pycache__/__main__.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/dotenv/__pycache__/cli.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/dotenv/__pycache__/ipython.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/dotenv/__pycache__/main.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/dotenv/__pycache__/parser.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1582,3 +1582,5 @@ vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-75.8.0
1582
  vllm/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1583
  vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/pip-24.3.1-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
1584
  vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-75.3.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
 
 
 
1582
  vllm/lib/python3.10/site-packages/pydantic_core/__pycache__/core_schema.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1583
  vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/pip-24.3.1-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
1584
  vllm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-75.3.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
1585
+ parrot/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1586
+ parrot/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5073ae2a918ead9c332ed5f334e8b9ac1c2552cb2c36a961a27270819b0883a4
3
+ size 346369
parrot/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf1aa7a2910bf166794c476bcdccb503d06ad6542b66d6128d1a23c04ebf7c14
3
+ size 301392
parrot/lib/python3.10/site-packages/scipy/special/special/binom.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2024.
2
+ *
3
+ * Original authors: Pauli Virtanen, Eric Moore
4
+ */
5
+
6
+ // Binomial coefficient
7
+
8
+ #pragma once
9
+
10
+ #include "config.h"
11
+
12
+ #include "cephes/beta.h"
13
+ #include "cephes/gamma.h"
14
+
15
+ namespace special {
16
+
17
+ SPECFUN_HOST_DEVICE inline double binom(double n, double k) {
18
+ double kx, nx, num, den, dk, sgn;
19
+
20
+ if (n < 0) {
21
+ nx = std::floor(n);
22
+ if (n == nx) {
23
+ // Undefined
24
+ return std::numeric_limits<double>::quiet_NaN();
25
+ }
26
+ }
27
+
28
+ kx = std::floor(k);
29
+ if (k == kx && (std::abs(n) > 1E-8 || n == 0)) {
30
+ /* Integer case: use multiplication formula for less rounding
31
+ * error for cases where the result is an integer.
32
+ *
33
+ * This cannot be used for small nonzero n due to loss of
34
+ * precision. */
35
+ nx = std::floor(n);
36
+ if (nx == n && kx > nx / 2 && nx > 0) {
37
+ // Reduce kx by symmetry
38
+ kx = nx - kx;
39
+ }
40
+
41
+ if (kx >= 0 && kx < 20) {
42
+ num = 1.0;
43
+ den = 1.0;
44
+ for (int i = 1; i < 1 + static_cast<int>(kx); i++) {
45
+ num *= i + n - kx;
46
+ den *= i;
47
+ if (std::abs(num) > 1E50) {
48
+ num /= den;
49
+ den = 1.0;
50
+ }
51
+ }
52
+ return num / den;
53
+ }
54
+ }
55
+
56
+ // general case
57
+ if (n >= 1E10 * k and k > 0) {
58
+ // avoid under/overflows intermediate results
59
+ return std::exp(-cephes::lbeta(1 + n - k, 1 + k) - std::log(n + 1));
60
+ }
61
+ if (k > 1E8 * std::abs(n)) {
62
+ // avoid loss of precision
63
+ num = cephes::Gamma(1 + n) / std::abs(k) + cephes::Gamma(1 + n) * n / (2 * k * k); // + ...
64
+ num /= M_PI * std::pow(std::abs(k), n);
65
+ if (k > 0) {
66
+ kx = std::floor(k);
67
+ if (static_cast<int>(kx) == kx) {
68
+ dk = k - kx;
69
+ sgn = (static_cast<int>(kx) % 2 == 0) ? 1 : -1;
70
+ } else {
71
+ dk = k;
72
+ sgn = 1;
73
+ }
74
+ return num * std::sin((dk - n) * M_PI) * sgn;
75
+ }
76
+ kx = std::floor(k);
77
+ if (static_cast<int>(kx) == kx) {
78
+ return 0;
79
+ }
80
+ return num * std::sin(k * M_PI);
81
+ }
82
+ return 1 / (n + 1) / cephes::beta(1 + n - k, 1 + k);
83
+ }
84
+
85
+ SPECFUN_HOST_DEVICE inline float binom(float n, float k) {
86
+ return binom(static_cast<double>(n), static_cast<double>(k));
87
+ }
88
+
89
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/cephes/besselpoly.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated into C++ by SciPy developers in 2024.
2
+ *
3
+ * This was not part of the original cephes library.
4
+ */
5
+ #pragma once
6
+
7
+ #include "../config.h"
8
+ #include "gamma.h"
9
+
10
+ namespace special {
11
+ namespace cephes {
12
+ namespace detail {
13
+
14
+ constexpr double besselpoly_EPS = 1.0e-17;
15
+ }
16
+
17
+ SPECFUN_HOST_DEVICE inline double besselpoly(double a, double lambda, double nu) {
18
+
19
+ int m, factor = 0;
20
+ double Sm, relerr, Sol;
21
+ double sum = 0.0;
22
+
23
+ /* Special handling for a = 0.0 */
24
+ if (a == 0.0) {
25
+ if (nu == 0.0) {
26
+ return 1.0 / (lambda + 1);
27
+ } else {
28
+ return 0.0;
29
+ }
30
+ }
31
+ /* Special handling for negative and integer nu */
32
+ if ((nu < 0) && (std::floor(nu) == nu)) {
33
+ nu = -nu;
34
+ factor = static_cast<int>(nu) % 2;
35
+ }
36
+ Sm = std::exp(nu * std::log(a)) / (Gamma(nu + 1) * (lambda + nu + 1));
37
+ m = 0;
38
+ do {
39
+ sum += Sm;
40
+ Sol = Sm;
41
+ Sm *= -a * a * (lambda + nu + 1 + 2 * m) / ((nu + m + 1) * (m + 1) * (lambda + nu + 1 + 2 * m + 2));
42
+ m++;
43
+ relerr = std::abs((Sm - Sol) / Sm);
44
+ } while (relerr > detail::besselpoly_EPS && m < 1000);
45
+ if (!factor)
46
+ return sum;
47
+ else
48
+ return -sum;
49
+ }
50
+ } // namespace cephes
51
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/cephes/igami.h ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated into C++ by SciPy developers in 2024.
2
+ * Original header with Copyright information appears below.
3
+ */
4
+
5
+ /*
6
+ * (C) Copyright John Maddock 2006.
7
+ * Use, modification and distribution are subject to the
8
+ * Boost Software License, Version 1.0. (See accompanying file
9
+ * LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
10
+ */
11
+ #pragma once
12
+
13
+ #include "../config.h"
14
+ #include "../error.h"
15
+
16
+ #include "const.h"
17
+ #include "gamma.h"
18
+ #include "igam.h"
19
+ #include "polevl.h"
20
+
21
+ namespace special {
22
+ namespace cephes {
23
+
24
+ namespace detail {
25
+
26
+ SPECFUN_HOST_DEVICE double find_inverse_s(double p, double q) {
27
+ /*
28
+ * Computation of the Incomplete Gamma Function Ratios and their Inverse
29
+ * ARMIDO R. DIDONATO and ALFRED H. MORRIS, JR.
30
+ * ACM Transactions on Mathematical Software, Vol. 12, No. 4,
31
+ * December 1986, Pages 377-393.
32
+ *
33
+ * See equation 32.
34
+ */
35
+ double s, t;
36
+ constexpr double a[4] = {0.213623493715853, 4.28342155967104, 11.6616720288968, 3.31125922108741};
37
+ constexpr double b[5] = {0.3611708101884203e-1, 1.27364489782223, 6.40691597760039, 6.61053765625462, 1};
38
+
39
+ if (p < 0.5) {
40
+ t = std::sqrt(-2 * std::log(p));
41
+ } else {
42
+ t = std::sqrt(-2 * std::log(q));
43
+ }
44
+ s = t - polevl(t, a, 3) / polevl(t, b, 4);
45
+ if (p < 0.5)
46
+ s = -s;
47
+ return s;
48
+ }
49
+
50
+ SPECFUN_HOST_DEVICE inline double didonato_SN(double a, double x, unsigned N, double tolerance) {
51
+ /*
52
+ * Computation of the Incomplete Gamma Function Ratios and their Inverse
53
+ * ARMIDO R. DIDONATO and ALFRED H. MORRIS, JR.
54
+ * ACM Transactions on Mathematical Software, Vol. 12, No. 4,
55
+ * December 1986, Pages 377-393.
56
+ *
57
+ * See equation 34.
58
+ */
59
+ double sum = 1.0;
60
+
61
+ if (N >= 1) {
62
+ unsigned i;
63
+ double partial = x / (a + 1);
64
+
65
+ sum += partial;
66
+ for (i = 2; i <= N; ++i) {
67
+ partial *= x / (a + i);
68
+ sum += partial;
69
+ if (partial < tolerance) {
70
+ break;
71
+ }
72
+ }
73
+ }
74
+ return sum;
75
+ }
76
+
77
+ SPECFUN_HOST_DEVICE inline double find_inverse_gamma(double a, double p, double q) {
78
+ /*
79
+ * In order to understand what's going on here, you will
80
+ * need to refer to:
81
+ *
82
+ * Computation of the Incomplete Gamma Function Ratios and their Inverse
83
+ * ARMIDO R. DIDONATO and ALFRED H. MORRIS, JR.
84
+ * ACM Transactions on Mathematical Software, Vol. 12, No. 4,
85
+ * December 1986, Pages 377-393.
86
+ */
87
+ double result;
88
+
89
+ if (a == 1) {
90
+ if (q > 0.9) {
91
+ result = -std::log1p(-p);
92
+ } else {
93
+ result = -std::log(q);
94
+ }
95
+ } else if (a < 1) {
96
+ double g = special::cephes::Gamma(a);
97
+ double b = q * g;
98
+
99
+ if ((b > 0.6) || ((b >= 0.45) && (a >= 0.3))) {
100
+ /* DiDonato & Morris Eq 21:
101
+ *
102
+ * There is a slight variation from DiDonato and Morris here:
103
+ * the first form given here is unstable when p is close to 1,
104
+ * making it impossible to compute the inverse of Q(a,x) for small
105
+ * q. Fortunately the second form works perfectly well in this case.
106
+ */
107
+ double u;
108
+ if ((b * q > 1e-8) && (q > 1e-5)) {
109
+ u = std::pow(p * g * a, 1 / a);
110
+ } else {
111
+ u = std::exp((-q / a) - SCIPY_EULER);
112
+ }
113
+ result = u / (1 - (u / (a + 1)));
114
+ } else if ((a < 0.3) && (b >= 0.35)) {
115
+ /* DiDonato & Morris Eq 22: */
116
+ double t = std::exp(-SCIPY_EULER - b);
117
+ double u = t * std::exp(t);
118
+ result = t * std::exp(u);
119
+ } else if ((b > 0.15) || (a >= 0.3)) {
120
+ /* DiDonato & Morris Eq 23: */
121
+ double y = -std::log(b);
122
+ double u = y - (1 - a) * std::log(y);
123
+ result = y - (1 - a) * std::log(u) - std::log(1 + (1 - a) / (1 + u));
124
+ } else if (b > 0.1) {
125
+ /* DiDonato & Morris Eq 24: */
126
+ double y = -std::log(b);
127
+ double u = y - (1 - a) * std::log(y);
128
+ result = y - (1 - a) * std::log(u) -
129
+ std::log((u * u + 2 * (3 - a) * u + (2 - a) * (3 - a)) / (u * u + (5 - a) * u + 2));
130
+ } else {
131
+ /* DiDonato & Morris Eq 25: */
132
+ double y = -std::log(b);
133
+ double c1 = (a - 1) * std::log(y);
134
+ double c1_2 = c1 * c1;
135
+ double c1_3 = c1_2 * c1;
136
+ double c1_4 = c1_2 * c1_2;
137
+ double a_2 = a * a;
138
+ double a_3 = a_2 * a;
139
+
140
+ double c2 = (a - 1) * (1 + c1);
141
+ double c3 = (a - 1) * (-(c1_2 / 2) + (a - 2) * c1 + (3 * a - 5) / 2);
142
+ double c4 = (a - 1) * ((c1_3 / 3) - (3 * a - 5) * c1_2 / 2 + (a_2 - 6 * a + 7) * c1 +
143
+ (11 * a_2 - 46 * a + 47) / 6);
144
+ double c5 = (a - 1) * (-(c1_4 / 4) + (11 * a - 17) * c1_3 / 6 + (-3 * a_2 + 13 * a - 13) * c1_2 +
145
+ (2 * a_3 - 25 * a_2 + 72 * a - 61) * c1 / 2 +
146
+ (25 * a_3 - 195 * a_2 + 477 * a - 379) / 12);
147
+
148
+ double y_2 = y * y;
149
+ double y_3 = y_2 * y;
150
+ double y_4 = y_2 * y_2;
151
+ result = y + c1 + (c2 / y) + (c3 / y_2) + (c4 / y_3) + (c5 / y_4);
152
+ }
153
+ } else {
154
+ /* DiDonato and Morris Eq 31: */
155
+ double s = find_inverse_s(p, q);
156
+
157
+ double s_2 = s * s;
158
+ double s_3 = s_2 * s;
159
+ double s_4 = s_2 * s_2;
160
+ double s_5 = s_4 * s;
161
+ double ra = std::sqrt(a);
162
+
163
+ double w = a + s * ra + (s_2 - 1) / 3;
164
+ w += (s_3 - 7 * s) / (36 * ra);
165
+ w -= (3 * s_4 + 7 * s_2 - 16) / (810 * a);
166
+ w += (9 * s_5 + 256 * s_3 - 433 * s) / (38880 * a * ra);
167
+
168
+ if ((a >= 500) && (std::abs(1 - w / a) < 1e-6)) {
169
+ result = w;
170
+ } else if (p > 0.5) {
171
+ if (w < 3 * a) {
172
+ result = w;
173
+ } else {
174
+ double D = std::fmax(2, a * (a - 1));
175
+ double lg = special::cephes::lgam(a);
176
+ double lb = std::log(q) + lg;
177
+ if (lb < -D * 2.3) {
178
+ /* DiDonato and Morris Eq 25: */
179
+ double y = -lb;
180
+ double c1 = (a - 1) * std::log(y);
181
+ double c1_2 = c1 * c1;
182
+ double c1_3 = c1_2 * c1;
183
+ double c1_4 = c1_2 * c1_2;
184
+ double a_2 = a * a;
185
+ double a_3 = a_2 * a;
186
+
187
+ double c2 = (a - 1) * (1 + c1);
188
+ double c3 = (a - 1) * (-(c1_2 / 2) + (a - 2) * c1 + (3 * a - 5) / 2);
189
+ double c4 = (a - 1) * ((c1_3 / 3) - (3 * a - 5) * c1_2 / 2 + (a_2 - 6 * a + 7) * c1 +
190
+ (11 * a_2 - 46 * a + 47) / 6);
191
+ double c5 =
192
+ (a - 1) * (-(c1_4 / 4) + (11 * a - 17) * c1_3 / 6 + (-3 * a_2 + 13 * a - 13) * c1_2 +
193
+ (2 * a_3 - 25 * a_2 + 72 * a - 61) * c1 / 2 +
194
+ (25 * a_3 - 195 * a_2 + 477 * a - 379) / 12);
195
+
196
+ double y_2 = y * y;
197
+ double y_3 = y_2 * y;
198
+ double y_4 = y_2 * y_2;
199
+ result = y + c1 + (c2 / y) + (c3 / y_2) + (c4 / y_3) + (c5 / y_4);
200
+ } else {
201
+ /* DiDonato and Morris Eq 33: */
202
+ double u = -lb + (a - 1) * std::log(w) - std::log(1 + (1 - a) / (1 + w));
203
+ result = -lb + (a - 1) * std::log(u) - std::log(1 + (1 - a) / (1 + u));
204
+ }
205
+ }
206
+ } else {
207
+ double z = w;
208
+ double ap1 = a + 1;
209
+ double ap2 = a + 2;
210
+ if (w < 0.15 * ap1) {
211
+ /* DiDonato and Morris Eq 35: */
212
+ double v = std::log(p) + special::cephes::lgam(ap1);
213
+ z = std::exp((v + w) / a);
214
+ s = std::log1p(z / ap1 * (1 + z / ap2));
215
+ z = std::exp((v + z - s) / a);
216
+ s = std::log1p(z / ap1 * (1 + z / ap2));
217
+ z = std::exp((v + z - s) / a);
218
+ s = std::log1p(z / ap1 * (1 + z / ap2 * (1 + z / (a + 3))));
219
+ z = std::exp((v + z - s) / a);
220
+ }
221
+
222
+ if ((z <= 0.01 * ap1) || (z > 0.7 * ap1)) {
223
+ result = z;
224
+ } else {
225
+ /* DiDonato and Morris Eq 36: */
226
+ double ls = std::log(didonato_SN(a, z, 100, 1e-4));
227
+ double v = std::log(p) + special::cephes::lgam(ap1);
228
+ z = std::exp((v + z - ls) / a);
229
+ result = z * (1 - (a * std::log(z) - z - v + ls) / (a - z));
230
+ }
231
+ }
232
+ }
233
+ return result;
234
+ }
235
+
236
+ } // namespace detail
237
+
238
+ SPECFUN_HOST_DEVICE inline double igamci(double a, double q);
239
+
240
+ SPECFUN_HOST_DEVICE inline double igami(double a, double p) {
241
+ int i;
242
+ double x, fac, f_fp, fpp_fp;
243
+
244
+ if (std::isnan(a) || std::isnan(p)) {
245
+ return std::numeric_limits<double>::quiet_NaN();
246
+ ;
247
+ } else if ((a < 0) || (p < 0) || (p > 1)) {
248
+ set_error("gammaincinv", SF_ERROR_DOMAIN, NULL);
249
+ } else if (p == 0.0) {
250
+ return 0.0;
251
+ } else if (p == 1.0) {
252
+ return std::numeric_limits<double>::infinity();
253
+ } else if (p > 0.9) {
254
+ return igamci(a, 1 - p);
255
+ }
256
+
257
+ x = detail::find_inverse_gamma(a, p, 1 - p);
258
+ /* Halley's method */
259
+ for (i = 0; i < 3; i++) {
260
+ fac = detail::igam_fac(a, x);
261
+ if (fac == 0.0) {
262
+ return x;
263
+ }
264
+ f_fp = (igam(a, x) - p) * x / fac;
265
+ /* The ratio of the first and second derivatives simplifies */
266
+ fpp_fp = -1.0 + (a - 1) / x;
267
+ if (std::isinf(fpp_fp)) {
268
+ /* Resort to Newton's method in the case of overflow */
269
+ x = x - f_fp;
270
+ } else {
271
+ x = x - f_fp / (1.0 - 0.5 * f_fp * fpp_fp);
272
+ }
273
+ }
274
+
275
+ return x;
276
+ }
277
+
278
+ SPECFUN_HOST_DEVICE inline double igamci(double a, double q) {
279
+ int i;
280
+ double x, fac, f_fp, fpp_fp;
281
+
282
+ if (std::isnan(a) || std::isnan(q)) {
283
+ return std::numeric_limits<double>::quiet_NaN();
284
+ } else if ((a < 0.0) || (q < 0.0) || (q > 1.0)) {
285
+ set_error("gammainccinv", SF_ERROR_DOMAIN, NULL);
286
+ } else if (q == 0.0) {
287
+ return std::numeric_limits<double>::infinity();
288
+ } else if (q == 1.0) {
289
+ return 0.0;
290
+ } else if (q > 0.9) {
291
+ return igami(a, 1 - q);
292
+ }
293
+
294
+ x = detail::find_inverse_gamma(a, 1 - q, q);
295
+ for (i = 0; i < 3; i++) {
296
+ fac = detail::igam_fac(a, x);
297
+ if (fac == 0.0) {
298
+ return x;
299
+ }
300
+ f_fp = (igamc(a, x) - q) * x / (-fac);
301
+ fpp_fp = -1.0 + (a - 1) / x;
302
+ if (std::isinf(fpp_fp)) {
303
+ x = x - f_fp;
304
+ } else {
305
+ x = x - f_fp / (1.0 - 0.5 * f_fp * fpp_fp);
306
+ }
307
+ }
308
+
309
+ return x;
310
+ }
311
+
312
+ } // namespace cephes
313
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/config.h ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // Define math constants if they are not available
4
+ #ifndef M_E
5
+ #define M_E 2.71828182845904523536
6
+ #endif
7
+
8
+ #ifndef M_LOG2E
9
+ #define M_LOG2E 1.44269504088896340736
10
+ #endif
11
+
12
+ #ifndef M_LOG10E
13
+ #define M_LOG10E 0.434294481903251827651
14
+ #endif
15
+
16
+ #ifndef M_LN2
17
+ #define M_LN2 0.693147180559945309417
18
+ #endif
19
+
20
+ #ifndef M_LN10
21
+ #define M_LN10 2.30258509299404568402
22
+ #endif
23
+
24
+ #ifndef M_PI
25
+ #define M_PI 3.14159265358979323846
26
+ #endif
27
+
28
+ #ifndef M_PI_2
29
+ #define M_PI_2 1.57079632679489661923
30
+ #endif
31
+
32
+ #ifndef M_PI_4
33
+ #define M_PI_4 0.785398163397448309616
34
+ #endif
35
+
36
+ #ifndef M_1_PI
37
+ #define M_1_PI 0.318309886183790671538
38
+ #endif
39
+
40
+ #ifndef M_2_PI
41
+ #define M_2_PI 0.636619772367581343076
42
+ #endif
43
+
44
+ #ifndef M_2_SQRTPI
45
+ #define M_2_SQRTPI 1.12837916709551257390
46
+ #endif
47
+
48
+ #ifndef M_SQRT2
49
+ #define M_SQRT2 1.41421356237309504880
50
+ #endif
51
+
52
+ #ifndef M_SQRT1_2
53
+ #define M_SQRT1_2 0.707106781186547524401
54
+ #endif
55
+
56
+ #ifdef __CUDACC__
57
+ #define SPECFUN_HOST_DEVICE __host__ __device__
58
+
59
+ #include <cuda/std/algorithm>
60
+ #include <cuda/std/cmath>
61
+ #include <cuda/std/cstdint>
62
+ #include <cuda/std/limits>
63
+ #include <cuda/std/type_traits>
64
+
65
+ // Fallback to global namespace for functions unsupported on NVRTC Jit
66
+ #ifdef _LIBCUDACXX_COMPILER_NVRTC
67
+ #include <cuda_runtime.h>
68
+ #endif
69
+
70
+ namespace std {
71
+
72
+ SPECFUN_HOST_DEVICE inline double abs(double num) { return cuda::std::abs(num); }
73
+
74
+ SPECFUN_HOST_DEVICE inline double exp(double num) { return cuda::std::exp(num); }
75
+
76
+ SPECFUN_HOST_DEVICE inline double log(double num) { return cuda::std::log(num); }
77
+
78
+ SPECFUN_HOST_DEVICE inline double sqrt(double num) { return cuda::std::sqrt(num); }
79
+
80
+ SPECFUN_HOST_DEVICE inline bool isinf(double num) { return cuda::std::isinf(num); }
81
+
82
+ SPECFUN_HOST_DEVICE inline bool isnan(double num) { return cuda::std::isnan(num); }
83
+
84
+ SPECFUN_HOST_DEVICE inline bool isfinite(double num) { return cuda::std::isfinite(num); }
85
+
86
+ SPECFUN_HOST_DEVICE inline double pow(double x, double y) { return cuda::std::pow(x, y); }
87
+
88
+ SPECFUN_HOST_DEVICE inline double sin(double x) { return cuda::std::sin(x); }
89
+
90
+ SPECFUN_HOST_DEVICE inline double cos(double x) { return cuda::std::cos(x); }
91
+
92
+ SPECFUN_HOST_DEVICE inline double tan(double x) { return cuda::std::tan(x); }
93
+
94
+ SPECFUN_HOST_DEVICE inline double atan(double x) { return cuda::std::atan(x); }
95
+
96
+ SPECFUN_HOST_DEVICE inline double acos(double x) { return cuda::std::acos(x); }
97
+
98
+ SPECFUN_HOST_DEVICE inline double sinh(double x) { return cuda::std::sinh(x); }
99
+
100
+ SPECFUN_HOST_DEVICE inline double cosh(double x) { return cuda::std::cosh(x); }
101
+
102
+ SPECFUN_HOST_DEVICE inline double asinh(double x) { return cuda::std::asinh(x); }
103
+
104
+ SPECFUN_HOST_DEVICE inline bool signbit(double x) { return cuda::std::signbit(x); }
105
+
106
+ // Fallback to global namespace for functions unsupported on NVRTC
107
+ #ifndef _LIBCUDACXX_COMPILER_NVRTC
108
+ SPECFUN_HOST_DEVICE inline double ceil(double x) { return cuda::std::ceil(x); }
109
+ SPECFUN_HOST_DEVICE inline double floor(double x) { return cuda::std::floor(x); }
110
+ SPECFUN_HOST_DEVICE inline double round(double x) { return cuda::std::round(x); }
111
+ SPECFUN_HOST_DEVICE inline double trunc(double x) { return cuda::std::trunc(x); }
112
+ SPECFUN_HOST_DEVICE inline double fma(double x, double y, double z) { return cuda::std::fma(x, y, z); }
113
+ SPECFUN_HOST_DEVICE inline double copysign(double x, double y) { return cuda::std::copysign(x, y); }
114
+ SPECFUN_HOST_DEVICE inline double modf(double value, double *iptr) { return cuda::std::modf(value, iptr); }
115
+ SPECFUN_HOST_DEVICE inline double fmax(double x, double y) { return cuda::std::fmax(x, y); }
116
+ SPECFUN_HOST_DEVICE inline double fmin(double x, double y) { return cuda::std::fmin(x, y); }
117
+ SPECFUN_HOST_DEVICE inline double log10(double num) { return cuda::std::log10(num); }
118
+ SPECFUN_HOST_DEVICE inline double log1p(double num) { return cuda::std::log1p(num); }
119
+ SPECFUN_HOST_DEVICE inline double frexp(double num, int *exp) { return cuda::std::frexp(num, exp); }
120
+ SPECFUN_HOST_DEVICE inline double ldexp(double num, int exp) { return cuda::std::ldexp(num, exp); }
121
+ SPECFUN_HOST_DEVICE inline double fmod(double x, double y) { return cuda::std::fmod(x, y); }
122
+ #else
123
+ SPECFUN_HOST_DEVICE inline double ceil(double x) { return ::ceil(x); }
124
+ SPECFUN_HOST_DEVICE inline double floor(double x) { return ::floor(x); }
125
+ SPECFUN_HOST_DEVICE inline double round(double x) { return ::round(x); }
126
+ SPECFUN_HOST_DEVICE inline double trunc(double x) { return ::trunc(x); }
127
+ SPECFUN_HOST_DEVICE inline double fma(double x, double y, double z) { return ::fma(x, y, z); }
128
+ SPECFUN_HOST_DEVICE inline double copysign(double x, double y) { return ::copysign(x, y); }
129
+ SPECFUN_HOST_DEVICE inline double modf(double value, double *iptr) { return ::modf(value, iptr); }
130
+ SPECFUN_HOST_DEVICE inline double fmax(double x, double y) { return ::fmax(x, y); }
131
+ SPECFUN_HOST_DEVICE inline double fmin(double x, double y) { return ::fmin(x, y); }
132
+ SPECFUN_HOST_DEVICE inline double log10(double num) { return ::log10(num); }
133
+ SPECFUN_HOST_DEVICE inline double log1p(double num) { return ::log1p(num); }
134
+ SPECFUN_HOST_DEVICE inline double frexp(double num, int *exp) { return ::frexp(num, exp); }
135
+ SPECFUN_HOST_DEVICE inline double ldexp(double num, int exp) { return ::ldexp(num, exp); }
136
+ SPECFUN_HOST_DEVICE inline double fmod(double x, double y) { return ::fmod(x, y); }
137
+ #endif
138
+
139
+ template <typename T>
140
+ SPECFUN_HOST_DEVICE void swap(T &a, T &b) {
141
+ cuda::std::swap(a, b);
142
+ }
143
+
144
+ template <typename T>
145
+ SPECFUN_HOST_DEVICE const T &clamp(const T &v, const T &lo, const T &hi) {
146
+ return cuda::std::clamp(v, lo, hi);
147
+ }
148
+
149
+ template <typename T>
150
+ using numeric_limits = cuda::std::numeric_limits<T>;
151
+
152
+ // Must use thrust for complex types in order to support CuPy
153
+ template <typename T>
154
+ using complex = thrust::complex<T>;
155
+
156
+ template <typename T>
157
+ SPECFUN_HOST_DEVICE T abs(const complex<T> &z) {
158
+ return thrust::abs(z);
159
+ }
160
+
161
+ template <typename T>
162
+ SPECFUN_HOST_DEVICE complex<T> exp(const complex<T> &z) {
163
+ return thrust::exp(z);
164
+ }
165
+
166
+ template <typename T>
167
+ SPECFUN_HOST_DEVICE complex<T> log(const complex<T> &z) {
168
+ return thrust::log(z);
169
+ }
170
+
171
+ template <typename T>
172
+ SPECFUN_HOST_DEVICE T norm(const complex<T> &z) {
173
+ return thrust::norm(z);
174
+ }
175
+
176
+ template <typename T>
177
+ SPECFUN_HOST_DEVICE complex<T> sqrt(const complex<T> &z) {
178
+ return thrust::sqrt(z);
179
+ }
180
+
181
+ template <typename T>
182
+ SPECFUN_HOST_DEVICE complex<T> conj(const complex<T> &z) {
183
+ return thrust::conj(z);
184
+ }
185
+
186
+ template <typename T>
187
+ SPECFUN_HOST_DEVICE complex<T> pow(const complex<T> &x, const complex<T> &y) {
188
+ return thrust::pow(x, y);
189
+ }
190
+
191
+ template <typename T>
192
+ SPECFUN_HOST_DEVICE complex<T> pow(const complex<T> &x, const T &y) {
193
+ return thrust::pow(x, y);
194
+ }
195
+
196
+ // Other types and utilities
197
+ using cuda::std::is_floating_point;
198
+ using cuda::std::pair;
199
+ using cuda::std::uint64_t;
200
+
201
+ #define SPECFUN_ASSERT(a)
202
+
203
+ } // namespace std
204
+
205
+ #else
206
+ #define SPECFUN_HOST_DEVICE
207
+
208
+ #include <algorithm>
209
+ #include <cassert>
210
+ #include <cmath>
211
+ #include <complex>
212
+ #include <cstdint>
213
+ #include <cstddef>
214
+ #include <iterator>
215
+ #include <limits>
216
+ #include <math.h>
217
+ #include <type_traits>
218
+ #include <utility>
219
+
220
+ #ifdef DEBUG
221
+ #define SPECFUN_ASSERT(a) assert(a)
222
+ #else
223
+ #define SPECFUN_ASSERT(a)
224
+ #endif
225
+
226
+ #endif
parrot/lib/python3.10/site-packages/scipy/special/special/digamma.h ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2024.
2
+ * Original header comment appears below.
3
+ */
4
+
5
+ /* An implementation of the digamma function for complex arguments.
6
+ *
7
+ * Author: Josh Wilson
8
+ *
9
+ * Distributed under the same license as Scipy.
10
+ *
11
+ * Sources:
12
+ * [1] "The Digital Library of Mathematical Functions", dlmf.nist.gov
13
+ *
14
+ * [2] mpmath (version 0.19), http://mpmath.org
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include "cephes/psi.h"
20
+ #include "cephes/zeta.h"
21
+ #include "config.h"
22
+ #include "error.h"
23
+ #include "trig.h"
24
+
25
+ namespace special {
26
+ namespace detail {
27
+ // All of the following were computed with mpmath
28
+ // Location of the positive root
29
+ constexpr double digamma_posroot = 1.4616321449683623;
30
+ // Value of the positive root
31
+ constexpr double digamma_posrootval = -9.2412655217294275e-17;
32
+ // Location of the negative root
33
+ constexpr double digamma_negroot = -0.504083008264455409;
34
+ // Value of the negative root
35
+ constexpr double digamma_negrootval = 7.2897639029768949e-17;
36
+
37
/* Series expansion of digamma(z) about a precomputed root of digamma.
 *
 * Writes digamma(z) = rootval + sum_{n>=1} (-1)^{n+1} (z - root)^n zeta(n + 1, root),
 * where `root` is a zero of digamma and `rootval` the tiny residual value of
 * digamma at `root` (stored separately for accuracy). The sum stops once a
 * term is below one epsilon relative to the accumulated result, with a hard
 * cap of 99 terms.
 */
template <typename T>
SPECFUN_HOST_DEVICE T digamma_zeta_series(T z, double root, double rootval) {
    T res = rootval;
    T coeff = -1.0;

    z = z - root;
    T term;
    for (int n = 1; n < 100; n++) {
        coeff *= -z; // coeff = (-1)^{n+1} (z - root)^n
        term = coeff * cephes::zeta(n + 1, root);
        res += term;
        if (std::abs(term) < std::numeric_limits<double>::epsilon() * std::abs(res)) {
            break; // converged at double precision
        }
    }
    return res;
}
54
+
55
#ifndef SPECFUN_HOST_DEVICE
#define SPECFUN_HOST_DEVICE
#endif

SPECFUN_HOST_DEVICE inline std::complex<double> digamma_forward_recurrence(std::complex<double> z,
                                                                           std::complex<double> psiz, int n) {
    /* Given psiz = digamma(z), step forward to digamma(z + n) by applying
     * the recurrence
     *
     *     digamma(w + 1) = digamma(w) + 1/w
     *
     * n times. See https://dlmf.nist.gov/5.5#E2 */
    std::complex<double> result = psiz;

    int step = 0;
    while (step < n) {
        result += 1.0 / (z + static_cast<double>(step));
        ++step;
    }
    return result;
}
70
+
71
#ifndef SPECFUN_HOST_DEVICE
#define SPECFUN_HOST_DEVICE
#endif

SPECFUN_HOST_DEVICE inline std::complex<double> digamma_backward_recurrence(std::complex<double> z,
                                                                            std::complex<double> psiz, int n) {
    /* Given psiz = digamma(z), step backward to digamma(z - n) by running the
     * recurrence digamma(w) = digamma(w + 1) - 1/w in reverse n times. */
    std::complex<double> result = psiz;

    for (int j = 1; j <= n; ++j) {
        result -= 1.0 / (z - static_cast<double>(j));
    }
    return result;
}
81
+
82
#ifndef SPECFUN_HOST_DEVICE
#define SPECFUN_HOST_DEVICE
#endif

SPECFUN_HOST_DEVICE inline std::complex<double> digamma_asymptotic_series(std::complex<double> z) {
    /* Asymptotic expansion of digamma for large |z|, https://dlmf.nist.gov/5.11#E2:
     *
     *     digamma(z) ~ log(z) - 1/(2z) - sum_{k>=1} B_{2k} / (2k z^{2k})
     *
     * where B_{2k} are the Bernoulli numbers, tabulated below for k = 1..16. */
    double B2k[] = {
        0.166666666666666667, -0.0333333333333333333, 0.0238095238095238095, -0.0333333333333333333,
        0.0757575757575757576, -0.253113553113553114, 1.16666666666666667, -7.09215686274509804,
        54.9711779448621554, -529.124242424242424, 6192.12318840579710, -86580.2531135531136,
        1425517.16666666667, -27298231.0678160920, 601580873.900642368, -15116315767.0921569};

    if (!(std::isfinite(z.real()) && std::isfinite(z.imag()))) {
        /* Bail out early for infinity (or nan): division by complex infinity
         * is implementation dependent and has been observed to differ between
         * the C++ stdlib and the CUDA stdlib. */
        return std::log(z);
    }

    std::complex<double> result = std::log(z) - 0.5 / z;
    std::complex<double> inv_z2 = 1.0 / z / z;
    std::complex<double> z_pow = 1.0;

    for (int k = 1; k < 17; k++) {
        z_pow *= inv_z2; // z_pow = z^{-2k}
        std::complex<double> term = -B2k[k - 1] * z_pow / (2 * static_cast<double>(k));
        result += term;
        if (std::abs(term) < std::numeric_limits<double>::epsilon() * std::abs(result)) {
            break; // converged at double precision
        }
    }
    return result;
}
116
+
117
+ } // namespace detail
118
+
119
SPECFUN_HOST_DEVICE inline double digamma(double z) {
    /* Wrap Cephes' psi to take advantage of the series expansion around
     * the smallest negative zero.
     */
    if (std::abs(z - detail::digamma_negroot) < 0.3) {
        // Within 0.3 of the negative root: the zeta series centered at the
        // root avoids the cancellation a direct evaluation suffers near a
        // zero of digamma.
        return detail::digamma_zeta_series(z, detail::digamma_negroot, detail::digamma_negrootval);
    }
    return cephes::psi(z);
}
128
+
129
// Single precision wrapper: evaluate in double precision and narrow the result.
SPECFUN_HOST_DEVICE inline float digamma(float z) { return static_cast<float>(digamma(static_cast<double>(z))); }
130
+
131
SPECFUN_HOST_DEVICE inline std::complex<double> digamma(std::complex<double> z) {
    /*
     * Compute the digamma function for complex arguments. The strategy
     * is:
     *
     * - Around the two zeros closest to the origin (posroot and negroot)
     *   use a Taylor series with precomputed zero order coefficient.
     * - If close to the origin, use a recurrence relation to step away
     *   from the origin.
     * - If close to the negative real axis, use the reflection formula
     *   to move to the right halfplane.
     * - If |z| is large (> 16), use the asymptotic series.
     * - If |z| is small, use a recurrence relation to make |z| large
     *   enough to use the asymptotic series.
     */
    double absz = std::abs(z);
    std::complex<double> res = 0;
    /* Use the asymptotic series for z away from the negative real axis
     * with abs(z) > smallabsz. */
    int smallabsz = 16;
    /* Use the reflection principle for z with z.real < 0 that are within
     * smallimag of the negative real axis.
     * int smallimag = 6 # unused below except in a comment */

    if (z.real() <= 0.0 && std::ceil(z.real()) == z) {
        // Poles: z is a non-positive integer on the real axis
        // (the == comparison also requires z.imag() == 0).
        set_error("digamma", SF_ERROR_SINGULAR, NULL);
        return {std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()};
    }
    if (std::abs(z - detail::digamma_negroot) < 0.3) {
        // First negative root.
        return detail::digamma_zeta_series(z, detail::digamma_negroot, detail::digamma_negrootval);
    }

    if (z.real() < 0 and std::abs(z.imag()) < smallabsz) {
        /* Reflection formula for digamma. See
         *
         * https://dlmf.nist.gov/5.5#E4
         */
        res = -M_PI * cospi(z) / sinpi(z);
        z = 1.0 - z;
        absz = std::abs(z);
    }

    if (absz < 0.5) {
        /* Use one step of the recurrence relation to step away from
         * the pole. */
        res = -1.0 / z;
        z += 1.0;
        absz = std::abs(z);
    }

    if (std::abs(z - detail::digamma_posroot) < 0.5) {
        res += detail::digamma_zeta_series(z, detail::digamma_posroot, detail::digamma_posrootval);
    } else if (absz > smallabsz) {
        res += detail::digamma_asymptotic_series(z);
    } else if (z.real() >= 0.0) {
        // Step out to z + n with |z + n| > smallabsz, evaluate the asymptotic
        // series there, then recurse back down to z.
        double n = std::trunc(smallabsz - absz) + 1;
        std::complex<double> init = detail::digamma_asymptotic_series(z + n);
        res += detail::digamma_backward_recurrence(z + n, init, n);
    } else {
        // z.real() < 0, absz < smallabsz, and z.imag() > smallimag
        double n = std::trunc(smallabsz - absz) - 1;
        std::complex<double> init = detail::digamma_asymptotic_series(z - n);
        res += detail::digamma_forward_recurrence(z - n, init, n);
    }
    return res;
}
199
+
200
// Single precision complex wrapper: evaluate in double complex and narrow.
SPECFUN_HOST_DEVICE inline std::complex<float> digamma(std::complex<float> z) {
    return static_cast<std::complex<float>>(digamma(static_cast<std::complex<double>>(z)));
}
203
+
204
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/error.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // should be included from config.h, but that won't work until we've cleanly separated out the C and C++ parts of the
4
+ // code
5
+ #ifdef __CUDACC__
6
+ #define SPECFUN_HOST_DEVICE __host__ __device__
7
+ #else
8
+ #define SPECFUN_HOST_DEVICE
9
+ #endif
10
+
11
// Error conditions reported by the special-function kernels. Kept as a plain
// C enum so it is usable from both the C and C++ parts of the code base.
typedef enum {
    SF_ERROR_OK = 0,    /* no error */
    SF_ERROR_SINGULAR,  /* singularity encountered */
    SF_ERROR_UNDERFLOW, /* floating point underflow */
    SF_ERROR_OVERFLOW,  /* floating point overflow */
    SF_ERROR_SLOW,      /* too many iterations required */
    SF_ERROR_LOSS,      /* loss of precision */
    SF_ERROR_NO_RESULT, /* no result obtained */
    SF_ERROR_DOMAIN,    /* out of domain */
    SF_ERROR_ARG,       /* invalid input parameter */
    SF_ERROR_OTHER,     /* unclassified error */
    SF_ERROR__LAST      /* sentinel: one past the last real error code */
} sf_error_t;
24
+
25
+ #ifdef __cplusplus
26
+
27
+ #include <complex>
28
+
29
+ namespace special {
30
+
31
#ifndef SP_SPECFUN_ERROR
// Default stub: error reporting is compiled out and calls are no-ops.
SPECFUN_HOST_DEVICE inline void set_error(const char *func_name, sf_error_t code, const char *fmt, ...) {
    // nothing
}
#else
// When SP_SPECFUN_ERROR is defined, the embedding library supplies this
// implementation (note it is not marked SPECFUN_HOST_DEVICE).
void set_error(const char *func_name, sf_error_t code, const char *fmt, ...);
#endif
38
+
39
/* Report `code` via set_error and, for error classes with no meaningful
 * numeric result (domain, overflow, no result), overwrite `value` with NaN.
 * No-op when code == SF_ERROR_OK.
 * NOTE(review): std::numeric_limits is used but <limits> is not included by
 * this header directly — confirm it arrives transitively via <complex>. */
template <typename T>
void set_error_and_nan(const char *name, sf_error_t code, T &value) {
    if (code != SF_ERROR_OK) {
        set_error(name, code, nullptr);

        if (code == SF_ERROR_DOMAIN || code == SF_ERROR_OVERFLOW || code == SF_ERROR_NO_RESULT) {
            value = std::numeric_limits<T>::quiet_NaN();
        }
    }
}
49
+
50
/* Complex overload of set_error_and_nan: on a domain/overflow/no-result error
 * both the real and imaginary parts of `value` are set to NaN. */
template <typename T>
void set_error_and_nan(const char *name, sf_error_t code, std::complex<T> &value) {
    if (code != SF_ERROR_OK) {
        set_error(name, code, nullptr);

        if (code == SF_ERROR_DOMAIN || code == SF_ERROR_OVERFLOW || code == SF_ERROR_NO_RESULT) {
            value.real(std::numeric_limits<T>::quiet_NaN());
            value.imag(std::numeric_limits<T>::quiet_NaN());
        }
    }
}
61
+
62
+ } // namespace special
63
+
64
+ #endif
parrot/lib/python3.10/site-packages/scipy/special/special/evalpoly.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2024.
2
+ *
3
+ * Original author: Josh Wilson, 2016.
4
+ */
5
+
6
+ /* Evaluate polynomials.
7
+ *
8
+ * All of the coefficients are stored in reverse order, i.e. if the
9
+ * polynomial is
10
+ *
11
+ * u_n x^n + u_{n - 1} x^{n - 1} + ... + u_0,
12
+ *
13
+ * then coeffs[0] = u_n, coeffs[1] = u_{n - 1}, ..., coeffs[n] = u_0.
14
+ *
15
+ * References
16
+ * ----------
17
+ * [1] Knuth, "The Art of Computer Programming, Volume II"
18
+ */
19
+
20
+ #pragma once
21
+
22
+ #include "config.h"
23
+
24
+ namespace special {
25
+
26
#ifndef SPECFUN_HOST_DEVICE
#define SPECFUN_HOST_DEVICE
#endif

SPECFUN_HOST_DEVICE inline std::complex<double> cevalpoly(const double *coeffs, int degree, std::complex<double> z) {
    /* Evaluate a polynomial with real coefficients at a complex point z.
     *
     * Coefficients are stored in reverse order: coeffs[0] is the leading
     * coefficient and coeffs[degree] the constant term. Uses equation (3) in
     * section 4.6.4 of [1], which requires only real multiplications and is
     * therefore more efficient than Horner's method at a complex point.
     * Assumes degree >= 1 (at least two coefficients are read).
     */
    double r = 2 * z.real();  // 2*Re(z)
    double s = std::norm(z);  // |z|^2
    double p = coeffs[0];
    double q = coeffs[1];

    for (int idx = 2; idx <= degree; ++idx) {
        double prev = q;
        q = std::fma(-s, p, coeffs[idx]);
        p = std::fma(r, p, prev);
    }

    return z * p + q;
}
46
+
47
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/hyp2f1.h ADDED
@@ -0,0 +1,694 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Implementation of Gauss's hypergeometric function for complex values.
2
+ *
3
+ * This implementation is based on the Fortran implementation by Shanjie Zhang and
4
+ * Jianming Jin included in specfun.f [1]_. Computation of Gauss's hypergeometric
5
+ * function involves handling a patchwork of special cases. By default the Zhang and
6
+ * Jin implementation has been followed as closely as possible except for situations where
7
+ * an improvement was obvious. We've attempted to document the reasons behind decisions
8
+ * made by Zhang and Jin and to document the reasons for deviating from their implementation
9
+ * when this has been done. References to the NIST Digital Library of Mathematical
10
+ * Functions [2]_ have been added where they are appropriate. The review paper by
11
+ * Pearson et al [3]_ is an excellent resource for best practices for numerical
12
+ * computation of hypergeometric functions. We have followed this review paper
13
+ * when making improvements to and correcting defects in Zhang and Jin's
14
+ * implementation. When Pearson et al propose several competing alternatives for a
15
+ * given case, we've used our best judgment to decide on the method to use.
16
+ *
17
+ * Author: Albert Steppi
18
+ *
19
+ * Distributed under the same license as Scipy.
20
+ *
21
+ * References
22
+ * ----------
23
+ * .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
24
+ * .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/,
25
+ * Release 1.1.1 of 2021-03-15. F. W. J. Olver, A. B. Olde Daalhuis,
26
+ * D. W. Lozier, B. I. Schneider, R. F. Boisvert, C. W. Clark, B. R. Miller,
27
+ * B. V. Saunders, H. S. Cohl, and M. A. McClain, eds.
28
+ * .. [3] Pearson, J.W., Olver, S. & Porter, M.A.
29
+ * "Numerical methods for the computation of the confluent and Gauss
30
+ * hypergeometric functions."
31
+ * Numer Algor 74, 821-866 (2017). https://doi.org/10.1007/s11075-016-0173-0
32
+ * .. [4] Raimundas Vidunas, "Degenerate Gauss Hypergeometric Functions",
33
+ * Kyushu Journal of Mathematics, 2007, Volume 61, Issue 1, Pages 109-135,
34
+ * .. [5] López, J.L., Temme, N.M. New series expansions of the Gauss hypergeometric
35
+ * function. Adv Comput Math 39, 349-365 (2013).
36
+ * https://doi.org/10.1007/s10444-012-9283-y
37
+ * """
38
+ */
39
+
40
+ #pragma once
41
+
42
+ #include "config.h"
43
+ #include "error.h"
44
+ #include "tools.h"
45
+
46
+ #include "binom.h"
47
+ #include "cephes/gamma.h"
48
+ #include "cephes/lanczos.h"
49
+ #include "cephes/poch.h"
50
+ #include "cephes/hyp2f1.h"
51
+ #include "digamma.h"
52
+
53
+ namespace special {
54
+ namespace detail {
55
+ constexpr double hyp2f1_EPS = 1e-15;
56
+ /* The original implementation in SciPy from Zhang and Jin used 1500 for the
57
+ * maximum number of series iterations in some cases and 500 in others.
58
+ * Through the empirical results on the test cases in
59
+ * scipy/special/_precompute/hyp2f1_data.py, it was determined that these values
60
+ * can lead to early termination of series which would have eventually converged
61
+ * at a reasonable level of accuracy. We've bumped the iteration limit to 3000,
62
+ * and may adjust it again based on further analysis. */
63
+ constexpr std::uint64_t hyp2f1_MAXITER = 3000;
64
+
65
+ SPECFUN_HOST_DEVICE inline double four_gammas_lanczos(double u, double v, double w, double x) {
66
+ /* Compute ratio of gamma functions using lanczos approximation.
67
+ *
68
+ * Computes gamma(u)*gamma(v)/(gamma(w)*gamma(x))
69
+ *
70
+ * It is assumed that x = u + v - w, but it is left to the user to
71
+ * ensure this.
72
+ *
73
+ * The lanczos approximation takes the form
74
+ *
75
+ * gamma(x) = factor(x) * lanczos_sum_expg_scaled(x)
76
+ *
77
+ * where factor(x) = ((x + lanczos_g - 0.5)/e)**(x - 0.5).
78
+ *
79
+ * The formula above is only valid for x >= 0.5, but can be extended to
80
+ * x < 0.5 with the reflection principle.
81
+ *
82
+ * Using the lanczos approximation when computing this ratio of gamma functions
83
+ * allows factors to be combined analytically to avoid underflow and overflow
84
+ * and produce a more accurate result. The condition x = u + v - w makes it
85
+ * possible to cancel the factors in the expression
86
+ *
87
+ * factor(u) * factor(v) / (factor(w) * factor(x))
88
+ *
89
+ * by taking one factor and absorbing it into the others. Currently, this
90
+ * implementation takes the factor corresponding to the argument with largest
91
+ * absolute value and absorbs it into the others.
92
+ *
93
+ * Since this is only called internally by four_gammas. It is assumed that
94
+ * |u| >= |v| and |w| >= |x|.
95
+ */
96
+
97
+ /* The below implementation may incorrectly return finite results
98
+ * at poles of the gamma function. Handle these cases explicitly. */
99
+ if ((u == std::trunc(u) && u <= 0) || (v == std::trunc(v) && v <= 0)) {
100
+ /* Return nan if numerator has pole. Diverges to +- infinity
101
+ * depending on direction so value is undefined. */
102
+ return std::numeric_limits<double>::quiet_NaN();
103
+ }
104
+ if ((w == std::trunc(w) && w <= 0) || (x == std::trunc(x) && x <= 0)) {
105
+ // Return 0 if denominator has pole but not numerator.
106
+ return 0.0;
107
+ }
108
+
109
+ double result = 1.0;
110
+ double ugh, vgh, wgh, xgh, u_prime, v_prime, w_prime, x_prime;
111
+
112
+ if (u >= 0.5) {
113
+ result *= cephes::lanczos_sum_expg_scaled(u);
114
+ ugh = u + cephes::lanczos_g - 0.5;
115
+ u_prime = u;
116
+ } else {
117
+ result /= cephes::lanczos_sum_expg_scaled(1 - u) * std::sin(M_PI * u) * M_1_PI;
118
+ ugh = 0.5 - u + cephes::lanczos_g;
119
+ u_prime = 1 - u;
120
+ }
121
+
122
+ if (v >= 0.5) {
123
+ result *= cephes::lanczos_sum_expg_scaled(v);
124
+ vgh = v + cephes::lanczos_g - 0.5;
125
+ v_prime = v;
126
+ } else {
127
+ result /= cephes::lanczos_sum_expg_scaled(1 - v) * std::sin(M_PI * v) * M_1_PI;
128
+ vgh = 0.5 - v + cephes::lanczos_g;
129
+ v_prime = 1 - v;
130
+ }
131
+
132
+ if (w >= 0.5) {
133
+ result /= cephes::lanczos_sum_expg_scaled(w);
134
+ wgh = w + cephes::lanczos_g - 0.5;
135
+ w_prime = w;
136
+ } else {
137
+ result *= cephes::lanczos_sum_expg_scaled(1 - w) * std::sin(M_PI * w) * M_1_PI;
138
+ wgh = 0.5 - w + cephes::lanczos_g;
139
+ w_prime = 1 - w;
140
+ }
141
+
142
+ if (x >= 0.5) {
143
+ result /= cephes::lanczos_sum_expg_scaled(x);
144
+ xgh = x + cephes::lanczos_g - 0.5;
145
+ x_prime = x;
146
+ } else {
147
+ result *= cephes::lanczos_sum_expg_scaled(1 - x) * std::sin(M_PI * x) * M_1_PI;
148
+ xgh = 0.5 - x + cephes::lanczos_g;
149
+ x_prime = 1 - x;
150
+ }
151
+
152
+ if (std::abs(u) >= std::abs(w)) {
153
+ // u has greatest absolute value. Absorb ugh into the others.
154
+ if (std::abs((v_prime - u_prime) * (v - 0.5)) < 100 * ugh and v > 100) {
155
+ /* Special case where base is close to 1. Condition taken from
156
+ * Boost's beta function implementation. */
157
+ result *= std::exp((v - 0.5) * std::log1p((v_prime - u_prime) / ugh));
158
+ } else {
159
+ result *= std::pow(vgh / ugh, v - 0.5);
160
+ }
161
+
162
+ if (std::abs((u_prime - w_prime) * (w - 0.5)) < 100 * wgh and u > 100) {
163
+ result *= std::exp((w - 0.5) * std::log1p((u_prime - w_prime) / wgh));
164
+ } else {
165
+ result *= std::pow(ugh / wgh, w - 0.5);
166
+ }
167
+
168
+ if (std::abs((u_prime - x_prime) * (x - 0.5)) < 100 * xgh and u > 100) {
169
+ result *= std::exp((x - 0.5) * std::log1p((u_prime - x_prime) / xgh));
170
+ } else {
171
+ result *= std::pow(ugh / xgh, x - 0.5);
172
+ }
173
+ } else {
174
+ // w has greatest absolute value. Absorb wgh into the others.
175
+ if (std::abs((u_prime - w_prime) * (u - 0.5)) < 100 * wgh and u > 100) {
176
+ result *= std::exp((u - 0.5) * std::log1p((u_prime - w_prime) / wgh));
177
+ } else {
178
+ result *= pow(ugh / wgh, u - 0.5);
179
+ }
180
+ if (std::abs((v_prime - w_prime) * (v - 0.5)) < 100 * wgh and v > 100) {
181
+ result *= std::exp((v - 0.5) * std::log1p((v_prime - w_prime) / wgh));
182
+ } else {
183
+ result *= std::pow(vgh / wgh, v - 0.5);
184
+ }
185
+ if (std::abs((w_prime - x_prime) * (x - 0.5)) < 100 * xgh and x > 100) {
186
+ result *= std::exp((x - 0.5) * std::log1p((w_prime - x_prime) / xgh));
187
+ } else {
188
+ result *= std::pow(wgh / xgh, x - 0.5);
189
+ }
190
+ }
191
+ // This exhausts all cases because we assume |u| >= |v| and |w| >= |x|.
192
+
193
+ return result;
194
+ }
195
+
196
SPECFUN_HOST_DEVICE inline double four_gammas(double u, double v, double w, double x) {
    /* Compute gamma(u)*gamma(v)/(gamma(w)*gamma(x)), trying three strategies
     * in order of increasing robustness to overflow/underflow: direct
     * evaluation, the Lanczos-based four_gammas_lanczos, and log-gamma with
     * explicit sign tracking. */
    double result;

    // Without loss of generality, assume |u| >= |v| and |w| >= |x|.
    // NOTE(review): this swap actually orders |u| <= |v| (and the second
    // orders |w| >= |x|), while four_gammas_lanczos documents the assumption
    // |u| >= |v| — confirm the intended ordering.
    if (std::abs(u) > std::abs(v)) {
        std::swap(u, v);
    }
    if (std::abs(x) > std::abs(w)) {
        std::swap(x, w);
    }
    /* Direct ratio tends to be more accurate for arguments in this range. Range
     * chosen empirically based on the relevant benchmarks in
     * scipy/special/_precompute/hyp2f1_data.py */
    if (std::abs(u) <= 100 && std::abs(v) <= 100 && std::abs(w) <= 100 && std::abs(x) <= 100) {
        result = cephes::Gamma(u) * cephes::Gamma(v) / (cephes::Gamma(w) * cephes::Gamma(x));
        if (std::isfinite(result) && result != 0.0) {
            return result;
        }
    }
    result = four_gammas_lanczos(u, v, w, x);
    if (std::isfinite(result) && result != 0.0) {
        return result;
    }
    // If overflow or underflow, try again with logs.
    result = std::exp(cephes::lgam(v) - cephes::lgam(x) + cephes::lgam(u) - cephes::lgam(w));
    result *= cephes::gammasgn(u) * cephes::gammasgn(w) * cephes::gammasgn(v) * cephes::gammasgn(x);
    return result;
}
224
+
225
#ifndef SPECFUN_HOST_DEVICE
#define SPECFUN_HOST_DEVICE
#endif

class HypergeometricSeriesGenerator {
    /* Term generator for the Maclaurin series of hyp2f1:
     *
     *     sum_k (a)_k (b)_k / ((c)_k k!) * z^k
     *
     * Series is convergent for |z| < 1 but is only practical for numerical
     * computation when |z| < 0.9. Each call to operator() returns the next
     * term of the series, starting with the k = 0 term, 1.
     */
  public:
    SPECFUN_HOST_DEVICE HypergeometricSeriesGenerator(double a, double b, double c, std::complex<double> z)
        : a_(a), b_(b), c_(c), z_(z), current_term_(1.0), n_(0) {}

    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        std::complex<double> result = current_term_;
        // Exact ratio of consecutive terms: (a + n)(b + n) z / ((n + 1)(c + n)).
        current_term_ *= (a_ + n_) * (b_ + n_) / ((n_ + 1) * (c_ + n_)) * z_;
        ++n_;
        return result;
    }

  private:
    double a_, b_, c_;
    std::complex<double> z_, current_term_;
    std::uint64_t n_;
};
247
+
248
class Hyp2f1Transform1Generator {
    /* 1 - z transformation of standard series (see https://dlmf.nist.gov/15.8).
     *
     * Expresses hyp2f1(a, b, c, z) as a weighted sum of two Maclaurin series
     * in 1 - z. The four_gammas weights have poles when c - a - b is an
     * integer; that limit is handled separately by
     * Hyp2f1Transform1LimitSeriesGenerator. Each call to operator() returns
     * the sum of the next term of each component series, pre-multiplied by
     * its prefactor. */
  public:
    SPECFUN_HOST_DEVICE Hyp2f1Transform1Generator(double a, double b, double c, std::complex<double> z)
        : factor1_(four_gammas(c, c - a - b, c - a, c - b)),
          factor2_(four_gammas(c, a + b - c, a, b) * std::pow(1.0 - z, c - a - b)),
          generator1_(HypergeometricSeriesGenerator(a, b, a + b - c + 1, 1.0 - z)),
          generator2_(HypergeometricSeriesGenerator(c - a, c - b, c - a - b + 1, 1.0 - z)) {}

    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        return factor1_ * generator1_() + factor2_ * generator2_();
    }

  private:
    std::complex<double> factor1_, factor2_;
    HypergeometricSeriesGenerator generator1_, generator2_;
};
265
+
266
class Hyp2f1Transform1LimitSeriesGenerator {
    /* 1 - z transform in limit as c - a - b approaches an integer m.
     *
     * Each term combines four digamma values with log(1 - z) and a running
     * Pochhammer-style factor; the digamma values are updated incrementally
     * with the recurrence digamma(x + 1) = digamma(x) + 1/x rather than being
     * recomputed each iteration. */
  public:
    SPECFUN_HOST_DEVICE Hyp2f1Transform1LimitSeriesGenerator(double a, double b, double m, std::complex<double> z)
        : d1_(special::digamma(a)), d2_(special::digamma(b)), d3_(special::digamma(1 + m)),
          d4_(special::digamma(1.0)), a_(a), b_(b), m_(m), z_(z), log_1_z_(std::log(1.0 - z)),
          factor_(1.0 / cephes::Gamma(m + 1)), k_(0) {}

    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        std::complex<double> term_ = (d1_ + d2_ - d3_ - d4_ + log_1_z_) * factor_;
        // Use digamma(x + 1) = digamma(x) + 1/x
        d1_ += 1 / (a_ + k_);       // d1 = digamma(a + k)
        d2_ += 1 / (b_ + k_);       // d2 = digamma(b + k)
        d3_ += 1 / (1.0 + m_ + k_); // d3 = digamma(1 + m + k)
        d4_ += 1 / (1.0 + k_);      // d4 = digamma(1 + k)
        factor_ *= (a_ + k_) * (b_ + k_) / ((k_ + 1.0) * (m_ + k_ + 1)) * (1.0 - z_);
        ++k_;
        return term_;
    }

  private:
    double d1_, d2_, d3_, d4_, a_, b_, m_;
    std::complex<double> z_, log_1_z_, factor_;
    int k_;
};
291
+
292
class Hyp2f1Transform2Generator {
    /* 1/z transformation of standard series (see https://dlmf.nist.gov/15.8).
     *
     * Expresses hyp2f1(a, b, c, z) as a combination of two series in 1/z with
     * gamma prefactors and complex powers (-z)**(-a), (-z)**(-b) (std::pow,
     * principal branch). The four_gammas weights degenerate when a - b is an
     * integer; that limit is handled by the Hyp2f1Transform2Limit*
     * generators. */
  public:
    SPECFUN_HOST_DEVICE Hyp2f1Transform2Generator(double a, double b, double c, std::complex<double> z)
        : factor1_(four_gammas(c, b - a, b, c - a) * std::pow(-z, -a)),
          factor2_(four_gammas(c, a - b, a, c - b) * std::pow(-z, -b)),
          generator1_(HypergeometricSeriesGenerator(a, a - c + 1, a - b + 1, 1.0 / z)),
          generator2_(HypergeometricSeriesGenerator(b, b - c + 1, b - a + 1, 1.0 / z)) {}

    // Sum of the next term of each component series, weighted by its prefactor.
    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        return factor1_ * generator1_() + factor2_ * generator2_();
    }

  private:
    std::complex<double> factor1_, factor2_;
    HypergeometricSeriesGenerator generator1_, generator2_;
};
309
+
310
class Hyp2f1Transform2LimitSeriesGenerator {
    /* 1/z transform in limit as a - b approaches a non-negative integer m. (Can swap a and b to
     * handle the m a negative integer case.)
     *
     * Terms combine four digamma values (updated incrementally via
     * digamma(x + 1) = digamma(x) + 1/x), log(-z), and a running Pochhammer
     * factor. */
  public:
    SPECFUN_HOST_DEVICE Hyp2f1Transform2LimitSeriesGenerator(double a, double b, double c, double m,
                                                             std::complex<double> z)
        : d1_(special::digamma(1.0)), d2_(special::digamma(1 + m)), d3_(special::digamma(a)),
          d4_(special::digamma(c - a)), a_(a), b_(b), c_(c), m_(m), z_(z), log_neg_z_(std::log(-z)),
          factor_(special::cephes::poch(b, m) * special::cephes::poch(1 - c + b, m) /
                  special::cephes::Gamma(m + 1)),
          k_(0) {}

    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        std::complex<double> term = (d1_ + d2_ - d3_ - d4_ + log_neg_z_) * factor_;
        // Use digamma(x + 1) = digamma(x) + 1/x
        d1_ += 1 / (1.0 + k_);        // d1 = digamma(1 + k)
        d2_ += 1 / (1.0 + m_ + k_);   // d2 = digamma(1 + m + k)
        d3_ += 1 / (a_ + k_);         // d3 = digamma(a + k)
        d4_ -= 1 / (c_ - a_ - k_ - 1); // d4 = digamma(c - a - k)
        factor_ *= (b_ + m_ + k_) * (1 - c_ + b_ + m_ + k_) / ((k_ + 1) * (m_ + k_ + 1)) / z_;
        ++k_;
        return term;
    }

  private:
    double d1_, d2_, d3_, d4_, a_, b_, c_, m_;
    std::complex<double> z_, log_neg_z_, factor_;
    std::uint64_t k_;
};
339
+
340
class Hyp2f1Transform2LimitSeriesCminusAIntGenerator {
    /* 1/z transform in limit as a - b approaches a non-negative integer m, and c - a approaches
     * a positive integer n. */
  public:
    SPECFUN_HOST_DEVICE Hyp2f1Transform2LimitSeriesCminusAIntGenerator(double a, double b, double c, double m,
                                                                       double n, std::complex<double> z)
        : d1_(special::digamma(1.0)), d2_(special::digamma(1 + m)), d3_(special::digamma(a)),
          d4_(special::digamma(n)), a_(a), b_(b), c_(c), m_(m), n_(n), z_(z), log_neg_z_(std::log(-z)),
          factor_(special::cephes::poch(b, m) * special::cephes::poch(1 - c + b, m) /
                  special::cephes::Gamma(m + 1)),
          k_(0) {}

    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        std::complex<double> term;
        if (k_ < n_) {
            // Below the switchover index k = n = c - a the terms have the same
            // digamma + log structure as Hyp2f1Transform2LimitSeriesGenerator.
            term = (d1_ + d2_ - d3_ - d4_ + log_neg_z_) * factor_;
            // Use digamma(x + 1) = digamma(x) + 1/x
            d1_ += 1 / (1.0 + k_);    // d1 = digamma(1 + k)
            d2_ += 1 / (1 + m_ + k_); // d2 = digamma(1 + m + k)
            d3_ += 1 / (a_ + k_);     // d3 = digamma(a + k)
            d4_ -= 1 / (n_ - k_ - 1); // d4 = digamma(c - a - k)
            factor_ *= (b_ + m_ + k_) * (1 - c_ + b_ + m_ + k_) / ((k_ + 1) * (m_ + k_ + 1)) / z_;
            ++k_;
            return term;
        }
        if (k_ == n_) {
            /* When c - a approaches a positive integer and k_ >= c - a = n then
             * poch(1 - c + b + m + k) = poch(1 - c + a + k) = approaches zero and
             * digamma(c - a - k) approaches a pole. However we can use the limit
             * digamma(-n + epsilon) / gamma(-n + epsilon) -> (-1)**(n + 1) * (n+1)! as epsilon -> 0
             * to continue the series.
             *
             * poch(1 - c + b, m + k) = gamma(1 - c + b + m + k)/gamma(1 - c + b)
             *
             * If a - b is an integer and c - a is an integer, then a and b must both be integers, so assume
             * a and b are integers and take the limit as c approaches an integer.
             *
             * gamma(1 - c + epsilon + a + k)/gamma(1 - c - epsilon + b) =
             *     (gamma(c + epsilon - b) / gamma(c + epsilon - a - k)) *
             *     (sin(pi * (c + epsilon - b)) / sin(pi * (c + epsilon - a - k))) (reflection principle)
             *
             * In the limit as epsilon goes to zero, the ratio of sines will approach
             * (-1)**(a - b + k) = (-1)**(m + k)
             *
             * We may then replace
             *
             *     poch(1 - c - epsilon + b, m + k)*digamma(c + epsilon - a - k)
             *
             * with
             *
             *     (-1)**(a - b + k)*gamma(c + epsilon - b) * digamma(c + epsilon - a - k) / gamma(c + epsilon - a - k)
             *
             * and taking the limit epsilon -> 0 gives
             *
             *     (-1)**(a - b + k) * gamma(c - b) * (-1)**(k + a - c + 1)(k + a - c)!
             *     = (-1)**(c - b - 1)*Gamma(k + a - c + 1)
             */
            factor_ = std::pow(-1, m_ + n_) * special::binom(c_ - 1, b_ - 1) *
                      special::cephes::poch(c_ - a_ + 1, m_ - 1) / std::pow(z_, static_cast<double>(k_));
        }
        // At and beyond the switchover the digamma contributions have cancelled;
        // terms reduce to the running factor alone.
        term = factor_;
        factor_ *= (b_ + m_ + k_) * (k_ + a_ - c_ + 1) / ((k_ + 1) * (m_ + k_ + 1)) / z_;
        ++k_;
        return term;
    }

  private:
    double d1_, d2_, d3_, d4_, a_, b_, c_, m_, n_;
    std::complex<double> z_, log_neg_z_, factor_;
    std::uint64_t k_;
};
411
+
412
class Hyp2f1Transform2LimitFinitePartGenerator {
    /* Initial finite sum in limit as a - b approaches a non-negative integer m. The limiting series
     * for the 1 - z transform also has an initial finite sum, but it is a standard hypergeometric
     * series. */
  public:
    SPECFUN_HOST_DEVICE Hyp2f1Transform2LimitFinitePartGenerator(double b, double c, double m,
                                                                 std::complex<double> z)
        : b_(b), c_(c), m_(m), z_(z), term_(cephes::Gamma(m) / cephes::Gamma(c - b)), k_(0) {}

    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        std::complex<double> output = term_;
        // Exact ratio of consecutive terms of the finite 1/z sum:
        // (b + k)(c - b - k - 1) / ((k + 1)(m - k - 1) z).
        term_ = term_ * (b_ + k_) * (c_ - b_ - k_ - 1) / ((k_ + 1) * (m_ - k_ - 1)) / z_;
        ++k_;
        return output;
    }

  private:
    double b_, c_, m_;
    std::complex<double> z_, term_;
    std::uint64_t k_;
};
433
+
434
class LopezTemmeSeriesGenerator {
    /* Lopez-Temme Series for Gaussian hypergeometric function [4].
     *
     * Converges for all z with real(z) < 1, including in the regions surrounding
     * the points exp(+- i*pi/3) that are not covered by any of the standard
     * transformations.
     *
     * Z_ accumulates the prefactor (a)_n * (z/(z-2))^n / n!, and phi_ follows
     * a three-term recurrence in n (phi_previous_ holds phi at n - 1). */
  public:
    SPECFUN_HOST_DEVICE LopezTemmeSeriesGenerator(double a, double b, double c, std::complex<double> z)
        : n_(0), a_(a), b_(b), c_(c), phi_previous_(1.0), phi_(1 - 2 * b / c), z_(z), Z_(a * z / (z - 2.0)) {}

    SPECFUN_HOST_DEVICE std::complex<double> operator()() {
        if (n_ == 0) {
            // Term 0 of the series is identically 1.
            ++n_;
            return 1.0;
        }
        if (n_ > 1) { // Update phi and Z for n>=2; term 1 uses the initializer values directly.
            double new_phi = ((n_ - 1) * phi_previous_ - (2.0 * b_ - c_) * phi_) / (c_ + (n_ - 1));
            phi_previous_ = phi_;
            phi_ = new_phi;
            Z_ = Z_ * z_ / (z_ - 2.0) * ((a_ + (n_ - 1)) / n_);
        }
        ++n_;
        return Z_ * phi_;
    }

  private:
    std::uint64_t n_;
    double a_, b_, c_, phi_previous_, phi_;
    std::complex<double> z_, Z_;
};
465
+
466
+ SPECFUN_HOST_DEVICE std::complex<double> hyp2f1_transform1_limiting_case(double a, double b, double c, double m,
467
+ std::complex<double> z) {
468
+ /* 1 - z transform in limiting case where c - a - b approaches an integer m. */
469
+ std::complex<double> result = 0.0;
470
+ if (m >= 0) {
471
+ if (m != 0) {
472
+ auto series_generator = HypergeometricSeriesGenerator(a, b, 1 - m, 1.0 - z);
473
+ result += four_gammas(m, c, a + m, b + m) * series_eval_fixed_length(series_generator,
474
+ std::complex<double>{0.0, 0.0},
475
+ static_cast<std::uint64_t>(m));
476
+ }
477
+ std::complex<double> prefactor = std::pow(-1.0, m + 1) * special::cephes::Gamma(c) /
478
+ (special::cephes::Gamma(a) * special::cephes::Gamma(b)) *
479
+ std::pow(1.0 - z, m);
480
+ auto series_generator = Hyp2f1Transform1LimitSeriesGenerator(a + m, b + m, m, z);
481
+ result += prefactor * series_eval(series_generator, std::complex<double>{0.0, 0.0}, hyp2f1_EPS,
482
+ hyp2f1_MAXITER, "hyp2f1");
483
+ return result;
484
+ } else {
485
+ result = four_gammas(-m, c, a, b) * std::pow(1.0 - z, m);
486
+ auto series_generator1 = HypergeometricSeriesGenerator(a + m, b + m, 1 + m, 1.0 - z);
487
+ result *= series_eval_fixed_length(series_generator1, std::complex<double>{0.0, 0.0},
488
+ static_cast<std::uint64_t>(-m));
489
+ double prefactor = std::pow(-1.0, m + 1) * special::cephes::Gamma(c) /
490
+ (special::cephes::Gamma(a + m) * special::cephes::Gamma(b + m));
491
+ auto series_generator2 = Hyp2f1Transform1LimitSeriesGenerator(a, b, -m, z);
492
+ result += prefactor * series_eval(series_generator2, std::complex<double>{0.0, 0.0}, hyp2f1_EPS,
493
+ hyp2f1_MAXITER, "hyp2f1");
494
+ return result;
495
+ }
496
+ }
497
+
498
+ SPECFUN_HOST_DEVICE std::complex<double> hyp2f1_transform2_limiting_case(double a, double b, double c, double m,
499
+ std::complex<double> z) {
500
+ /* 1 / z transform in limiting case where a - b approaches a non-negative integer m. Negative integer case
501
+ * can be handled by swapping a and b. */
502
+ auto series_generator1 = Hyp2f1Transform2LimitFinitePartGenerator(b, c, m, z);
503
+ std::complex<double> result = cephes::Gamma(c) / cephes::Gamma(a) * std::pow(-z, -b);
504
+ result *=
505
+ series_eval_fixed_length(series_generator1, std::complex<double>{0.0, 0.0}, static_cast<std::uint64_t>(m));
506
+ std::complex<double> prefactor = cephes::Gamma(c) / (cephes::Gamma(a) * cephes::Gamma(c - b) * std::pow(-z, a));
507
+ double n = c - a;
508
+ if (abs(n - std::round(n)) < hyp2f1_EPS) {
509
+ auto series_generator2 = Hyp2f1Transform2LimitSeriesCminusAIntGenerator(a, b, c, m, n, z);
510
+ result += prefactor * series_eval(series_generator2, std::complex<double>{0.0, 0.0}, hyp2f1_EPS,
511
+ hyp2f1_MAXITER, "hyp2f1");
512
+ return result;
513
+ }
514
+ auto series_generator2 = Hyp2f1Transform2LimitSeriesGenerator(a, b, c, m, z);
515
+ result += prefactor *
516
+ series_eval(series_generator2, std::complex<double>{0.0, 0.0}, hyp2f1_EPS, hyp2f1_MAXITER, "hyp2f1");
517
+ return result;
518
+ }
519
+
520
+ } // namespace detail
521
+
522
/* Gauss hypergeometric function 2F1(a, b; c; z) for complex z.
 *
 * Dispatches among special values, polynomial reductions, and several series
 * transformations depending on the parameters and the location of z; the
 * branch order below is significant and mirrors the references cited in the
 * file header. */
SPECFUN_HOST_DEVICE inline std::complex<double> hyp2f1(double a, double b, double c, std::complex<double> z) {
    /* Special Cases
     * -----------------------------------------------------------------------
     * Takes constant value 1 when a = 0 or b = 0, even if c is a non-positive
     * integer. This follows mpmath. */
    if (a == 0 || b == 0) {
        return 1.0;
    }
    double z_abs = std::abs(z);
    // Equals 1 when z is 0, unless c is 0.
    if (z_abs == 0) {
        if (c != 0) {
            return 1.0;
        } else {
            // Returning real part NAN and imaginary part 0 follows mpmath.
            return std::complex<double>{std::numeric_limits<double>::quiet_NaN(), 0};
        }
    }
    bool a_neg_int = a == std::trunc(a) && a < 0;
    bool b_neg_int = b == std::trunc(b) && b < 0;
    bool c_non_pos_int = c == std::trunc(c) and c <= 0;
    /* Diverges when c is a non-positive integer unless a is an integer with
     * c <= a <= 0 or b is an integer with c <= b <= 0, (or z equals 0 with
     * c != 0) Cases z = 0, a = 0, or b = 0 have already been handled. We follow
     * mpmath in handling the degenerate cases where any of a, b, c are
     * non-positive integers. See [3] for a treatment of degenerate cases. */
    if (c_non_pos_int && !((a_neg_int && c <= a && a < 0) || (b_neg_int && c <= b && b < 0))) {
        return std::complex<double>{std::numeric_limits<double>::infinity(), 0};
    }
    /* Reduces to a polynomial when a or b is a negative integer.
     * If a and b are both negative integers, we take care to terminate
     * the series at a or b of smaller magnitude. This is to ensure proper
     * handling of situations like a < c < b <= 0, a, b, c all non-positive
     * integers, where terminating at a would lead to a term of the form 0 / 0. */
    std::uint64_t max_degree;
    if (a_neg_int || b_neg_int) {
        if (a_neg_int && b_neg_int) {
            max_degree = a > b ? std::abs(a) : std::abs(b);
        } else if (a_neg_int) {
            max_degree = std::abs(a);
        } else {
            max_degree = std::abs(b);
        }
        /* NOTE(review): max_degree is already uint64_t so this comparison is
         * always true; presumably the intent was to guard the double->uint64
         * conversion above before it happens — TODO confirm and hoist. */
        if (max_degree <= UINT64_MAX) {
            auto series_generator = detail::HypergeometricSeriesGenerator(a, b, c, z);
            return detail::series_eval_fixed_length(series_generator, std::complex<double>{0.0, 0.0}, max_degree + 1);
        } else {
            set_error("hyp2f1", SF_ERROR_NO_RESULT, NULL);
            return std::complex<double>{std::numeric_limits<double>::quiet_NaN(),
                                        std::numeric_limits<double>::quiet_NaN()};
        }
    }
    // Kummer's Theorem for z = -1; c = 1 + a - b (DLMF 15.4.26)
    if (std::abs(z + 1.0) < detail::hyp2f1_EPS && std::abs(1 + a - b - c) < detail::hyp2f1_EPS && !c_non_pos_int) {
        return detail::four_gammas(a - b + 1, 0.5 * a + 1, a + 1, 0.5 * a - b + 1);
    }
    std::complex<double> result;
    bool c_minus_a_neg_int = c - a == std::trunc(c - a) && c - a < 0;
    bool c_minus_b_neg_int = c - b == std::trunc(c - b) && c - b < 0;
    /* If one of c - a or c - b is a negative integer, reduces to evaluating
     * a polynomial through an Euler hypergeometric transformation.
     * (DLMF 15.8.1) */
    if (c_minus_a_neg_int || c_minus_b_neg_int) {
        max_degree = c_minus_b_neg_int ? std::abs(c - b) : std::abs(c - a);
        // NOTE(review): same tautological uint64 guard as above — TODO confirm intent.
        if (max_degree <= UINT64_MAX) {
            result = std::pow(1.0 - z, c - a - b);
            auto series_generator = detail::HypergeometricSeriesGenerator(c - a, c - b, c, z);
            result *=
                detail::series_eval_fixed_length(series_generator, std::complex<double>{0.0, 0.0}, max_degree + 2);
            return result;
        } else {
            set_error("hyp2f1", SF_ERROR_NO_RESULT, NULL);
            return std::complex<double>{std::numeric_limits<double>::quiet_NaN(),
                                        std::numeric_limits<double>::quiet_NaN()};
        }
    }
    /* Diverges as real(z) -> 1 when c <= a + b.
     * Todo: Actually check for overflow instead of using a fixed tolerance for
     * all parameter combinations like in the Fortran original. */
    if (std::abs(1 - z.real()) < detail::hyp2f1_EPS && z.imag() == 0 && c - a - b <= 0 && !c_non_pos_int) {
        return std::complex<double>{std::numeric_limits<double>::infinity(), 0};
    }
    // Gauss's Summation Theorem for z = 1; c - a - b > 0 (DLMF 15.4.20).
    if (z == 1.0 && c - a - b > 0 && !c_non_pos_int) {
        return detail::four_gammas(c, c - a - b, c - a, c - b);
    }
    /* |z| < 0.9, z.real() >= 0. Use the Maclaurin Series.
     * -----------------------------------------------------------------------
     * Apply Euler Hypergeometric Transformation (DLMF 15.8.1) to reduce
     * size of a and b if possible. We follow Zhang and Jin's
     * implementation [1] although there is very likely a better heuristic
     * to determine when this transformation should be applied. As it
     * stands, this hurts precision in some cases. */
    if (z_abs < 0.9 && z.real() >= 0) {
        if (c - a < a && c - b < b) {
            result = std::pow(1.0 - z, c - a - b);
            auto series_generator = detail::HypergeometricSeriesGenerator(c - a, c - b, c, z);
            result *= detail::series_eval(series_generator, std::complex<double>{0.0, 0.0}, detail::hyp2f1_EPS,
                                          detail::hyp2f1_MAXITER, "hyp2f1");
            return result;
        }
        auto series_generator = detail::HypergeometricSeriesGenerator(a, b, c, z);
        return detail::series_eval(series_generator, std::complex<double>{0.0, 0.0}, detail::hyp2f1_EPS,
                                   detail::hyp2f1_MAXITER, "hyp2f1");
    }
    /* Points near exp(iπ/3), exp(-iπ/3) not handled by any of the standard
     * transformations. Use series of López and Temme [5]. These regions
     * were not correctly handled by Zhang and Jin's implementation.
     * -------------------------------------------------------------------------*/
    if (0.9 <= z_abs && z_abs < 1.1 && std::abs(1.0 - z) >= 0.9 && z.real() >= 0) {
        /* This condition for applying Euler Transformation (DLMF 15.8.1)
         * was determined empirically to work better for this case than that
         * used in Zhang and Jin's implementation for |z| < 0.9,
         * real(z) >= 0. */
        if ((c - a <= a && c - b < b) || (c - a < a && c - b <= b)) {
            auto series_generator = detail::LopezTemmeSeriesGenerator(c - a, c - b, c, z);
            result = std::pow(1.0 - 0.5 * z, a - c); // Lopez-Temme prefactor
            result *= detail::series_eval(series_generator, std::complex<double>{0.0, 0.0}, detail::hyp2f1_EPS,
                                          detail::hyp2f1_MAXITER, "hyp2f1");
            return std::pow(1.0 - z, c - a - b) * result; // Euler transform prefactor.
        }
        auto series_generator = detail::LopezTemmeSeriesGenerator(a, b, c, z);
        result = detail::series_eval(series_generator, std::complex<double>{0.0, 0.0}, detail::hyp2f1_EPS,
                                     detail::hyp2f1_MAXITER, "hyp2f1");
        return std::pow(1.0 - 0.5 * z, -a) * result; // Lopez-Temme prefactor.
    }
    /* z/(z - 1) transformation (DLMF 15.8.1). Avoids cancellation issues that
     * occur with Maclaurin series for real(z) < 0.
     * -------------------------------------------------------------------------*/
    if (z_abs < 1.1 && z.real() < 0) {
        if (0 < b && b < a && a < c) {
            // Keep the smaller of a, b in first position for better conditioning.
            std::swap(a, b);
        }
        auto series_generator = detail::HypergeometricSeriesGenerator(a, c - b, c, z / (z - 1.0));
        return std::pow(1.0 - z, -a) * detail::series_eval(series_generator, std::complex<double>{0.0, 0.0},
                                                           detail::hyp2f1_EPS, detail::hyp2f1_MAXITER, "hyp2f1");
    }
    /* 1 - z transformation (DLMF 15.8.4). */
    if (0.9 <= z_abs && z_abs < 1.1) {
        if (std::abs(c - a - b - std::round(c - a - b)) < detail::hyp2f1_EPS) {
            // Removable singularity when c - a - b is an integer. Need to use limiting formula.
            double m = std::round(c - a - b);
            return detail::hyp2f1_transform1_limiting_case(a, b, c, m, z);
        }
        auto series_generator = detail::Hyp2f1Transform1Generator(a, b, c, z);
        return detail::series_eval(series_generator, std::complex<double>{0.0, 0.0}, detail::hyp2f1_EPS,
                                   detail::hyp2f1_MAXITER, "hyp2f1");
    }
    /* 1/z transformation (DLMF 15.8.2). */
    if (std::abs(a - b - std::round(a - b)) < detail::hyp2f1_EPS) {
        // Removable singularity when a - b is an integer; normalize so a >= b.
        if (b > a) {
            std::swap(a, b);
        }
        double m = std::round(a - b);
        return detail::hyp2f1_transform2_limiting_case(a, b, c, m, z);
    }
    auto series_generator = detail::Hyp2f1Transform2Generator(a, b, c, z);
    return detail::series_eval(series_generator, std::complex<double>{0.0, 0.0}, detail::hyp2f1_EPS,
                               detail::hyp2f1_MAXITER, "hyp2f1");
}
682
+
683
+ inline std::complex<float> hyp2f1(float a, float b, float c, std::complex<float> x) {
684
+ return static_cast<std::complex<float>>(hyp2f1(static_cast<double>(a), static_cast<double>(b),
685
+ static_cast<double>(c), static_cast<std::complex<double>>(x)));
686
+ }
687
+
688
// Real-argument case delegates to the Cephes implementation.
inline double hyp2f1(double a, double b, double c, double x) { return cephes::hyp2f1(a, b, c, x); }
689
+
690
inline float hyp2f1(float a, float b, float c, float x) {
    // Promote to double precision and delegate to the real-valued overload.
    double res = hyp2f1(static_cast<double>(a), static_cast<double>(b), static_cast<double>(c),
                        static_cast<double>(x));
    return static_cast<float>(res);
}
693
+
694
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/lambertw.h ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2023.
2
+ * Original header with Copyright information appears below.
3
+ */
4
+
5
+ /* Implementation of the Lambert W function [1]. Based on MPMath
6
+ * Implementation [2], and documentation [3].
7
+ *
8
+ * Copyright: Yosef Meller, 2009
9
+ * Author email: mellerf@netvision.net.il
10
+ *
11
+ * Distributed under the same license as SciPy
12
+ *
13
+ *
14
+ * References:
15
+ * [1] On the Lambert W function, Adv. Comp. Math. 5 (1996) 329-359,
16
+ * available online: https://web.archive.org/web/20230123211413/https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf
17
+ * [2] mpmath source code,
18
+ https://github.com/mpmath/mpmath/blob/c5939823669e1bcce151d89261b802fe0d8978b4/mpmath/functions/functions.py#L435-L461
19
+ * [3]
20
+ https://web.archive.org/web/20230504171447/https://mpmath.org/doc/current/functions/powers.html#lambert-w-function
21
+ *
22
+
23
+ * TODO: use a series expansion when extremely close to the branch point
24
+ * at `-1/e` and make sure that the proper branch is chosen there.
25
+ */
26
+
27
+ #pragma once
28
+
29
+ #include "config.h"
30
+ #include "error.h"
31
+ #include "evalpoly.h"
32
+
33
+ namespace special {
34
+ constexpr double EXPN1 = 0.36787944117144232159553; // exp(-1)
35
+ constexpr double OMEGA = 0.56714329040978387299997; // W(1, 0)
36
+
37
+ namespace detail {
38
+ SPECFUN_HOST_DEVICE inline std::complex<double> lambertw_branchpt(std::complex<double> z) {
39
+ // Series for W(z, 0) around the branch point; see 4.22 in [1].
40
+ double coeffs[] = {-1.0 / 3.0, 1.0, -1.0};
41
+ std::complex<double> p = std::sqrt(2.0 * (M_E * z + 1.0));
42
+
43
+ return cevalpoly(coeffs, 2, p);
44
+ }
45
+
46
+ SPECFUN_HOST_DEVICE inline std::complex<double> lambertw_pade0(std::complex<double> z) {
47
+ // (3, 2) Pade approximation for W(z, 0) around 0.
48
+ double num[] = {12.85106382978723404255, 12.34042553191489361902, 1.0};
49
+ double denom[] = {32.53191489361702127660, 14.34042553191489361702, 1.0};
50
+
51
+ /* This only gets evaluated close to 0, so we don't need a more
52
+ * careful algorithm that avoids overflow in the numerator for
53
+ * large z. */
54
+ return z * cevalpoly(num, 2, z) / cevalpoly(denom, 2, z);
55
+ }
56
+
57
+ SPECFUN_HOST_DEVICE inline std::complex<double> lambertw_asy(std::complex<double> z, long k) {
58
+ /* Compute the W function using the first two terms of the
59
+ * asymptotic series. See 4.20 in [1].
60
+ */
61
+ std::complex<double> w = std::log(z) + 2.0 * M_PI * k * std::complex<double>(0, 1);
62
+ return w - std::log(w);
63
+ }
64
+
65
+ } // namespace detail
66
+
67
/* Lambert W function on branch k for complex z.
 *
 * Handles NaN/infinite/zero special values directly, chooses an initial
 * guess from one of three regimes (branch-point series, Pade approximant,
 * asymptotic series), then refines with Halley's method to relative
 * tolerance `tol` (at most 100 iterations). On non-convergence sets
 * SF_ERROR_SLOW and returns NaN + NaN*i. */
SPECFUN_HOST_DEVICE inline std::complex<double> lambertw(std::complex<double> z, long k, double tol) {
    double absz;
    std::complex<double> w;
    std::complex<double> ew, wew, wewz, wn;

    if (std::isnan(z.real()) || std::isnan(z.imag())) {
        return z;
    }
    if (z.real() == std::numeric_limits<double>::infinity()) {
        // W(+inf) = +inf, shifted onto branch k in the imaginary direction.
        return z + 2.0 * M_PI * k * std::complex<double>(0, 1);
    }
    if (z.real() == -std::numeric_limits<double>::infinity()) {
        return -z + (2.0 * M_PI * k + M_PI) * std::complex<double>(0, 1);
    }
    if (z == 0.0) {
        if (k == 0) {
            return z;
        }
        // Every branch other than the principal one has a singularity at 0.
        set_error("lambertw", SF_ERROR_SINGULAR, NULL);
        return -std::numeric_limits<double>::infinity();
    }
    if (z == 1.0 && k == 0) {
        // Split out this case because the asymptotic series blows up
        return OMEGA;
    }

    absz = std::abs(z);
    // Get an initial guess for Halley's method
    if (k == 0) {
        if (std::abs(z + EXPN1) < 0.3) {
            // Near the branch point -1/e: use the branch-point series.
            w = detail::lambertw_branchpt(z);
        } else if (-1.0 < z.real() && z.real() < 1.5 && std::abs(z.imag()) < 1.0 &&
                   -2.5 * std::abs(z.imag()) - 0.2 < z.real()) {
            /* Empirically determined decision boundary where the Pade
             * approximation is more accurate. */
            w = detail::lambertw_pade0(z);
        } else {
            w = detail::lambertw_asy(z, k);
        }
    } else if (k == -1) {
        if (absz <= EXPN1 && z.imag() == 0.0 && z.real() < 0.0) {
            // Real segment of branch -1: W is real; start from log(-x).
            w = std::log(-z.real());
        } else {
            w = detail::lambertw_asy(z, k);
        }
    } else {
        w = detail::lambertw_asy(z, k);
    }

    // Halley's method; see 5.9 in [1]
    if (w.real() >= 0) {
        // Rearrange the formula to avoid overflow in exp
        for (int i = 0; i < 100; i++) {
            ew = std::exp(-w);
            wewz = w - z * ew;
            wn = w - wewz / (w + 1.0 - (w + 2.0) * wewz / (2.0 * w + 2.0));
            if (std::abs(wn - w) <= tol * std::abs(wn)) {
                return wn;
            }
            w = wn;
        }
    } else {
        for (int i = 0; i < 100; i++) {
            ew = std::exp(w);
            wew = w * ew;
            wewz = wew - z;
            wn = w - wewz / (wew + ew - (w + 2.0) * wewz / (2.0 * w + 2.0));
            if (std::abs(wn - w) <= tol * std::abs(wn)) {
                return wn;
            }
            w = wn;
        }
    }

    // Halley iteration did not converge within 100 steps.
    set_error("lambertw", SF_ERROR_SLOW, "iteration failed to converge: %g + %gj", z.real(), z.imag());
    return {std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()};
}
144
+
145
+ SPECFUN_HOST_DEVICE inline std::complex<float> lambertw(std::complex<float> z, long k, float tol) {
146
+ return static_cast<std::complex<float>>(
147
+ lambertw(static_cast<std::complex<double>>(z), k, static_cast<double>(tol)));
148
+ }
149
+
150
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/loggamma.h ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2024.
2
+ * Original header comment appears below.
3
+ */
4
+
5
+ /* An implementation of the principal branch of the logarithm of
6
+ * Gamma. Also contains implementations of Gamma and 1/Gamma which are
7
+ * easily computed from log-Gamma.
8
+ *
9
+ * Author: Josh Wilson
10
+ *
11
+ * Distributed under the same license as Scipy.
12
+ *
13
+ * References
14
+ * ----------
15
+ * [1] Hare, "Computing the Principal Branch of log-Gamma",
16
+ * Journal of Algorithms, 1997.
17
+ *
18
+ * [2] Julia,
19
+ * https://github.com/JuliaLang/julia/blob/master/base/special/gamma.jl
20
+ */
21
+
22
+ #pragma once
23
+
24
+ #include "cephes/gamma.h"
25
+ #include "cephes/rgamma.h"
26
+ #include "config.h"
27
+ #include "error.h"
28
+ #include "evalpoly.h"
29
+ #include "trig.h"
30
+ #include "zlog1.h"
31
+
32
+ namespace special {
33
+
34
+ namespace detail {
35
+ constexpr double loggamma_SMALLX = 7;
36
+ constexpr double loggamma_SMALLY = 7;
37
+ constexpr double loggamma_HLOG2PI = 0.918938533204672742; // log(2*pi)/2
38
+ constexpr double loggamma_LOGPI = 1.1447298858494001741434262; // log(pi)
39
+ constexpr double loggamma_TAYLOR_RADIUS = 0.2;
40
+
41
+ SPECFUN_HOST_DEVICE std::complex<double> loggamma_stirling(std::complex<double> z) {
42
+ /* Stirling series for log-Gamma
43
+ *
44
+ * The coefficients are B[2*n]/(2*n*(2*n - 1)) where B[2*n] is the
45
+ * (2*n)th Bernoulli number. See (1.1) in [1].
46
+ */
47
+ double coeffs[] = {-2.955065359477124183E-2, 6.4102564102564102564E-3, -1.9175269175269175269E-3,
48
+ 8.4175084175084175084E-4, -5.952380952380952381E-4, 7.9365079365079365079E-4,
49
+ -2.7777777777777777778E-3, 8.3333333333333333333E-2};
50
+ std::complex<double> rz = 1.0 / z;
51
+ std::complex<double> rzz = rz / z;
52
+
53
+ return (z - 0.5) * std::log(z) - z + loggamma_HLOG2PI + rz * cevalpoly(coeffs, 7, rzz);
54
+ }
55
+
56
+ SPECFUN_HOST_DEVICE std::complex<double> loggamma_recurrence(std::complex<double> z) {
57
+ /* Backward recurrence relation.
58
+ *
59
+ * See Proposition 2.2 in [1] and the Julia implementation [2].
60
+ *
61
+ */
62
+ int signflips = 0;
63
+ int sb = 0;
64
+ std::complex<double> shiftprod = z;
65
+
66
+ z += 1.0;
67
+ int nsb;
68
+ while (z.real() <= loggamma_SMALLX) {
69
+ shiftprod *= z;
70
+ nsb = std::signbit(shiftprod.imag());
71
+ signflips += nsb != 0 && sb == 0 ? 1 : 0;
72
+ sb = nsb;
73
+ z += 1.0;
74
+ }
75
+ return loggamma_stirling(z) - std::log(shiftprod) - signflips * 2 * M_PI * std::complex<double>(0, 1);
76
+ }
77
+
78
+ SPECFUN_HOST_DEVICE std::complex<double> loggamma_taylor(std::complex<double> z) {
79
+ /* Taylor series for log-Gamma around z = 1.
80
+ *
81
+ * It is
82
+ *
83
+ * loggamma(z + 1) = -gamma*z + zeta(2)*z**2/2 - zeta(3)*z**3/3 ...
84
+ *
85
+ * where gamma is the Euler-Mascheroni constant.
86
+ */
87
+
88
+ double coeffs[] = {
89
+ -4.3478266053040259361E-2, 4.5454556293204669442E-2, -4.7619070330142227991E-2, 5.000004769810169364E-2,
90
+ -5.2631679379616660734E-2, 5.5555767627403611102E-2, -5.8823978658684582339E-2, 6.2500955141213040742E-2,
91
+ -6.6668705882420468033E-2, 7.1432946295361336059E-2, -7.6932516411352191473E-2, 8.3353840546109004025E-2,
92
+ -9.0954017145829042233E-2, 1.0009945751278180853E-1, -1.1133426586956469049E-1, 1.2550966952474304242E-1,
93
+ -1.4404989676884611812E-1, 1.6955717699740818995E-1, -2.0738555102867398527E-1, 2.7058080842778454788E-1,
94
+ -4.0068563438653142847E-1, 8.2246703342411321824E-1, -5.7721566490153286061E-1};
95
+
96
+ z -= 1.0;
97
+ return z * cevalpoly(coeffs, 22, z);
98
+ }
99
+ } // namespace detail
100
+
101
+ SPECFUN_HOST_DEVICE inline double loggamma(double x) {
102
+ if (x < 0.0) {
103
+ return std::numeric_limits<double>::quiet_NaN();
104
+ }
105
+ return cephes::lgam(x);
106
+ }
107
+
108
// Single-precision wrapper: evaluate in double and narrow the result.
SPECFUN_HOST_DEVICE inline float loggamma(float x) { return loggamma(static_cast<double>(x)); }
109
+
110
/* Principal branch of log-Gamma for complex z.
 *
 * NaN components propagate; the poles at the non-positive integers set
 * SF_ERROR_SINGULAR and return NaN. Otherwise the evaluation strategy is
 * chosen by region: Stirling series far from the origin, Taylor series
 * near 1 and 2, the reflection formula for real(z) < 0.1, and the
 * backward recurrence elsewhere (conjugated for the lower half-plane). */
SPECFUN_HOST_DEVICE inline std::complex<double> loggamma(std::complex<double> z) {
    // Compute the principal branch of log-Gamma

    if (std::isnan(z.real()) || std::isnan(z.imag())) {
        return {std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()};
    }
    if (z.real() <= 0 and z == std::floor(z.real())) {
        // Poles at z = 0, -1, -2, ...
        set_error("loggamma", SF_ERROR_SINGULAR, NULL);
        return {std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()};
    }
    if (z.real() > detail::loggamma_SMALLX || std::abs(z.imag()) > detail::loggamma_SMALLY) {
        // Far enough from the origin for Stirling's series to converge.
        return detail::loggamma_stirling(z);
    }
    if (std::abs(z - 1.0) < detail::loggamma_TAYLOR_RADIUS) {
        return detail::loggamma_taylor(z);
    }
    if (std::abs(z - 2.0) < detail::loggamma_TAYLOR_RADIUS) {
        // Recurrence relation and the Taylor series around 1.
        return detail::zlog1(z - 1.0) + detail::loggamma_taylor(z - 1.0);
    }
    if (z.real() < 0.1) {
        // Reflection formula; see Proposition 3.1 in [1]
        double tmp = std::copysign(2 * M_PI, z.imag()) * std::floor(0.5 * z.real() + 0.25);
        return std::complex<double>(detail::loggamma_LOGPI, tmp) - std::log(sinpi(z)) - loggamma(1.0 - z);
    }
    if (std::signbit(z.imag()) == 0) {
        // z.imag() >= 0 but is not -0.0
        return detail::loggamma_recurrence(z);
    }
    // Lower half-plane: use the reflection loggamma(conj(z)) = conj(loggamma(z)).
    return std::conj(detail::loggamma_recurrence(std::conj(z)));
}
141
+
142
+ SPECFUN_HOST_DEVICE inline std::complex<float> loggamma(std::complex<float> z) {
143
+ return static_cast<std::complex<float>>(loggamma(static_cast<std::complex<double>>(z)));
144
+ }
145
+
146
// Reciprocal Gamma for real arguments; delegates to the Cephes routine.
SPECFUN_HOST_DEVICE inline double rgamma(double z) { return cephes::rgamma(z); }
147
+
148
// Single-precision wrapper: evaluate in double and narrow the result.
SPECFUN_HOST_DEVICE inline float rgamma(float z) { return rgamma(static_cast<double>(z)); }
149
+
150
+ SPECFUN_HOST_DEVICE inline std::complex<double> rgamma(std::complex<double> z) {
151
+ // Compute 1/Gamma(z) using loggamma.
152
+ if (z.real() <= 0 && z == std::floor(z.real())) {
153
+ // Zeros at 0, -1, -2, ...
154
+ return 0.0;
155
+ }
156
+ return std::exp(-loggamma(z));
157
+ }
158
+
159
+ SPECFUN_HOST_DEVICE inline std::complex<float> rgamma(std::complex<float> z) {
160
+ return static_cast<std::complex<float>>(rgamma(static_cast<std::complex<double>>(z)));
161
+ }
162
+
163
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/tools.h ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Building blocks for implementing special functions */
2
+
3
+ #pragma once
4
+
5
+ #include "config.h"
6
+ #include "error.h"
7
+
8
+ namespace special {
9
+ namespace detail {
10
+
11
+ /* Result type of a "generator", a callable object that produces a value
12
+ * each time it is called.
13
+ */
14
+ template <typename Generator>
15
+ using generator_result_t = std::decay_t<std::invoke_result_t<Generator>>;
16
+
17
+ /* Used to deduce the type of the numerator/denominator of a fraction. */
18
+ template <typename Pair>
19
+ struct pair_traits;
20
+
21
+ template <typename T>
22
+ struct pair_traits<std::pair<T, T>> {
23
+ using value_type = T;
24
+ };
25
+
26
+ template <typename Pair>
27
+ using pair_value_t = typename pair_traits<Pair>::value_type;
28
+
29
+ /* Used to extract the "value type" of a complex type. */
30
+ template <typename T>
31
+ struct real_type {
32
+ using type = T;
33
+ };
34
+
35
+ template <typename T>
36
+ struct real_type<std::complex<T>> {
37
+ using type = T;
38
+ };
39
+
40
+ template <typename T>
41
+ using real_type_t = typename real_type<T>::type;
42
+
43
+ // Return NaN, handling both real and complex types.
44
+ template <typename T>
45
+ SPECFUN_HOST_DEVICE inline std::enable_if_t<std::is_floating_point_v<T>, T> maybe_complex_NaN() {
46
+ return std::numeric_limits<T>::quiet_NaN();
47
+ }
48
+
49
+ template <typename T>
50
+ SPECFUN_HOST_DEVICE inline std::enable_if_t<!std::is_floating_point_v<T>, T> maybe_complex_NaN() {
51
+ using V = typename T::value_type;
52
+ return {std::numeric_limits<V>::quiet_NaN(), std::numeric_limits<V>::quiet_NaN()};
53
+ }
54
+
55
+ // Series evaluators.
56
+ template <typename Generator, typename T = generator_result_t<Generator>>
57
+ SPECFUN_HOST_DEVICE T series_eval(Generator &g, T init_val, real_type_t<T> tol, std::uint64_t max_terms,
58
+ const char *func_name) {
59
+ /* Sum an infinite series to a given precision.
60
+ *
61
+ * g : a generator of terms for the series.
62
+ *
63
+ * init_val : A starting value that terms are added to. This argument determines the
64
+ * type of the result.
65
+ *
66
+ * tol : relative tolerance for stopping criterion.
67
+ *
68
+ * max_terms : The maximum number of terms to add before giving up and declaring
69
+ * non-convergence.
70
+ *
71
+ * func_name : The name of the function within SciPy where this call to series_eval
72
+ * will ultimately be used. This is needed to pass to set_error in case
73
+ * of non-convergence.
74
+ */
75
+ T result = init_val;
76
+ T term;
77
+ for (std::uint64_t i = 0; i < max_terms; ++i) {
78
+ term = g();
79
+ result += term;
80
+ if (std::abs(term) < std::abs(result) * tol) {
81
+ return result;
82
+ }
83
+ }
84
+ // Exceeded max terms without converging. Return NaN.
85
+ set_error(func_name, SF_ERROR_NO_RESULT, NULL);
86
+ return maybe_complex_NaN<T>();
87
+ }
88
+
89
+ template <typename Generator, typename T = generator_result_t<Generator>>
90
+ SPECFUN_HOST_DEVICE T series_eval_fixed_length(Generator &g, T init_val, std::uint64_t num_terms) {
91
+ /* Sum a fixed number of terms from a series.
92
+ *
93
+ * g : a generator of terms for the series.
94
+ *
95
+ * init_val : A starting value that terms are added to. This argument determines the
96
+ * type of the result.
97
+ *
98
+ * max_terms : The number of terms from the series to sum.
99
+ *
100
+ */
101
+ T result = init_val;
102
+ for (std::uint64_t i = 0; i < num_terms; ++i) {
103
+ result += g();
104
+ }
105
+ return result;
106
+ }
107
+
108
+ /* Performs one step of Kahan summation. */
109
+ template <typename T>
110
+ SPECFUN_HOST_DEVICE void kahan_step(T& sum, T& comp, T x) {
111
+ T y = x - comp;
112
+ T t = sum + y;
113
+ comp = (t - sum) - y;
114
+ sum = t;
115
+ }
116
+
117
+ /* Evaluates an infinite series using Kahan summation.
118
+ *
119
+ * Denote the series by
120
+ *
121
+ * S = a[0] + a[1] + a[2] + ...
122
+ *
123
+ * And for n = 0, 1, 2, ..., denote its n-th partial sum by
124
+ *
125
+ * S[n] = a[0] + a[1] + ... + a[n]
126
+ *
127
+ * This function computes S[0], S[1], ... until a[n] is sufficiently
128
+ * small or if the maximum number of terms have been evaluated.
129
+ *
130
+ * Parameters
131
+ * ----------
132
+ * g
133
+ * Reference to generator that yields the sequence of values a[1],
134
+ * a[2], a[3], ...
135
+ *
136
+ * tol
137
+ * Relative tolerance for convergence. Specifically, stop iteration
138
+ * as soon as `abs(a[n]) <= tol * abs(S[n])` for some n >= 1.
139
+ *
140
+ * max_terms
141
+ * Maximum number of terms after a[0] to evaluate. It should be set
142
+ * large enough such that the convergence criterion is guaranteed
143
+ * to have been satisfied within that many terms if there is no
144
+ * rounding error.
145
+ *
146
+ * init_val
147
+ * a[0]. Default is zero. The type of this parameter (T) is used
148
+ * for intermediary computations as well as the result.
149
+ *
150
+ * Return Value
151
+ * ------------
152
+ * If the convergence criterion is satisfied by some `n <= max_terms`,
153
+ * returns `(S[n], n)`. Otherwise, returns `(S[max_terms], 0)`.
154
+ */
155
+ template <typename Generator, typename T = generator_result_t<Generator>>
156
+ SPECFUN_HOST_DEVICE std::pair<T, std::uint64_t> series_eval_kahan(
157
+ Generator &&g, real_type_t<T> tol, std::uint64_t max_terms, T init_val = T(0)) {
158
+
159
+ T sum = init_val;
160
+ T comp = 0;
161
+ for (std::uint64_t i = 0; i < max_terms; ++i) {
162
+ T term = g();
163
+ kahan_step(sum, comp, term);
164
+ if (std::abs(term) <= tol * std::abs(sum)) {
165
+ return {sum, i + 1};
166
+ }
167
+ }
168
+ return {sum, 0};
169
+ }
170
+
171
+ /* Generator that yields the difference of successive convergents of a
172
+ * continued fraction.
173
+ *
174
+ * Let f[n] denote the n-th convergent of a continued fraction:
175
+ *
176
+ * a[1] a[2] a[n]
177
+ * f[n] = b[0] + ------ ------ ... ----
178
+ * b[1] + b[2] + b[n]
179
+ *
180
+ * with f[0] = b[0]. This generator yields the sequence of values
181
+ * f[1]-f[0], f[2]-f[1], f[3]-f[2], ...
182
+ *
183
+ * Constructor Arguments
184
+ * ---------------------
185
+ * cf
186
+ * Reference to generator that yields the terms of the continued
187
+ * fraction as (numerator, denominator) pairs, starting from
188
+ * (a[1], b[1]).
189
+ *
190
+ * `cf` must outlive the ContinuedFractionSeriesGenerator object.
191
+ *
192
+ * The constructed object always eagerly retrieves the next term
193
+ * of the continued fraction. Specifically, (a[1], b[1]) is
194
+ * retrieved upon construction, and (a[n], b[n]) is retrieved after
195
+ * (n-1) calls of `()`.
196
+ *
197
+ * Type Arguments
198
+ * --------------
199
+ * T
200
+ * Type in which computations are performed and results are turned.
201
+ *
202
+ * Remarks
203
+ * -------
204
+ * The series is computed using the recurrence relation described in [1].
205
+ *
206
+ * No error checking is performed. The caller must ensure that all terms
207
+ * are finite and that intermediary computations do not trigger floating
208
+ * point exceptions such as overflow.
209
+ *
210
+ * The numerical stability of this method depends on the characteristics
211
+ * of the continued fraction being evaluated.
212
+ *
213
+ * Reference
214
+ * ---------
215
+ * [1] Gautschi, W. (1967). “Computational Aspects of Three-Term
216
+ * Recurrence Relations.” SIAM Review, 9(1):24-82.
217
+ */
218
+ template <typename Generator, typename T = pair_value_t<generator_result_t<Generator>>>
219
+ class ContinuedFractionSeriesGenerator {
220
+
221
+ public:
222
+ explicit ContinuedFractionSeriesGenerator(Generator &cf) : cf_(cf) {
223
+ init();
224
+ }
225
+
226
+ double operator()() {
227
+ double v = v_;
228
+ advance();
229
+ return v;
230
+ }
231
+
232
+ private:
233
+ void init() {
234
+ auto [num, denom] = cf_();
235
+ T a = num;
236
+ T b = denom;
237
+ u_ = T(1);
238
+ v_ = a / b;
239
+ b_ = b;
240
+ }
241
+
242
+ void advance() {
243
+ auto [num, denom] = cf_();
244
+ T a = num;
245
+ T b = denom;
246
+ u_ = T(1) / (T(1) + (a * u_) / (b * b_));
247
+ v_ *= (u_ - T(1));
248
+ b_ = b;
249
+ }
250
+
251
+ Generator& cf_; // reference to continued fraction generator
252
+ T v_; // v[n] == f[n] - f[n-1], n >= 1
253
+ T u_; // u[1] = 1, u[n] = v[n]/v[n-1], n >= 2
254
+ T b_; // last denominator, i.e. b[n-1]
255
+ };
256
+
257
+ /* Converts a continued fraction into a series whose terms are the
258
+ * difference of its successive convergents.
259
+ *
260
+ * See ContinuedFractionSeriesGenerator for details.
261
+ */
262
/* Factory: wraps a continued-fraction term generator in a
 * ContinuedFractionSeriesGenerator (see that class for details). */
template <typename Generator, typename T = pair_value_t<generator_result_t<Generator>>>
SPECFUN_HOST_DEVICE ContinuedFractionSeriesGenerator<Generator, T>
continued_fraction_series(Generator &cf) {
    return ContinuedFractionSeriesGenerator<Generator, T>(cf);
}
267
+
268
+ } // namespace detail
269
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/trig.h ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2023.
2
+ *
3
+ * Original author: Josh Wilson, 2016.
4
+ */
5
+
6
+ /* Implement sin(pi*z) and cos(pi*z) for complex z. Since the periods
7
+ * of these functions are integral (and thus better representable in
8
+ * floating point), it's possible to compute them with greater accuracy
9
+ * than sin(z), cos(z).
10
+ */
11
+
12
+ #pragma once
13
+
14
+ #include "cephes/trig.h"
15
+ #include "config.h"
16
+ #include "evalpoly.h"
17
+
18
+ namespace special {
19
+
20
// sin(pi*x) for real x: forwards to the Cephes implementation, which is
// more accurate than std::sin(M_PI * x) because the period is integral.
template <typename T>
SPECFUN_HOST_DEVICE T sinpi(T x) {
    return cephes::sinpi(x);
}
24
+
25
+ template <typename T>
26
+ SPECFUN_HOST_DEVICE std::complex<T> sinpi(std::complex<T> z) {
27
+ T x = z.real();
28
+ T piy = M_PI * z.imag();
29
+ T abspiy = std::abs(piy);
30
+ T sinpix = cephes::sinpi(x);
31
+ T cospix = cephes::cospi(x);
32
+
33
+ if (abspiy < 700) {
34
+ return {sinpix * std::cosh(piy), cospix * std::sinh(piy)};
35
+ }
36
+
37
+ /* Have to be careful--sinh/cosh could overflow while cos/sin are small.
38
+ * At this large of values
39
+ *
40
+ * cosh(y) ~ exp(y)/2
41
+ * sinh(y) ~ sgn(y)*exp(y)/2
42
+ *
43
+ * so we can compute exp(y/2), scale by the right factor of sin/cos
44
+ * and then multiply by exp(y/2) to avoid overflow. */
45
+ T exphpiy = std::exp(abspiy / 2);
46
+ T coshfac;
47
+ T sinhfac;
48
+ if (exphpiy == std::numeric_limits<T>::infinity()) {
49
+ if (sinpix == 0.0) {
50
+ // Preserve the sign of zero.
51
+ coshfac = std::copysign(0.0, sinpix);
52
+ } else {
53
+ coshfac = std::copysign(std::numeric_limits<T>::infinity(), sinpix);
54
+ }
55
+ if (cospix == 0.0) {
56
+ // Preserve the sign of zero.
57
+ sinhfac = std::copysign(0.0, cospix);
58
+ } else {
59
+ sinhfac = std::copysign(std::numeric_limits<T>::infinity(), cospix);
60
+ }
61
+ return {coshfac, sinhfac};
62
+ }
63
+
64
+ coshfac = 0.5 * sinpix * exphpiy;
65
+ sinhfac = 0.5 * cospix * exphpiy;
66
+ return {coshfac * exphpiy, sinhfac * exphpiy};
67
+ }
68
+
69
// cos(pi*x) for real x: forwards to the Cephes implementation, which is
// more accurate than std::cos(M_PI * x) because the period is integral.
template <typename T>
SPECFUN_HOST_DEVICE T cospi(T x) {
    return cephes::cospi(x);
}
73
+
74
+ template <typename T>
75
+ SPECFUN_HOST_DEVICE std::complex<T> cospi(std::complex<T> z) {
76
+ T x = z.real();
77
+ T piy = M_PI * z.imag();
78
+ T abspiy = std::abs(piy);
79
+ T sinpix = cephes::sinpi(x);
80
+ T cospix = cephes::cospi(x);
81
+
82
+ if (abspiy < 700) {
83
+ return {cospix * std::cosh(piy), -sinpix * std::sinh(piy)};
84
+ }
85
+
86
+ // See csinpi(z) for an idea of what's going on here.
87
+ T exphpiy = std::exp(abspiy / 2);
88
+ T coshfac;
89
+ T sinhfac;
90
+ if (exphpiy == std::numeric_limits<T>::infinity()) {
91
+ if (sinpix == 0.0) {
92
+ // Preserve the sign of zero.
93
+ coshfac = std::copysign(0.0, cospix);
94
+ } else {
95
+ coshfac = std::copysign(std::numeric_limits<T>::infinity(), cospix);
96
+ }
97
+ if (cospix == 0.0) {
98
+ // Preserve the sign of zero.
99
+ sinhfac = std::copysign(0.0, sinpix);
100
+ } else {
101
+ sinhfac = std::copysign(std::numeric_limits<T>::infinity(), sinpix);
102
+ }
103
+ return {coshfac, sinhfac};
104
+ }
105
+
106
+ coshfac = 0.5 * cospix * exphpiy;
107
+ sinhfac = 0.5 * sinpix * exphpiy;
108
+ return {coshfac * exphpiy, sinhfac * exphpiy};
109
+ }
110
+
111
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/wright_bessel.h ADDED
@@ -0,0 +1,841 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2023.
2
+ * Original header with Copyright information appears below.
3
+ */
4
+
5
+ /* Implementation of Wright's generalized Bessel function Phi, see
6
+ * https://dlmf.nist.gov/10.46.E1
7
+ *
8
+ * Copyright: Christian Lorentzen
9
+ *
10
+ * Distributed under the same license as SciPy
11
+ *
12
+ *
13
+ * Implementation Overview:
14
+ *
15
+ * First, different functions are implemented valid for certain domains of the
16
+ * three arguments.
17
+ * Finally they are put together in wright_bessel. See the docstring of
18
+ * that function for more details.
19
+ */
20
+
21
+ #pragma once
22
+
23
+ #include "cephes/lanczos.h"
24
+ #include "cephes/polevl.h"
25
+ #include "cephes/rgamma.h"
26
+ #include "config.h"
27
+ #include "digamma.h"
28
+ #include "error.h"
29
+
30
+ namespace special {
31
+
32
+ namespace detail {
33
+ // rgamma_zero: smallest value x for which rgamma(x) == 0 as x gets large
34
+ constexpr double rgamma_zero = 178.47241115886637;
35
+
36
+ SPECFUN_HOST_DEVICE inline double exp_rgamma(double x, double y) {
37
+ /* Compute exp(x) / gamma(y) = exp(x) * rgamma(y).
38
+ *
39
+ * This helper function avoids overflow by using the lanczos
40
+ * approximation of the gamma function.
41
+ */
42
+ return std::exp(x + (1 - std::log(y + cephes::lanczos_g - 0.5)) * (y - 0.5)) /
43
+ cephes::lanczos_sum_expg_scaled(y);
44
+ }
45
+
46
+ SPECFUN_HOST_DEVICE inline double wb_series(double a, double b, double x, unsigned int nstart, unsigned int nstop) {
47
+ /* 1. Taylor series expansion in x=0 for x <= 1.
48
+ *
49
+ * Phi(a, b, x) = sum_k x^k / k! / Gamma(a*k + b)
50
+ *
51
+ * Note that every term, and therefore also Phi(a, b, x) is
52
+ * monotone decreasing with increasing a or b.
53
+ */
54
+ double xk_k = std::pow(x, nstart) * cephes::rgamma(nstart + 1); // x^k/k!
55
+ double res = xk_k * cephes::rgamma(nstart * a + b);
56
+ // term k=nstart+1, +2, +3, ...
57
+ if (nstop > nstart) {
58
+ // series expansion until term k such that a*k+b <= rgamma_zero
59
+ unsigned int k_max = std::floor((rgamma_zero - b) / a);
60
+ if (nstop > k_max) {
61
+ nstop = k_max;
62
+ }
63
+ for (unsigned int k = nstart + 1; k < nstop; k++) {
64
+ xk_k *= x / k;
65
+ res += xk_k * cephes::rgamma(a * k + b);
66
+ }
67
+ }
68
+ return res;
69
+ }
70
+
71
+ template<bool log_wb>
72
+ SPECFUN_HOST_DEVICE inline double wb_large_a(double a, double b, double x, int n) {
73
+ /* 2. Taylor series expansion in x=0, for large a.
74
+ *
75
+ * Phi(a, b, x) = sum_k x^k / k! / Gamma(a*k + b)
76
+ *
77
+ * Use Stirling's formula to find k=k_max, the maximum term.
78
+ * Then use n terms of Taylor series around k_max.
79
+ */
80
+ int k_max = static_cast<int>(std::pow(std::pow(a, -a) * x, 1.0 / (1 + a)));
81
+
82
+ int nstart = k_max - n / 2;
83
+ if (nstart < 0) {
84
+ nstart = 0;
85
+ }
86
+
87
+ double res = 0;
88
+ double lnx = std::log(x);
89
+ // For numerical stability, we factor out the maximum term exp(..) with k=k_max
90
+ // but only if it is larger than 0.
91
+ double max_exponent = std::fmax(0, k_max * lnx - cephes::lgam(k_max + 1) - cephes::lgam(a * k_max + b));
92
+ for (int k = nstart; k < nstart + n; k++) {
93
+ res += std::exp(k * lnx - cephes::lgam(k + 1) - cephes::lgam(a * k + b) - max_exponent);
94
+ }
95
+
96
+ if (!log_wb) {
97
+ res *= std::exp(max_exponent);
98
+ } else {
99
+ // logarithm of Wright's function
100
+ res = max_exponent + std::log(res);
101
+ }
102
+ return res;
103
+ }
104
+
105
+ template<bool log_wb>
106
+ SPECFUN_HOST_DEVICE inline double wb_small_a(double a, double b, double x, int order) {
107
+ /* 3. Taylor series in a=0 up to order 5, for tiny a and not too large x
108
+ *
109
+ * Phi(a, b, x) = exp(x)/Gamma(b)
110
+ * (1 - a*x * Psi(b) + a^2/2*x*(1+x) * (Psi(b)^2 - Psi'(b)
111
+ + ... )
112
+ + O(a^6))
113
+ *
114
+ * where Psi is the digamma function.
115
+ *
116
+ * Parameter order takes effect only when b > 1e-3 and 2 <= order <= 5,
117
+ * otherwise it defaults to 2, or if b <= 1e-3, to 5. The lower order is,
118
+ * the fewer polygamma functions have to be computed.
119
+ *
120
+ * Call: python _precompute/wright_bessel.py 1
121
+ *
122
+ * For small b, i.e. b <= 1e-3, cancellation of poles of digamma(b)/Gamma(b)
123
+ * and polygamma needs to be carried out => series expansion in a=0 to order 5
124
+ * and in b=0 to order 4.
125
+ * Call: python _precompute/wright_bessel.py 2
126
+ */
127
+ double A[6]; // coefficients of a^k (1, -x * Psi(b), ...)
128
+ double B[6]; // powers of b^k/k! or terms in polygamma functions
129
+ constexpr double C[5] = { // coefficients of a^k1 * b^k2
130
+ 1.0000000000000000, // C[0]
131
+ 1.1544313298030657, // C[1]
132
+ -3.9352684291215233, // C[2]
133
+ -1.0080632408182857, // C[3]
134
+ 19.984633365874979, // C[4]
135
+ };
136
+ double X[6] = { // polynomials in x;
137
+ 1, // X[0]
138
+ x, // X[1]
139
+ x * (x + 1), // X[2]
140
+ x * (x * (x + 3) + 1), // X[3]
141
+ x * (x * (x * (x + 6) + 7) + 1), // X[4]
142
+ x * (x * (x * (x * (x + 10) + 25) + 15) + 1), // X[5]
143
+ };
144
+ double res;
145
+
146
+ if (b <= 1E-3) {
147
+ /* Series expansion of both a and b up to order 5:
148
+ * M_PI = pi
149
+ * M_EG = Euler Gamma aka Euler Mascheroni constant
150
+ * M_Z3 = zeta(3)
151
+ * C[0] = 1
152
+ * C[1] = 2*M_EG
153
+ * C[2] = 3*M_EG^2 - M_PI^2/2
154
+ * C[3] = 4*M_EG^3 - 2*M_EG*M_PI^2 + 8*M_Z3
155
+ * C[4] = 5*M_EG^4 - 5*M_EG^2*M_PI^2 + 40*M_EG*M_Z3 + M_PI^4/12
156
+ */
157
+ B[0] = 1.;
158
+ for (int k = 1; k < 5; k++) {
159
+ B[k] = b / k * B[k - 1];
160
+ }
161
+ // Note that polevl assumes inverse ordering => A[5] = 0th term
162
+ A[5] = cephes::rgamma(b);
163
+ A[4] = X[1] * (C[0] + C[1] * b + C[2] * B[2] + C[3] * B[3] + C[4] * B[4]);
164
+ A[3] = X[2] / 2. * (C[1] + C[2] * b + C[3] * B[2] + C[4] * B[3]);
165
+ A[2] = X[3] / 6. * (C[2] + C[3] * b + C[4] * B[2]);
166
+ A[1] = X[4] / 24. * (C[3] + C[4] * b);
167
+ A[0] = X[5] / 120. * C[4];
168
+ // res = exp(x) * (A[5] + A[4] * a + A[3] * a^2 + A[2] * a^3 + ...)
169
+ if (!log_wb) {
170
+ res = exp(x) * cephes::polevl(a, A, 5);
171
+ } else {
172
+ // logarithm of Wright's function
173
+ res = x + std::log(cephes::polevl(a, A, 5));
174
+ }
175
+ } else {
176
+ /* Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)
177
+ * A[n] = a^n/n!
178
+ * But here, we repurpose A[n] = X[n] * B[n] / n!
179
+ * Note that polevl assumes inverse ordering => A[order] = 0th term */
180
+ double dg = digamma(b);
181
+ // pg1 = polygamma(1, b)
182
+ double pg1 = cephes::zeta(2, b);
183
+ if (order <= 2) {
184
+ res = 1 + a * x * (-dg + 0.5 * a * (1 + x) * (dg * dg - pg1));
185
+ } else {
186
+ if (order > 5) {
187
+ order = 5;
188
+ }
189
+ // pg2 = polygamma(2, b)
190
+ double pg2 = -2 * cephes::zeta(3, b);
191
+ B[0] = 1;
192
+ B[1] = -dg;
193
+ B[2] = dg * dg - pg1;
194
+ B[3] = (-dg * dg + 3 * pg1) * dg - pg2;
195
+ A[order] = 1;
196
+ A[order - 1] = X[1] * B[1];
197
+ A[order - 2] = X[2] * B[2] / 2.;
198
+ A[order - 3] = X[3] * B[3] / 6.;
199
+ if (order >= 4) {
200
+ // double pg3 = polygamma(3, b)
201
+ double pg3 = 6 * cephes::zeta(4, b);
202
+ B[4] = ((dg * dg - 6 * pg1) * dg + 4 * pg2) * dg + 3 * pg1 * pg1 - pg3;
203
+ A[order - 4] = X[4] * B[4] / 24.;
204
+ if (order >= 5) {
205
+ // pg4 = polygamma(4, b)
206
+ double pg4 = -24 * cephes::zeta(5, b);
207
+ B[5] =
208
+ ((((-dg * dg + 10 * pg1) * dg - 10 * pg2) * dg - 15 * pg1 * pg1 + 5 * pg3) * dg +
209
+ 10 * pg1 * pg2 - pg4);
210
+ A[order - 5] = X[5] * B[5] / 120.;
211
+ }
212
+ }
213
+ res = cephes::polevl(a, A, order);
214
+ }
215
+ // res *= exp(x) * rgamma(b)
216
+ if (!log_wb) {
217
+ res *= exp_rgamma(x, b);
218
+ } else {
219
+ // logarithm of Wright's function
220
+ res = x - cephes::lgam(b) + std::log(res);
221
+ }
222
+ }
223
+ return res;
224
+ }
225
+
226
template<bool log_wb>
SPECFUN_HOST_DEVICE inline double wb_asymptotic(double a, double b, double x) {
    /* 4. Asymptotic expansion for large x up to order 8
     *
     * Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
     *
     * with Z = (a*x)^(1/(1+a)).
     * Call: python _precompute/wright_bessel.py 3
     *
     * NOTE(review): the coefficient polynomials below are machine-generated
     * by the precompute script named above — do not edit them by hand.
     */
    double A[15]; // powers of a
    double B[17]; // powers of b
    double Ap1[9]; // powers of (1+a)
    double C[9]; // coefficients of asymptotic series a_k

    // Precompute the power tables A[k] = a^k, B[k] = b^k, Ap1[k] = (1+a)^k.
    A[0] = 1.;
    B[0] = 1.;
    Ap1[0] = 1.;
    for (int k = 1; k < 15; k++) {
        A[k] = A[k - 1] * a;
    }
    for (int k = 1; k < 17; k++) {
        B[k] = B[k - 1] * b;
    }
    for (int k = 1; k < 9; k++) {
        Ap1[k] = Ap1[k - 1] * (1 + a);
    }

    C[0] = 1. / std::sqrt(2. * M_PI * Ap1[1]);

    C[1] = C[0] / (24 * Ap1[1]);
    C[1] *= (2 * a + 1) * (2 + a) - 12 * b * (1 + a - b);

    C[2] = C[0] / (1152 * Ap1[2]);
    C[2] *=
        (144 * B[4] - 96 * B[3] * (5 * a + 1) + 24 * B[2] * (20 * A[2] + 5 * a - 4) -
         24 * b * Ap1[1] * (6 * A[2] - 7 * a - 2) + (a + 2) * (2 * a + 1) * (2 * A[2] - 19 * a + 2));

    C[3] = C[0] / (414720 * Ap1[3]);
    C[3] *=
        (8640 * B[6] - 8640 * B[5] * (7 * a - 1) + 10800 * B[4] * (14 * A[2] - 7 * a - 2) -
         1440 * B[3] * (112 * A[3] - 147 * A[2] - 63 * a + 8) +
         180 * B[2] * (364 * A[4] - 1288 * A[3] - 567 * A[2] + 392 * a + 76) -
         180 * b * Ap1[1] * (20 * A[4] - 516 * A[3] + 417 * A[2] + 172 * a - 12) -
         (a + 2) * (2 * a + 1) * (556 * A[4] + 1628 * A[3] - 9093 * A[2] + 1628 * a + 556));

    C[4] = C[0] / (39813120 * Ap1[4]);
    C[4] *=
        (103680 * B[8] - 414720 * B[7] * (3 * a - 1) + 725760 * B[6] * a * (8 * a - 7) -
         48384 * B[5] * (274 * A[3] - 489 * A[2] + 39 * a + 26) +
         30240 * B[4] * (500 * A[4] - 1740 * A[3] + 495 * A[2] + 340 * a - 12) -
         2880 * B[3] * (2588 * A[5] - 19780 * A[4] + 14453 * A[3] + 9697 * A[2] - 1892 * a - 404) +
         48 * B[2] *
             (11488 * A[6] - 547836 * A[5] + 1007484 * A[4] + 593353 * A[3] - 411276 * A[2] - 114396 * a + 4288) +
         48 * b * Ap1[1] *
             (7784 * A[6] + 48180 * A[5] - 491202 * A[4] + 336347 * A[3] + 163734 * A[2] - 28908 * a - 5560) -
         (a + 2) * (2 * a + 1) *
             (4568 * A[6] - 226668 * A[5] - 465702 * A[4] + 2013479 * A[3] - 465702 * A[2] - 226668 * a + 4568));

    C[5] = C[0] / (6688604160. * Ap1[5]);
    C[5] *=
        (1741824 * B[10] - 2903040 * B[9] * (11 * a - 5) + 2177280 * B[8] * (110 * A[2] - 121 * a + 14) -
         580608 * B[7] * (1628 * A[3] - 3333 * A[2] + 1023 * a + 52) +
         169344 * B[6] * (12364 * A[4] - 43648 * A[3] + 26763 * A[2] + 1232 * a - 788) -
         24192 * B[5] * (104852 * A[5] - 646624 * A[4] + 721391 * A[3] - 16841 * A[2] - 74096 * a + 148) +
         2016 * B[4] *
             (710248 * A[6] - 8878716 * A[5] + 17928834 * A[4] - 3333407 * A[3] - 4339566 * A[2] + 287364 * a +
              89128) -
         1344 * B[3] *
             (87824 * A[7] - 7150220 * A[6] + 29202756 * A[5] - 15113527 * A[4] - 14223011 * A[3] + 3462492 * A[2] +
              1137092 * a - 18896) -
         84 * B[2] *
             (1690480 * A[8] + 14139136 * A[7] - 232575464 * A[6] + 296712592 * A[5] + 215856619 * A[4] -
              152181392 * A[3] - 47718440 * A[2] + 5813632 * a + 943216) +
         84 * b * Ap1[1] *
             (82224 * A[8] - 5628896 * A[7] - 26466520 * A[6] + 168779208 * A[5] - 104808005 * A[4] -
              56259736 * A[3] + 15879912 * A[2] + 4020640 * a - 63952) +
         (a + 2) * (2 * a + 1) *
             (2622064 * A[8] + 12598624 * A[7] - 167685080 * A[6] - 302008904 * A[5] + 1115235367. * A[4] -
              302008904 * A[3] - 167685080 * A[2] + 12598624 * a + 2622064));

    C[6] = C[0] / (4815794995200. * Ap1[6]);
    C[6] *=
        (104509440 * B[12] - 209018880 * B[11] * (13 * a - 7) + 574801920 * B[10] * (52 * A[2] - 65 * a + 12) -
         63866880 * B[9] * (2834 * A[3] - 6279 * A[2] + 2769 * a - 134) +
         23950080 * B[8] * (27404 * A[4] - 98228 * A[3] + 78663 * A[2] - 10868 * a - 1012) -
         13685760 * B[7] * (105612 * A[5] - 599196 * A[4] + 791843 * A[3] - 224913 * A[2] - 27612 * a + 4540) +
         2661120 * B[6] *
             (693680 * A[6] - 6473532 * A[5] + 13736424 * A[4] - 7047469 * A[3] - 723840 * A[2] + 471588 * a + 7376
             ) -
         2661120 * B[5] *
             (432536 * A[7] - 7850804 * A[6] + 27531114 * A[5] - 24234457 * A[4] - 703001 * A[3] + 3633474 * A[2] -
              36244 * a - 45128) +
         166320 * B[4] *
             (548912 * A[8] - 75660832 * A[7] + 502902712 * A[6] - 764807992 * A[5] + 91248287 * A[4] +
              217811464 * A[3] - 20365384 * A[2] - 9776416 * a + 37936) +
         10080 * B[3] *
             (18759728 * A[9] + 165932208 * A[8] - 4710418440. * A[7] + 13686052536. * A[6] - 5456818809. * A[5] -
              6834514245. * A[4] + 1919299512. * A[3] + 752176152 * A[2] - 45661200 * a - 8616848) -
         360 * B[2] *
             (32743360 * A[10] - 3381871792. * A[9] - 21488827776. * A[8] + 200389923864. * A[7] -
              198708005340. * A[6] - 171633799779. * A[5] + 123124874028. * A[4] + 40072774872. * A[3] -
              9137993280. * A[2] - 1895843248. * a + 18929728) -
         360 * b * Ap1[1] *
             (57685408 * A[10] + 406929456 * A[9] - 6125375760. * A[8] - 27094918920. * A[7] +
              128752249410. * A[6] - 74866710561. * A[5] - 42917416470. * A[4] + 16256951352. * A[3] +
              4375268400. * A[2] - 316500688 * a - 47197152) +
         (a + 2) * (2 * a + 1) *
             (167898208 * A[10] - 22774946512. * A[9] - 88280004528. * A[8] + 611863976472. * A[7] +
              1041430242126. * A[6] - 3446851131657. * A[5] + 1041430242126. * A[4] + 611863976472. * A[3] -
              88280004528. * A[2] - 22774946512. * a + 167898208));

    C[7] = C[0] / (115579079884800. * Ap1[7]);
    C[7] *=
        (179159040 * B[14] - 1254113280. * B[13] * (5 * a - 3) + 1358622720. * B[12] * (70 * A[2] - 95 * a + 22) -
         905748480 * B[11] * (904 * A[3] - 2109 * A[2] + 1119 * a - 112) +
         1245404160. * B[10] * (3532 * A[4] - 12824 * A[3] + 11829 * A[2] - 2824 * a + 44) -
         59304960 * B[9] * (256820 * A[5] - 1397680 * A[4] + 2025545 * A[3] - 869495 * A[2] + 52000 * a + 8788) +
         14826240 * B[8] *
             (2274536 * A[6] - 18601572 * A[5] + 40698318 * A[4] - 28230079 * A[3] + 3916398 * A[2] + 832668 * a -
              65176) -
         59304960 * B[7] *
             (760224 * A[7] - 9849164 * A[6] + 32495784 * A[5] - 34813869 * A[4] + 9175207 * A[3] + 1898688 * A[2] -
              469788 * a - 13184) +
         25945920 * B[6] *
             (1167504 * A[8] - 28779840 * A[7] + 149752856 * A[6] - 246026112 * A[5] + 111944073 * A[4] +
              18341600 * A[3] - 12131496 * A[2] - 274368 * a + 102800) -
         157248 * B[5] *
             (12341872 * A[9] - 3122991216. * A[8] + 29900054232. * A[7] - 78024816720. * A[6] +
              58914656739. * A[5] + 4637150811. * A[4] - 11523402480. * A[3] + 236218968 * A[2] + 337923216 * a +
              1592048) -
         28080 * B[4] *
             (265154912 * A[10] + 2276098704. * A[9] - 105569461008. * A[8] + 496560666360. * A[7] -
              627891462858. * A[6] + 41935358025. * A[5] + 203913875814. * A[4] - 23984801544. * A[3] -
              13869306000. * A[2] + 372786832 * a + 103532640) +
         1440 * B[3] *
             (310292864 * A[11] - 55169117872. * A[10] - 358957020112. * A[9] + 5714152556088. * A[8] -
              13241597459352. * A[7] + 4220720097141. * A[6] + 6845418090249. * A[5] - 2129559215808. * A[4] -
              909225098472. * A[3] + 107518582576. * A[2] + 25619444368. * a - 113832704) +
         12 * B[2] *
             (135319651136. * A[12] + 1119107842176. * A[11] - 22193518174320. * A[10] - 133421793595520. * A[9] +
              860103051087996. * A[8] - 703353374803080. * A[7] - 704240127687381. * A[6] +
              513111704637960. * A[5] + 166909061348316. * A[4] - 57671564069120. * A[3] - 12453426246000. * A[2] +
              695901207936. * a + 93786157376.) -
         12 * b * Ap1[1] *
             (4365353408. * A[12] - 720248637504. * A[11] - 4222331152560. * A[10] + 29413934270560. * A[9] +
              132123980710980. * A[8] - 511247376962820. * A[7] + 283403639131779. * A[6] +
              170415792320940. * A[5] - 79274388426588. * A[4] - 21009953050400. * A[3] + 3284035340880. * A[2] +
              589294339776. * a - 3693760576.) -
         (a + 2) * (2 * a + 1) *
             (34221025984. * A[12] + 226022948160. * A[11] - 5067505612464. * A[10] - 18868361443936. * A[9] +
              86215425028308. * A[8] + 143500920544692. * A[7] - 437682618704613. * A[6] + 143500920544692. * A[5] +
              86215425028308. * A[4] - 18868361443936. * A[3] - 5067505612464. * A[2] + 226022948160. * a +
              34221025984.));

    C[8] = C[0] / (22191183337881600. * Ap1[8]);
    C[8] *=
        (2149908480. * B[16] - 5733089280. * B[15] * (17 * a - 11) +
         7166361600. * B[14] * (272 * A[2] - 391 * a + 104) -
         3344302080. * B[13] * (6766 * A[3] - 16371 * A[2] + 9741 * a - 1306) +
         1811496960. * B[12] * (93092 * A[4] - 341564 * A[3] + 344199 * A[2] - 104924 * a + 6308) -
         517570560 * B[11] *
             (1626220 * A[5] - 8641508 * A[4] + 13274773 * A[3] - 6952303 * A[2] + 1007420 * a + 5564) +
         284663808 * B[10] *
             (9979136 * A[6] - 75766892 * A[5] + 169256148 * A[4] - 136824959 * A[3] + 35714348 * A[2] -
              463692 * a - 293664) -
         1423319040. * B[9] *
             (4466648 * A[7] - 49231116 * A[6] + 157507414 * A[5] - 187114257 * A[4] + 78372295 * A[3] -
              4470082 * A[2] - 1913996 * a + 82424) +
         266872320 * B[8] *
             (33133136 * A[8] - 564264544 * A[7] + 2618606424. * A[6] - 4491310104. * A[5] + 2853943765. * A[4] -
              374694552 * A[3] - 135365288 * A[2] + 17623968 * a + 696912) -
         2156544 * B[7] *
             (2914256144. * A[9] - 93491712432. * A[8] + 664876176984. * A[7] - 1661362937880. * A[6] +
              1563719627313. * A[5] - 382840842843. * A[4] - 115399415640. * A[3] + 34565562936. * A[2] +
              1609337232. * a - 217321904) +
         179712 * B[6] *
             (1266018560. * A[10] - 789261834512. * A[9] + 10186841596896. * A[8] - 38877799073352. * A[7] +
              54334425968952. * A[6] - 22529574889533. * A[5] - 5132942328000. * A[4] + 3438377465592. * A[3] +
              84287641248. * A[2] - 72493479440. * a - 807415936) +
         13824 * B[5] *
             (156356794976. * A[11] + 1180898077328. * A[10] - 90615270907936. * A[9] + 609258947056248. * A[8] -
              1312655191366722. * A[7] + 885900509321745. * A[6] + 112162151855265. * A[5] -
              212803071513258. * A[4] + 6805217831352. * A[3] + 10051742651296. * A[2] - 55035924848. * a -
              52946379296.) -
         576 * B[4] *
             (143943926464. * A[12] - 60115486481856. * A[11] - 376366989757200. * A[10] +
              9534223075576160. * A[9] - 35603777465262396. * A[8] + 39375990156664980. * A[7] -
              868175004137259. * A[6] - 14279180718355020. * A[5] + 1985747535239364. * A[4] +
              1264001337603680. * A[3] - 75972792514320. * A[2] - 23855850572736. * a - 4996648256.) -
         384 * B[3] *
             (2038525473856. * A[13] + 16057322146112. * A[12] - 502133360559024. * A[11] -
              2985686417468080. * A[10] + 32418922182093292. * A[9] - 63665380623022452. * A[8] +
              16481208821092575. * A[7] + 34161547357596099. * A[6] - 11490298497454932. * A[5] -
              5117272758337156. * A[4] + 933703210750480. * A[3] + 234855186762000. * A[2] - 7860524600000. * a -
              1226607567040.) +
         96 * B[2] *
             (324439754752. * A[14] - 77231415197120. * A[13] - 539102931841856. * A[12] +
              4618258299956336. * A[11] + 28588485529469792. * A[10] - 141383982651179428. * A[9] +
              98783147840417772. * A[8] + 112831723492305801. * A[7] - 83329761150975036. * A[6] -
              26553582937192900. * A[5] + 12469117738765952. * A[4] + 2587165396642160. * A[3] -
              340406368038080. * A[2] - 53659641606080. * a + 219671272960.) +
         96 * b * Ap1[1] *
             (1026630779520. * A[14] + 8781958472768. * A[13] - 210659786204384. * A[12] -
              1222283505284208. * A[11] + 5064251967491416. * A[10] + 24013052207628140. * A[9] -
              79710880160087370. * A[8] + 42596558293213227. * A[7] + 26570293386695790. * A[6] -
              14407831324576884. * A[5] - 3617322833922440. * A[4] + 950664948554384. * A[3] +
              172358006894496. * A[2] - 7430887938496. * a - 889746675584.) -
         (a + 2) * (2 * a + 1) *
             (573840801152. * A[14] - 156998277198784. * A[13] - 898376974770592. * A[12] +
              8622589006459984. * A[11] + 32874204024803560. * A[10] - 111492707520083828. * A[9] -
              184768503480287646. * A[8] + 528612016938984183. * A[7] - 184768503480287646. * A[6] -
              111492707520083828. * A[5] + 32874204024803560. * A[4] + 8622589006459984. * A[3] -
              898376974770592. * A[2] - 156998277198784. * a + 573840801152.));

    // Evaluate the truncated alternating series sum_k (-1)^k * C[k] / Z^k.
    double Z = std::pow(a * x, 1 / Ap1[1]);
    double Zp = 1.;
    double res = C[0];
    for (int k = 1; k < 9; k++) {
        Zp /= Z;
        res += (k % 2 == 0 ? 1 : -1) * C[k] * Zp;
    }
    // Apply the prefactor Z^(1/2-b) * exp((1+a)/a * Z) (or add its log).
    if (!log_wb) {
        res *= std::pow(Z, 0.5 - b) * std::exp(Ap1[1] / a * Z);
    } else {
        // logarithm of Wright's function
        res = std::log(Z) * (0.5 - b) + Ap1[1] / a * Z + std::log(res);
    }
    return res;
}
455
+
456
+ SPECFUN_HOST_DEVICE inline double wb_Kmod(double exp_term, double eps, double a, double b, double x, double r) {
457
+ /* Compute integrand Kmod(eps, a, b, x, r) for Gauss-Laguerre quadrature.
458
+ *
459
+ * K(a, b, x, r+eps) = exp(-r-eps) * Kmod(eps, a, b, x, r)
460
+ *
461
+ * Kmod(eps, a, b, x, r) = exp(x * (r+eps)^(-a) * cos(pi*a)) * (r+eps)^(-b)
462
+ * * sin(x * (r+eps)^(-a) * sin(pi*a) + pi * b)
463
+ *
464
+ * Note that we additionally factor out exp(exp_term) which helps with large
465
+ * terms in the exponent of exp(...)
466
+ */
467
+ double x_r_a = x * std::pow(r + eps, -a);
468
+ return std::exp(x_r_a * cephes::cospi(a) + exp_term) * std::pow(r + eps, -b) *
469
+ std::sin(x_r_a * cephes::sinpi(a) + M_PI * b);
470
+ }
471
+
472
+ SPECFUN_HOST_DEVICE inline double wb_P(double exp_term, double eps, double a, double b, double x, double phi) {
473
+ /* Compute integrand P for Gauss-Legendre quadrature.
474
+ *
475
+ * P(eps, a, b, x, phi) = exp(eps * cos(phi) + x * eps^(-a) * cos(a*phi))
476
+ * * cos(eps * sin(phi) - x * eps^(-a) * sin(a*phi)
477
+ * + (1-b)*phi)
478
+ *
479
+ * Note that we additionally factor out exp(exp_term) which helps with large
480
+ * terms in the exponent of exp(...)
481
+ */
482
+ double x_eps_a = x * std::pow(eps, -a);
483
+ return std::exp(eps * std::cos(phi) + x_eps_a * std::cos(a * phi) + exp_term) *
484
+ std::cos(eps * std::sin(phi) - x_eps_a * std::sin(a * phi) + (1 - b) * phi);
485
+ }
486
+
487
+ /* roots of laguerre polynomial of order 50
488
+ * scipy.special.roots_laguerre(50)[0] or
489
+ * sympy.integrals.quadrature.import gauss_laguerre(50, 16)[0] */
490
+ constexpr double wb_x_laguerre[] = {
491
+ 0.02863051833937908, 0.1508829356769337, 0.3709487815348964, 0.6890906998810479, 1.105625023539913,
492
+ 1.620961751102501, 2.23561037591518, 2.950183366641835, 3.765399774405782, 4.682089387559285,
493
+ 5.70119757478489, 6.823790909794551, 8.051063669390792, 9.384345308258407, 10.82510903154915,
494
+ 12.37498160875746, 14.03575459982991, 15.80939719784467, 17.69807093335025, 19.70414653546156,
495
+ 21.83022330657825, 24.0791514444115, 26.45405784125298, 28.95837601193738, 31.59588095662286,
496
+ 34.37072996309045, 37.28751061055049, 40.35129757358607, 43.56772026999502, 46.94304399160304,
497
+ 50.48426796312992, 54.19924488016862, 58.09682801724853, 62.18705417568891, 66.48137387844482,
498
+ 70.99294482661949, 75.73701154772731, 80.73140480247769, 85.99721113646323, 91.55969041253388,
499
+ 97.44956561485056, 103.7048912366923, 110.3738588076403, 117.5191982031112, 125.2254701334734,
500
+ 133.6120279227287, 142.8583254892541, 153.2603719726036, 165.3856433166825, 180.6983437092145
501
+ };
502
+ /* weights for laguerre polynomial of order 50
503
+ * sympy.integrals.quadrature.import gauss_laguerre(50, 16)[1] */
504
+ constexpr double wb_w_laguerre[] = {
505
+ 0.07140472613518988, 0.1471486069645884, 0.1856716275748313, 0.1843853825273539,
506
+ 0.1542011686063556, 0.1116853699022688, 0.07105288549019586, 0.04002027691150833,
507
+ 0.02005062308007171, 0.008960851203646281, 0.00357811241531566, 0.00127761715678905,
508
+ 0.0004080302449837189, 0.0001165288322309724, 2.974170493694165e-5, 6.777842526542028e-6,
509
+ 1.37747950317136e-6, 2.492886181720092e-7, 4.010354350427827e-8, 5.723331748141425e-9,
510
+ 7.229434249182665e-10, 8.061710142281779e-11, 7.913393099943723e-12, 6.81573661767678e-13,
511
+ 5.13242671658949e-14, 3.365624762437814e-15, 1.913476326965035e-16, 9.385589781827253e-18,
512
+ 3.950069964503411e-19, 1.417749517827512e-20, 4.309970276292175e-22, 1.101257519845548e-23,
513
+ 2.344617755608987e-25, 4.11854415463823e-27, 5.902246763596448e-29, 6.812008916553065e-31,
514
+ 6.237449498812102e-33, 4.452440579683377e-35, 2.426862352250487e-37, 9.852971481049686e-40,
515
+ 2.891078872318428e-42, 5.906162708112361e-45, 8.01287459750397e-48, 6.789575424396417e-51,
516
+ 3.308173010849252e-54, 8.250964876440456e-58, 8.848728128298018e-62, 3.064894889844417e-66,
517
+ 1.988708229330752e-71, 6.049567152238783e-78
518
+ };
519
+ /* roots of legendre polynomial of order 50
520
+ * sympy.integrals.quadrature.import gauss_legendre(50, 16)[0] */
521
+ constexpr double wb_x_legendre[] = {
522
+ -0.998866404420071, -0.9940319694320907, -0.9853540840480059, -0.9728643851066921, -0.9566109552428079,
523
+ -0.9366566189448779, -0.9130785566557919, -0.885967979523613, -0.8554297694299461, -0.8215820708593359,
524
+ -0.7845558329003993, -0.7444943022260685, -0.7015524687068223, -0.6558964656854394, -0.6077029271849502,
525
+ -0.5571583045146501, -0.5044581449074642, -0.4498063349740388, -0.3934143118975651, -0.3355002454194374,
526
+ -0.276288193779532, -0.2160072368760418, -0.1548905899981459, -0.09317470156008614, -0.03109833832718888,
527
+ 0.03109833832718888, 0.09317470156008614, 0.1548905899981459, 0.2160072368760418, 0.276288193779532,
528
+ 0.3355002454194374, 0.3934143118975651, 0.4498063349740388, 0.5044581449074642, 0.5571583045146501,
529
+ 0.6077029271849502, 0.6558964656854394, 0.7015524687068223, 0.7444943022260685, 0.7845558329003993,
530
+ 0.8215820708593359, 0.8554297694299461, 0.885967979523613, 0.9130785566557919, 0.9366566189448779,
531
+ 0.9566109552428079, 0.9728643851066921, 0.9853540840480059, 0.9940319694320907, 0.998866404420071
532
+ };
533
/* Weights of the 50-point Gauss-Legendre quadrature rule on [-1, 1],
 * matching the nodes in wb_x_legendre above (same index order).
 * Generated with: sympy.integrals.quadrature.gauss_legendre(50, 16)[1]
 * The weights are symmetric: wb_w_legendre[k] == wb_w_legendre[49 - k]. */
constexpr double wb_w_legendre[] = {
    0.002908622553155141, 0.006759799195745401, 0.01059054838365097, 0.01438082276148557, 0.01811556071348939,
    0.02178024317012479, 0.02536067357001239, 0.0288429935805352, 0.03221372822357802, 0.03545983561514615,
    0.03856875661258768, 0.0415284630901477, 0.04432750433880328, 0.04695505130394843, 0.04940093844946632,
    0.05165570306958114, 0.05371062188899625, 0.05555774480621252, 0.05718992564772838, 0.05860084981322245,
    0.05978505870426546, 0.06073797084177022, 0.06145589959031666, 0.06193606742068324, 0.06217661665534726,
    0.06217661665534726, 0.06193606742068324, 0.06145589959031666, 0.06073797084177022, 0.05978505870426546,
    0.05860084981322245, 0.05718992564772838, 0.05555774480621252, 0.05371062188899625, 0.05165570306958114,
    0.04940093844946632, 0.04695505130394843, 0.04432750433880328, 0.0415284630901477, 0.03856875661258768,
    0.03545983561514615, 0.03221372822357802, 0.0288429935805352, 0.02536067357001239, 0.02178024317012479,
    0.01811556071348939, 0.01438082276148557, 0.01059054838365097, 0.006759799195745401, 0.002908622553155141
};
547
/* Fitted parameters for the optimal choice of the free parameter eps in the
 * integral representation of Wright's function (see wright_bessel_integral).
 * Regenerate with: python _precompute/wright_bessel.py 4 */
constexpr double wb_A[] = {0.41037, 0.30833, 6.9952, 18.382, -2.8566, 2.1122};
550
+
551
template<bool log_wb>
SPECFUN_HOST_DEVICE inline double wright_bessel_integral(double a, double b, double x) {
    /* 5. Integral representation
     *
     * K(a, b, x, r) = exp(-r + x * r^(-a) * cos(pi*a)) * r^(-b)
     *                 * sin(x * r^(-a) * sin(pi*a) + pi * b)
     * P(eps, a, b, x, phi) = exp(eps * cos(phi) + x * eps^(-a) * cos(a*phi))
     *                        * cos(eps * sin(phi) - x * eps^(-a) * sin(a*phi)
     *                              + (1-b)*phi)
     *
     * Phi(a, b, x) = 1/pi * int_eps^inf K(a, b, x, r) * dr
     *              + eps^(1-b)/pi * int_0^pi P(eps, a, b, x, phi) * dphi
     *
     * for any eps > 0.
     *
     * Note that P has a misprint in Luchko (2008) Eq. 9, the cos(phi(beta-1)) at
     * the end of the first line should be removed and the -sin(phi(beta-1)) at
     * the end of the second line should read +(1-b)*phi.
     * This integral representation introduced the free parameter eps (from the
     * radius of complex contour integration). We try to choose eps such that
     * the integrand behaves smoothly. Note that this is quite different from how
     * Luchko (2008) deals with eps: he is either looking for the limit eps -> 0
     * or he sets (silently) eps=1. But having the freedom to set eps is much more
     * powerful for numerical evaluation.
     *
     * As K has a leading exp(-r), we factor this out and apply a Gauss-Laguerre
     * quadrature rule:
     *
     * int_0^inf K(a, b, x, r+eps) dr = exp(-eps) int_0^inf exp(-r) Kmod(.., r) dr
     *
     * Note the shift r -> r+eps to have integration from 0 to infinity.
     * The integral over P is done via a Gauss-Legendre quadrature rule.
     *
     * Note: The hardest argument range is large x, large b and small eps.
     */

    /* We use the free choice of eps to make the integral better behaved.
     * 1. Concern is oscillatory behaviour of P. Therefore, we'd like to
     *    make the change in the argument of the cosine small, i.e. make the arc
     *    length int_0^phi sqrt(1 + f'(phi)^2) dphi small, with
     *    f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a*phi) + (1-b)*phi
     *    Proxy: make |f'(phi)| small.
     * 2. Concern is int_0 K ~ int_0 (r+eps)^(-b) .. dr
     *    This is difficult as r -> 0 for large b. It behaves better for larger
     *    values of eps.
     */

    // Minimize the oscillatory behaviour of P (empirical fit, see wb_A above).
    double eps =
        (wb_A[0] * b * std::exp(-0.5 * a) +
         std::exp(
             wb_A[1] + 1 / (1 + a) * std::log(x) - wb_A[2] * std::exp(-wb_A[3] * a) +
             wb_A[4] / (1 + std::exp(wb_A[5] * a))
         ));

    if (a >= 4 && x >= 100) {
        eps += 1; // This part is hard to fit.
    }

    // Large b
    if (b >= 8) {
        /* Make P small compared to K by setting eps large enough.
         * int K ~ exp(-eps) and int P ~ eps^(1-b) */
        eps = std::fmax(eps, std::pow(b, -b / (1. - b)) + 0.1 * b);
    }

    // Safeguard: higher is better for larger a, lower is better for tiny a.
    eps = std::fmin(eps, 150.);
    eps = std::fmax(eps, 3.); // 3 seems to be a pretty good choice in general.

    // We factor out exp(-exp_term) from wb_Kmod and wb_P to avoid overflow of
    // exp(..).
    double exp_term = 0;
    // From the exponent of K:
    double r = wb_x_laguerre[50-1]; // largest value of x used in wb_Kmod
    double x_r_a = x * std::pow(r + eps, -a);
    exp_term = std::fmax(exp_term, x_r_a * cephes::cospi(a));
    // From the exponent of P:
    double x_eps_a = x * std::pow(eps, -a);
    // phi = 0 => cos(phi) = cos(a * phi) = 1
    exp_term = std::fmax(exp_term, eps + x_eps_a);
    // phi = pi => cos(phi) = -1
    exp_term = std::fmax(exp_term, -eps + x_eps_a * cephes::cospi(a));

    double res1 = 0; // Gauss-Laguerre accumulator for the K integral
    double res2 = 0; // Gauss-Legendre accumulator for the P integral

    double y;
    for (int k = 0; k < 50; k++) {
        res1 += wb_w_laguerre[k] * wb_Kmod(-exp_term, eps, a, b, x, wb_x_laguerre[k]);
        // y = (b-a)*(x+1)/2.0 + a for integration from a=0 to b=pi
        y = M_PI * (wb_x_legendre[k] + 1) / 2.0;
        res2 += wb_w_legendre[k] * wb_P(-exp_term, eps, a, b, x, y);
    }
    res1 *= std::exp(-eps);
    // (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1)
    res2 *= M_PI / 2.0;
    res2 *= std::pow(eps, 1 - b);

    if (!log_wb) {
        // Remember the factored out exp_term from wb_Kmod and wb_P.
        return std::exp(exp_term) / M_PI * (res1 + res2);
    } else {
        // Logarithm of Wright's function.
        return exp_term + std::log((res1 + res2) / M_PI);
    }
}
658
+ } // namespace detail
659
+
660
+ template<bool log_wb>
661
+ SPECFUN_HOST_DEVICE inline double wright_bessel_t(double a, double b, double x) {
662
+ /* Compute Wright's generalized Bessel function for scalar arguments.
663
+ *
664
+ * According to [1], it is an entire function defined as
665
+ *
666
+ * .. math:: \Phi(a, b; x) = \sum_{k=0}^\infty \frac{x^k}{k! \Gamma(a k + b)}
667
+ *
668
+ * So far, only non-negative values of rho=a, beta=b and z=x are implemented.
669
+ * There are 5 different approaches depending on the ranges of the arguments:
670
+ *
671
+ * 1. Taylor series expansion in x=0 [1], for x <= 1.
672
+ * Involves gamma funtions in each term.
673
+ * 2. Taylor series expansion in x=0 [2], for large a.
674
+ * 3. Taylor series in a=0, for tiny a and not too large x.
675
+ * 4. Asymptotic expansion for large x [3, 4].
676
+ * Suitable for large x while still small a and b.
677
+ * 5. Integral representation [5], in principle for all arguments.
678
+ *
679
+ * References
680
+ * ----------
681
+ * [1] https://dlmf.nist.gov/10.46.E1
682
+ * [2] P. K. Dunn, G. K. Smyth (2005), Series evaluation of Tweedie exponential
683
+ * dispersion model densities. Statistics and Computing 15 (2005): 267-280.
684
+ * [3] E. M. Wright (1935), The asymptotic expansion of the generalized Bessel
685
+ * function. Proc. London Math. Soc. (2) 38, pp. 257-270.
686
+ * https://doi.org/10.1112/plms/s2-38.1.257
687
+ * [4] R. B. Paris (2017), The asymptotics of the generalised Bessel function,
688
+ * Mathematica Aeterna, Vol. 7, 2017, no. 4, 381 - 406,
689
+ * https://arxiv.org/abs/1711.03006
690
+ * [5] Y. F. Luchko (2008), Algorithms for Evaluation of the Wright Function for
691
+ * the Real Arguments' Values, Fractional Calculus and Applied Analysis 11(1)
692
+ * http://sci-gems.math.bas.bg/jspui/bitstream/10525/1298/1/fcaa-vol11-num1-2008-57p-75p.pdf
693
+ */
694
+ if (std::isnan(a) || std::isnan(b) || std::isnan(x)) {
695
+ return std::numeric_limits<double>::quiet_NaN();
696
+ }
697
+ if (a < 0 || b < 0 || x < 0) {
698
+ set_error("wright_bessel", SF_ERROR_DOMAIN, NULL);
699
+ return std::numeric_limits<double>::quiet_NaN();
700
+ }
701
+ if (std::isinf(x)) {
702
+ if (std::isinf(a) || std::isinf(b)) {
703
+ return std::numeric_limits<double>::quiet_NaN();
704
+ }
705
+ return std::numeric_limits<double>::infinity();
706
+ }
707
+ if (std::isinf(a) || std::isinf(b)) {
708
+ return std::numeric_limits<double>::quiet_NaN(); // or 0
709
+ }
710
+ if (a >= detail::rgamma_zero || b >= detail::rgamma_zero) {
711
+ set_error("wright_bessel", SF_ERROR_OVERFLOW, NULL);
712
+ return std::numeric_limits<double>::quiet_NaN();
713
+ }
714
+ if (x == 0) {
715
+ // return rgamma(b)
716
+ if (!log_wb) {
717
+ return cephes::rgamma(b);
718
+ } else {
719
+ // logarithm of Wright's function
720
+ return -cephes::lgam(b);
721
+ }
722
+ }
723
+ if (a == 0) {
724
+ // return exp(x) * rgamma(b)
725
+ if (!log_wb) {
726
+ return detail::exp_rgamma(x, b);
727
+ } else {
728
+ // logarithm of Wright's function
729
+ return x - cephes::lgam(b);
730
+ }
731
+ }
732
+
733
+ constexpr double exp_inf = 709.78271289338403;
734
+ int order;
735
+ if ((a <= 1e-3 && b <= 50 && x <= 9) || (a <= 1e-4 && b <= 70 && x <= 100) ||
736
+ (a <= 1e-5 && b <= 170 && (x < exp_inf || (log_wb && x <= 1e3)))) {
737
+ /* Taylor Series expansion in a=0 to order=order => precision <= 1e-11
738
+ * If beta is also small => precision <= 1e-11.
739
+ * max order = 5 */
740
+ if (a <= 1e-5) {
741
+ if (x <= 1) {
742
+ order = 2;
743
+ } else if (x <= 10) {
744
+ order = 3;
745
+ } else if (x <= 100) {
746
+ order = 4;
747
+ } else { // x < exp_inf
748
+ order = 5;
749
+ }
750
+ } else if (a <= 1e-4) {
751
+ if (x <= 1e-2) {
752
+ order = 2;
753
+ } else if (x <= 1) {
754
+ order = 3;
755
+ } else if (x <= 10) {
756
+ order = 4;
757
+ } else { // x <= 100
758
+ order = 5;
759
+ }
760
+ } else { // a <= 1e-3
761
+ if (x <= 1e-5) {
762
+ order = 2;
763
+ } else if (x <= 1e-1) {
764
+ order = 3;
765
+ } else if (x <= 1) {
766
+ order = 4;
767
+ } else { // x <= 9
768
+ order = 5;
769
+ }
770
+ }
771
+
772
+ return detail::wb_small_a<log_wb>(a, b, x, order);
773
+ }
774
+
775
+ if (x <= 1) {
776
+ // 18 term Taylor Series => error mostly smaller 5e-14
777
+ double res = detail::wb_series(a, b, x, 0, 18);
778
+ if (log_wb) res = std::log(res);
779
+ return res;
780
+ }
781
+ if (x <= 2) {
782
+ // 20 term Taylor Series => error mostly smaller 1e-12 to 1e-13
783
+ return detail::wb_series(a, b, x, 0, 20);
784
+ }
785
+ if (a >= 5) {
786
+ /* Taylor series around the approximate maximum term.
787
+ * Set number of terms=order. */
788
+ if (a >= 10) {
789
+ if (x <= 1e11) {
790
+ order = 6;
791
+ } else {
792
+ order = static_cast<int>(std::fmin(std::log10(x) - 5 + b / 10, 30));
793
+ }
794
+ } else {
795
+ if (x <= 1e4) {
796
+ order = 6;
797
+ } else if (x <= 1e8) {
798
+ order = static_cast<int>(2 * std::log10(x));
799
+ } else if (x <= 1e10) {
800
+ order = static_cast<int>(4 * std::log10(x) - 16);
801
+ } else {
802
+ order = static_cast<int>(std::fmin(6 * std::log10(x) - 36, 100));
803
+ }
804
+ }
805
+ return detail::wb_large_a<log_wb>(a, b, x, order);
806
+ }
807
+ if (std::pow(a * x, 1 / (1. + a)) >= 14 + b * b / (2 * (1 + a))) {
808
+ /* Asymptotic expansion in Z = (a*x)^(1/(1+a)) up to 8th term 1/Z^8.
809
+ * For 1/Z^k, the highest term in b is b^(2*k) * a0 / (2^k k! (1+a)^k).
810
+ * As a0 is a common factor to all orders, this explains a bit the
811
+ * domain of good convergence set above.
812
+ * => precision ~ 1e-11 but can go down to ~1e-8 or 1e-7
813
+ * Note: We ensured a <= 5 as this is a bad approximation for large a. */
814
+ return detail::wb_asymptotic<log_wb>(a, b, x);
815
+ }
816
+ if (0.5 <= a && a <= 1.8 && 100 <= b && 1e5 <= x) {
817
+ // This is a very hard domain. This condition is placed after wb_asymptotic.
818
+ // TODO: Explore ways to cover this domain.
819
+ return std::numeric_limits<double>::quiet_NaN();
820
+ }
821
+ return detail::wright_bessel_integral<log_wb>(a, b, x);
822
+ }
823
+
824
+
825
+ SPECFUN_HOST_DEVICE inline double wright_bessel(double a, double b, double x) {
826
+ return wright_bessel_t<false>(a, b, x);
827
+ }
828
+
829
+ SPECFUN_HOST_DEVICE inline float wright_bessel(float a, float b, float x) {
830
+ return wright_bessel(static_cast<double>(a), static_cast<double>(b), static_cast<double>(x));
831
+ }
832
+
833
+ SPECFUN_HOST_DEVICE inline double log_wright_bessel(double a, double b, double x) {
834
+ return wright_bessel_t<true>(a, b, x);
835
+ }
836
+
837
+ SPECFUN_HOST_DEVICE inline float log_wright_bessel(float a, float b, float x) {
838
+ return log_wright_bessel(static_cast<double>(a), static_cast<double>(b), static_cast<double>(x));
839
+ }
840
+
841
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/special/zlog1.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Translated from Cython into C++ by SciPy developers in 2023.
2
+ *
3
+ * Original author: Josh Wilson, 2016.
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include "config.h"
9
+
10
+ namespace special {
11
+ namespace detail {
12
+
13
+ SPECFUN_HOST_DEVICE inline std::complex<double> zlog1(std::complex<double> z) {
14
+ /* Compute log, paying special attention to accuracy around 1. We
15
+ * implement this ourselves because some systems (most notably the
16
+ * Travis CI machines) are weak in this regime. */
17
+ std::complex<double> coeff = -1.0;
18
+ std::complex<double> res = 0.0;
19
+
20
+ if (std::abs(z - 1.0) > 0.1) {
21
+ return std::log(z);
22
+ }
23
+
24
+ z -= 1.0;
25
+ for (int n = 1; n < 17; n++) {
26
+ coeff *= -z;
27
+ res += coeff / static_cast<double>(n);
28
+ if (std::abs(res / coeff) < std::numeric_limits<double>::epsilon()) {
29
+ break;
30
+ }
31
+ }
32
+ return res;
33
+ }
34
+ } // namespace detail
35
+ } // namespace special
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (171 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc ADDED
Binary file (5.38 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc ADDED
Binary file (3.64 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc ADDED
Binary file (78.7 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc ADDED
Binary file (817 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spence.cpython-310.pyc ADDED
Binary file (1.21 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ufunc_signatures.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/special/tests/data/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/scipy/special/tests/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_validate_sparse_csc_tensor_args_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
// Generated convenience wrapper; all validation logic lives behind the
// dispatcher entry at::_ops::_validate_sparse_csc_tensor_args.
inline void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
}
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_weight_norm_interface_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
// Generated wrappers; each simply forwards to the dispatcher entry named in
// the aten schema comment above it.

// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim=0) {
    return at::_ops::_weight_norm_interface::call(v, g, dim);
}

// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// "out" variant: output tensors come first in the C++ signature.
inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) {
    return at::_ops::_weight_norm_interface_out::call(v, g, dim, out0, out1);
}
// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// "outf" variant: same op, output tensors last (schema order, no defaults).
inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_outf(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
    return at::_ops::_weight_norm_interface_out::call(v, g, dim, out0, out1);
}
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Scalar & other);
21
+ TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Scalar & other);
22
+ TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Tensor & other);
23
+ TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other);
24
+
25
+ } // namespace compositeimplicitautograd
26
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh.h ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/arctanh_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
// Generated wrappers for the arctanh op family; each forwards to the
// dispatcher entry named in the aten schema comment above it.

// aten::arctanh(Tensor self) -> Tensor
inline at::Tensor arctanh(const at::Tensor & self) {
    return at::_ops::arctanh::call(self);
}

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
// In-place variant (trailing underscore).
inline at::Tensor & arctanh_(at::Tensor & self) {
    return at::_ops::arctanh_::call(self);
}

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// "out" variant: output tensor first in the C++ signature.
inline at::Tensor & arctanh_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::arctanh_out::call(self, out);
}
// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// "outf" variant: same op, output tensor last (schema order).
inline at::Tensor & arctanh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::arctanh_out::call(self, out);
}
43
+
44
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_ops.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
/* Operator descriptor structs generated by torchgen, one per overload of
 * aten::bitwise_right_shift (and its in-place/out variants). Each struct
 * carries the overload's name, schema string and the static call/redispatch
 * entry points into the dispatcher; no logic lives here. */

struct TORCH_API bitwise_right_shift_Tensor {
  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
};

struct TORCH_API bitwise_right_shift__Tensor {
  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
};

struct TORCH_API bitwise_right_shift_Tensor_out {
  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
};

struct TORCH_API bitwise_right_shift_Tensor_Scalar {
  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
};

struct TORCH_API bitwise_right_shift__Tensor_Scalar {
  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
};

struct TORCH_API bitwise_right_shift_Tensor_Scalar_out {
  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
};

struct TORCH_API bitwise_right_shift_Scalar_Tensor {
  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")
  static at::Tensor call(const at::Scalar & self, const at::Tensor & other);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other);
};

struct TORCH_API bitwise_right_shift_Scalar_Tensor_out {
  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_right_shift")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
};
104
+
105
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_cpu_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1);
21
+ TORCH_API at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1);
22
+ TORCH_API at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out);
23
+ TORCH_API at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1);
24
+
25
+ } // namespace cpu
26
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
/* Operator descriptor structs generated by torchgen for aten::hypot and its
 * out/in-place variants. Each struct carries the overload's name, schema
 * string and the static call/redispatch entry points; no logic lives here. */

struct TORCH_API hypot_out {
  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
};

struct TORCH_API hypot {
  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot(Tensor self, Tensor other) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
};

struct TORCH_API hypot_ {
  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
};
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor linalg_inv(const at::Tensor & A);
21
+ TORCH_API at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A);
22
+ TORCH_API at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out);
23
+
24
+ } // namespace compositeimplicitautograd
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_dropout_backward_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/native_layer_norm_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
27
+ return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
32
+ return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
33
+ }
34
+ }
35
+
36
+ // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
37
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
38
+ return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
42
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
43
+ return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
44
+ }
45
+ }
46
+
47
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
48
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
49
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
50
+ }
51
+ namespace symint {
52
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
53
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
54
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
55
+ }
56
+ }
57
+
58
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
59
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
60
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
61
+ }
62
+ namespace symint {
63
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
64
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
65
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
66
+ }
67
+ }
68
+
69
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
70
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
71
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
72
+ }
73
+ namespace symint {
74
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
75
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
76
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
77
+ }
78
+ }
79
+
80
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
81
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
82
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
83
+ }
84
+ namespace symint {
85
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
86
+ ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
87
+ return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
88
+ }
89
+ }
90
+
91
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & pixel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t upscale_factor);
21
+ TORCH_API at::Tensor & pixel_shuffle_outf(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API quantized_rnn_relu_cell {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantized_rnn_relu_cell")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor slice(const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor special_erfinv(const at::Tensor & self);
21
+ TORCH_API at::Tensor & special_erfinv_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & special_erfinv_outf(const at::Tensor & self, at::Tensor & out);
23
+
24
+ } // namespace compositeimplicitautograd
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/split.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/split_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
26
+ inline ::std::vector<at::Tensor> split(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
27
+ return at::_ops::split_Tensor::call(self, split_size, dim);
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ ::std::vector<at::Tensor> split(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
32
+ return at::_ops::split_Tensor::call(self, split_size, dim);
33
+ }
34
+ }
35
+
36
+ // aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
37
+ inline ::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
38
+ return at::_ops::split_Tensor::call(self, split_size, dim);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
42
+ ::std::vector<at::Tensor> split(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
43
+ return at::_ops::split_Tensor::call(self, split_size, dim);
44
+ }
45
+ }
46
+
47
+ // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
48
+ inline ::std::vector<at::Tensor> split(const at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) {
49
+ return at::_ops::split_sizes::call(self, c10::fromIntArrayRefSlow(split_size), dim);
50
+ }
51
+ namespace symint {
52
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
53
+ ::std::vector<at::Tensor> split(const at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) {
54
+ return at::_ops::split_sizes::call(self, c10::fromIntArrayRefSlow(split_size), dim);
55
+ }
56
+ }
57
+
58
+ // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
59
+ inline ::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim=0) {
60
+ return at::_ops::split_sizes::call(self, split_size, dim);
61
+ }
62
+ namespace symint {
63
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
64
+ ::std::vector<at::Tensor> split(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim=0) {
65
+ return at::_ops::split_sizes::call(self, split_size, dim);
66
+ }
67
+ }
68
+
69
+ }
vllm/lib/python3.10/site-packages/dotenv/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
vllm/lib/python3.10/site-packages/dotenv/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (305 Bytes). View file
 
vllm/lib/python3.10/site-packages/dotenv/__pycache__/cli.cpython-310.pyc ADDED
Binary file (5.97 kB). View file
 
vllm/lib/python3.10/site-packages/dotenv/__pycache__/ipython.cpython-310.pyc ADDED
Binary file (1.46 kB). View file
 
vllm/lib/python3.10/site-packages/dotenv/__pycache__/main.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
vllm/lib/python3.10/site-packages/dotenv/__pycache__/parser.cpython-310.pyc ADDED
Binary file (6.13 kB). View file