diff --git a/.gitattributes b/.gitattributes index c2033a5d1bae32f85cc6a51fbc2fe63184f1b222..e5166ec81dce425b97f2a8223ffce6b55b332eeb 100644 --- a/.gitattributes +++ b/.gitattributes @@ -786,3 +786,11 @@ phi4/lib/python3.10/site-packages/numpy/_core/lib/libnpymath.a filter=lfs diff=l phi4/lib/python3.10/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text openflamingo/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text openflamingo/lib/python3.10/site-packages/scipy/io/matlab/_mio5_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +phi4/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/openflamingo/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so 
b/openflamingo/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..94355e55cff7245de503708d8978040caa8a6a8b --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c39ee117a6cbc87241a50a1b1b7047fa515577945ba129a92c814fc412ee07a0 +size 1201320 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3e1393d58c440c43562f3efea5161682595330ba --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e10940ad4a98bb79181b57eb89e4d5276eed0f3bd2a265cd3ec44cab53574076 +size 633088 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ba30fa3a36c41f605a7e96df0cb8cff5814c7c70 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e5736df7906bb83eeda658132fa48dc14e5bccb14a75bc36d9fd3e31c80095 +size 372704 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..d5393d27593a7abd11862fb703f981a888b4054a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec6a3bda2516baea6b3fa92a36206684d7d58385879c6d7173dcd717866c619 +size 525696 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5aed6cc2b27a65475281326e0368cd163758b434 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1a4ff5e2e26f8b33cb6c9b2c4eb9e537b2bef411bd914a024f9751fe33a3063 +size 300152 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so b/openflamingo/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8e94849e70f33bc1c1965e4c1c889d4e9cbd93c3 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec86a3f8443905debab4b9fc6a858e7b587767473297b3f855a8f3249f0b8cf4 +size 348849 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz b/openflamingo/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..ff967f2ca0d0868aacf7d7e67402599e64bab817 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a3dfab451d9d5c20243e0ed85cd8b6c9657669fb9a0f83b5be165585783d55b5 +size 2164 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so b/openflamingo/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9fd53a985e58845db8476af7881c1a4bcf89b2fa --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1031461363c3772a9a32030693341b112093ff989eab6e3d04213d6873c235d9 +size 1027824 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/_ellip_harm.py b/openflamingo/lib/python3.10/site-packages/scipy/special/_ellip_harm.py new file mode 100644 index 0000000000000000000000000000000000000000..1b1ce34aa58054be13edfd5d87f2059e8a0d9224 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/_ellip_harm.py @@ -0,0 +1,214 @@ +import numpy as np + +from ._ufuncs import _ellip_harm +from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm + + +def ellip_harm(h2, k2, n, p, s, signm=1, signn=1): + r""" + Ellipsoidal harmonic functions E^p_n(l) + + These are also known as Lame functions of the first kind, and are + solutions to the Lame equation: + + .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0 + + where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not + returned) corresponding to the solutions. + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree + s : float + Coordinate + p : int + Order, can range between [1,2n+1] + signm : {1, -1}, optional + Sign of prefactor of functions. Can be +/-1. See Notes. + signn : {1, -1}, optional + Sign of prefactor of functions. Can be +/-1. See Notes. 
+ + Returns + ------- + E : float + the harmonic :math:`E^p_n(s)` + + See Also + -------- + ellip_harm_2, ellip_normal + + Notes + ----- + The geometric interpretation of the ellipsoidal functions is + explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments control the + sign of prefactors for functions according to their type:: + + K : +1 + L : signm + M : signn + N : signm*signn + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Digital Library of Mathematical Functions 29.12 + https://dlmf.nist.gov/29.12 + .. [2] Bardhan and Knepley, "Computational science and + re-discovery: open-source implementations of + ellipsoidal harmonics for problems in potential theory", + Comput. Sci. Disc. 5, 014006 (2012) + :doi:`10.1088/1749-4699/5/1/014006`. + .. [3] David J.and Dechambre P, "Computation of Ellipsoidal + Gravity Field Harmonics for small solar system bodies" + pp. 30-36, 2000 + .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications" + pp. 418, 2012 + + Examples + -------- + >>> from scipy.special import ellip_harm + >>> w = ellip_harm(5,8,1,1,2.5) + >>> w + 2.5 + + Check that the functions indeed are solutions to the Lame equation: + + >>> import numpy as np + >>> from scipy.interpolate import UnivariateSpline + >>> def eigenvalue(f, df, ddf): + ... r = (((s**2 - h**2) * (s**2 - k**2) * ddf + ... + s * (2*s**2 - h**2 - k**2) * df + ... - n * (n + 1)*s**2*f) / f) + ... 
return -r.mean(), r.std() + >>> s = np.linspace(0.1, 10, 200) + >>> k, h, n, p = 8.0, 2.2, 3, 2 + >>> E = ellip_harm(h**2, k**2, n, p, s) + >>> E_spl = UnivariateSpline(s, E) + >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2)) + >>> a, a_err + (583.44366156701483, 6.4580890640310646e-11) + + """ # noqa: E501 + return _ellip_harm(h2, k2, n, p, s, signm, signn) + + +_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d') + + +def ellip_harm_2(h2, k2, n, p, s): + r""" + Ellipsoidal harmonic functions F^p_n(l) + + These are also known as Lame functions of the second kind, and are + solutions to the Lame equation: + + .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0 + + where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not + returned) corresponding to the solutions. + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree. + p : int + Order, can range between [1,2n+1]. + s : float + Coordinate + + Returns + ------- + F : float + The harmonic :math:`F^p_n(s)` + + See Also + -------- + ellip_harm, ellip_normal + + Notes + ----- + Lame functions of the second kind are related to the functions of the first kind: + + .. math:: + + F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s} + \frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}} + + .. versionadded:: 0.15.0 + + Examples + -------- + >>> from scipy.special import ellip_harm_2 + >>> w = ellip_harm_2(5,8,2,1,10) + >>> w + 0.00108056853382 + + """ + with np.errstate(all='ignore'): + return _ellip_harm_2_vec(h2, k2, n, p, s) + + +def _ellip_normal_vec(h2, k2, n, p): + return _ellipsoid_norm(h2, k2, n, p) + + +_ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d') + + +def ellip_normal(h2, k2, n, p): + r""" + Ellipsoidal harmonic normalization constants gamma^p_n + + The normalization constant is defined as + + .. 
math:: + + \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy + \frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt((k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)} + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree. + p : int + Order, can range between [1,2n+1]. + + Returns + ------- + gamma : float + The normalization constant :math:`\gamma^p_n` + + See Also + -------- + ellip_harm, ellip_harm_2 + + Notes + ----- + .. versionadded:: 0.15.0 + + Examples + -------- + >>> from scipy.special import ellip_normal + >>> w = ellip_normal(5,8,3,7) + >>> w + 1723.38796997 + + """ + with np.errstate(all='ignore'): + return _ellip_normal_vec(h2, k2, n, p) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/_lambertw.py b/openflamingo/lib/python3.10/site-packages/scipy/special/_lambertw.py new file mode 100644 index 0000000000000000000000000000000000000000..f758c7c21fdddc0ec1b84727d90c6de7f34a094e --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/_lambertw.py @@ -0,0 +1,149 @@ +from ._ufuncs import _lambertw + +import numpy as np + + +def lambertw(z, k=0, tol=1e-8): + r""" + lambertw(z, k=0, tol=1e-8) + + Lambert W function. + + The Lambert W function `W(z)` is defined as the inverse function + of ``w * exp(w)``. In other words, the value of ``W(z)`` is + such that ``z = W(z) * exp(W(z))`` for any complex number + ``z``. + + The Lambert W function is a multivalued function with infinitely + many branches. Each branch gives a separate solution of the + equation ``z = w exp(w)``. Here, the branches are indexed by the + integer `k`. + + Parameters + ---------- + z : array_like + Input argument. + k : int, optional + Branch index. + tol : float, optional + Evaluation tolerance. + + Returns + ------- + w : array + `w` will have the same shape as `z`. 
+ + See Also + -------- + wrightomega : the Wright Omega function + + Notes + ----- + All branches are supported by `lambertw`: + + * ``lambertw(z)`` gives the principal solution (branch 0) + * ``lambertw(z, k)`` gives the solution on branch `k` + + The Lambert W function has two partially real branches: the + principal branch (`k = 0`) is real for real ``z > -1/e``, and the + ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except + ``k = 0`` have a logarithmic singularity at ``z = 0``. + + **Possible issues** + + The evaluation can become inaccurate very close to the branch point + at ``-1/e``. In some corner cases, `lambertw` might currently + fail to converge, or can end up on the wrong branch. + + **Algorithm** + + Halley's iteration is used to invert ``w * exp(w)``, using a first-order + asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate. + + The definition, implementation and choice of branches is based on [2]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Lambert_W_function + .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5 + (1996) 329-359. + https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf + + Examples + -------- + The Lambert W function is the inverse of ``w exp(w)``: + + >>> import numpy as np + >>> from scipy.special import lambertw + >>> w = lambertw(1) + >>> w + (0.56714329040978384+0j) + >>> w * np.exp(w) + (1.0+0j) + + Any branch gives a valid inverse: + + >>> w = lambertw(1, k=3) + >>> w + (-2.8535817554090377+17.113535539412148j) + >>> w*np.exp(w) + (1.0000000000000002+1.609823385706477e-15j) + + **Applications to equation-solving** + + The Lambert W function may be used to solve various kinds of + equations. We give two examples here. + + First, the function can be used to solve implicit equations of the + form + + :math:`x = a + b e^{c x}` + + for :math:`x`. We assume :math:`c` is not zero. 
After a little + algebra, the equation may be written + + :math:`z e^z = -b c e^{a c}` + + where :math:`z = c (a - x)`. :math:`z` may then be expressed using + the Lambert W function + + :math:`z = W(-b c e^{a c})` + + giving + + :math:`x = a - W(-b c e^{a c})/c` + + For example, + + >>> a = 3 + >>> b = 2 + >>> c = -0.5 + + The solution to :math:`x = a + b e^{c x}` is: + + >>> x = a - lambertw(-b*c*np.exp(a*c))/c + >>> x + (3.3707498368978794+0j) + + Verify that it solves the equation: + + >>> a + b*np.exp(c*x) + (3.37074983689788+0j) + + The Lambert W function may also be used find the value of the infinite + power tower :math:`z^{z^{z^{\ldots}}}`: + + >>> def tower(z, n): + ... if n == 0: + ... return z + ... return z ** tower(z, n-1) + ... + >>> tower(0.5, 100) + 0.641185744504986 + >>> -lambertw(-np.log(0.5)) / np.log(0.5) + (0.64118574450498589+0j) + """ + # TODO: special expert should inspect this + # interception; better place to do it? + k = np.asarray(k, dtype=np.dtype("long")) + return _lambertw(z, k, tol) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/_spfun_stats.py b/openflamingo/lib/python3.10/site-packages/scipy/special/_spfun_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..2525eceb47ec2b20b45ca693e19e741f4a666597 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/_spfun_stats.py @@ -0,0 +1,106 @@ +# Last Change: Sat Mar 21 02:00 PM 2009 J + +# Copyright (c) 2001, 2002 Enthought, Inc. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# a. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# b. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# c. Neither the name of the Enthought nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +"""Some more special functions which may be useful for multivariate statistical +analysis.""" + +import numpy as np +from scipy.special import gammaln as loggam + + +__all__ = ['multigammaln'] + + +def multigammaln(a, d): + r"""Returns the log of multivariate gamma, also sometimes called the + generalized gamma. + + Parameters + ---------- + a : ndarray + The multivariate gamma is computed for each item of `a`. + d : int + The dimension of the space of integration. + + Returns + ------- + res : ndarray + The values of the log multivariate gamma at the given points `a`. + + Notes + ----- + The formal definition of the multivariate gamma of dimension d for a real + `a` is + + .. 
math:: + + \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA + + with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of + all the positive definite matrices of dimension `d`. Note that `a` is a + scalar: the integrand only is multivariate, the argument is not (the + function is defined over a subset of the real set). + + This can be proven to be equal to the much friendlier equation + + .. math:: + + \Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2). + + References + ---------- + R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in + probability and mathematical statistics). + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import multigammaln, gammaln + >>> a = 23.5 + >>> d = 10 + >>> multigammaln(a, d) + 454.1488605074416 + + Verify that the result agrees with the logarithm of the equation + shown above: + + >>> d*(d-1)/4*np.log(np.pi) + gammaln(a - 0.5*np.arange(0, d)).sum() + 454.1488605074416 + """ + a = np.asarray(a) + if not np.isscalar(d) or (np.floor(d) != d): + raise ValueError("d should be a positive integer (dimension)") + if np.any(a <= 0.5 * (d - 1)): + raise ValueError(f"condition a ({a:f}) > 0.5 * (d-1) ({0.5 * (d-1):f}) not met") + + res = (d * (d-1) * 0.25) * np.log(np.pi) + res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0) + return res diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/_test_internal.pyi b/openflamingo/lib/python3.10/site-packages/scipy/special/_test_internal.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0e209e366f0b37415159083434a053545bc78fae --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/_test_internal.pyi @@ -0,0 +1,9 @@ +import numpy as np + +def have_fenv() -> bool: ... +def random_double(size: int) -> np.float64: ... +def test_add_round(size: int, mode: str): ... + +def _dd_exp(xhi: float, xlo: float) -> tuple[float, float]: ... 
+def _dd_log(xhi: float, xlo: float) -> tuple[float, float]: ... +def _dd_expm1(xhi: float, xlo: float) -> tuple[float, float]: ... diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi b/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ab1738021e174d176be0522c267bfe3e245a9c19 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi @@ -0,0 +1,526 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! + +from typing import Any, Dict + +import numpy as np + +__all__ = [ + 'geterr', + 'seterr', + 'errstate', + 'agm', + 'airy', + 'airye', + 'bdtr', + 'bdtrc', + 'bdtri', + 'bdtrik', + 'bdtrin', + 'bei', + 'beip', + 'ber', + 'berp', + 'besselpoly', + 'beta', + 'betainc', + 'betaincc', + 'betainccinv', + 'betaincinv', + 'betaln', + 'binom', + 'boxcox', + 'boxcox1p', + 'btdtr', + 'btdtri', + 'btdtria', + 'btdtrib', + 'cbrt', + 'chdtr', + 'chdtrc', + 'chdtri', + 'chdtriv', + 'chndtr', + 'chndtridf', + 'chndtrinc', + 'chndtrix', + 'cosdg', + 'cosm1', + 'cotdg', + 'dawsn', + 'ellipe', + 'ellipeinc', + 'ellipj', + 'ellipk', + 'ellipkinc', + 'ellipkm1', + 'elliprc', + 'elliprd', + 'elliprf', + 'elliprg', + 'elliprj', + 'entr', + 'erf', + 'erfc', + 'erfcinv', + 'erfcx', + 'erfi', + 'erfinv', + 'eval_chebyc', + 'eval_chebys', + 'eval_chebyt', + 'eval_chebyu', + 'eval_gegenbauer', + 'eval_genlaguerre', + 'eval_hermite', + 'eval_hermitenorm', + 'eval_jacobi', + 'eval_laguerre', + 'eval_legendre', + 'eval_sh_chebyt', + 'eval_sh_chebyu', + 'eval_sh_jacobi', + 'eval_sh_legendre', + 'exp1', + 'exp10', + 'exp2', + 'expi', + 'expit', + 'expm1', + 'expn', + 'exprel', + 'fdtr', + 'fdtrc', + 'fdtri', + 'fdtridfd', + 'fresnel', + 'gamma', + 'gammainc', + 'gammaincc', + 'gammainccinv', + 'gammaincinv', + 'gammaln', + 'gammasgn', + 'gdtr', + 'gdtrc', + 'gdtria', + 'gdtrib', + 'gdtrix', + 'hankel1', + 'hankel1e', + 
'hankel2', + 'hankel2e', + 'huber', + 'hyp0f1', + 'hyp1f1', + 'hyp2f1', + 'hyperu', + 'i0', + 'i0e', + 'i1', + 'i1e', + 'inv_boxcox', + 'inv_boxcox1p', + 'it2i0k0', + 'it2j0y0', + 'it2struve0', + 'itairy', + 'iti0k0', + 'itj0y0', + 'itmodstruve0', + 'itstruve0', + 'iv', + 'ive', + 'j0', + 'j1', + 'jn', + 'jv', + 'jve', + 'k0', + 'k0e', + 'k1', + 'k1e', + 'kei', + 'keip', + 'kelvin', + 'ker', + 'kerp', + 'kl_div', + 'kn', + 'kolmogi', + 'kolmogorov', + 'kv', + 'kve', + 'log1p', + 'log_expit', + 'log_ndtr', + 'loggamma', + 'logit', + 'lpmv', + 'mathieu_a', + 'mathieu_b', + 'mathieu_cem', + 'mathieu_modcem1', + 'mathieu_modcem2', + 'mathieu_modsem1', + 'mathieu_modsem2', + 'mathieu_sem', + 'modfresnelm', + 'modfresnelp', + 'modstruve', + 'nbdtr', + 'nbdtrc', + 'nbdtri', + 'nbdtrik', + 'nbdtrin', + 'ncfdtr', + 'ncfdtri', + 'ncfdtridfd', + 'ncfdtridfn', + 'ncfdtrinc', + 'nctdtr', + 'nctdtridf', + 'nctdtrinc', + 'nctdtrit', + 'ndtr', + 'ndtri', + 'ndtri_exp', + 'nrdtrimn', + 'nrdtrisd', + 'obl_ang1', + 'obl_ang1_cv', + 'obl_cv', + 'obl_rad1', + 'obl_rad1_cv', + 'obl_rad2', + 'obl_rad2_cv', + 'owens_t', + 'pbdv', + 'pbvv', + 'pbwa', + 'pdtr', + 'pdtrc', + 'pdtri', + 'pdtrik', + 'poch', + 'powm1', + 'pro_ang1', + 'pro_ang1_cv', + 'pro_cv', + 'pro_rad1', + 'pro_rad1_cv', + 'pro_rad2', + 'pro_rad2_cv', + 'pseudo_huber', + 'psi', + 'radian', + 'rel_entr', + 'rgamma', + 'round', + 'shichi', + 'sici', + 'sindg', + 'smirnov', + 'smirnovi', + 'spence', + 'sph_harm', + 'stdtr', + 'stdtridf', + 'stdtrit', + 'struve', + 'tandg', + 'tklmbda', + 'voigt_profile', + 'wofz', + 'wright_bessel', + 'wrightomega', + 'xlog1py', + 'xlogy', + 'y0', + 'y1', + 'yn', + 'yv', + 'yve', + 'zetac' +] + +def geterr() -> Dict[str, str]: ... +def seterr(**kwargs: str) -> Dict[str, str]: ... + +class errstate: + def __init__(self, **kargs: str) -> None: ... + def __enter__(self) -> None: ... 
+ def __exit__( + self, + exc_type: Any, # Unused + exc_value: Any, # Unused + traceback: Any, # Unused + ) -> None: ... + +_cosine_cdf: np.ufunc +_cosine_invcdf: np.ufunc +_cospi: np.ufunc +_ellip_harm: np.ufunc +_factorial: np.ufunc +_igam_fac: np.ufunc +_kolmogc: np.ufunc +_kolmogci: np.ufunc +_kolmogp: np.ufunc +_lambertw: np.ufunc +_lanczos_sum_expg_scaled: np.ufunc +_lgam1p: np.ufunc +_log1pmx: np.ufunc +_riemann_zeta: np.ufunc +_scaled_exp1: np.ufunc +_sf_error_test_function: np.ufunc +_sinpi: np.ufunc +_smirnovc: np.ufunc +_smirnovci: np.ufunc +_smirnovp: np.ufunc +_spherical_in: np.ufunc +_spherical_in_d: np.ufunc +_spherical_jn: np.ufunc +_spherical_jn_d: np.ufunc +_spherical_kn: np.ufunc +_spherical_kn_d: np.ufunc +_spherical_yn: np.ufunc +_spherical_yn_d: np.ufunc +_stirling2_inexact: np.ufunc +_struve_asymp_large_z: np.ufunc +_struve_bessel_series: np.ufunc +_struve_power_series: np.ufunc +_zeta: np.ufunc +agm: np.ufunc +airy: np.ufunc +airye: np.ufunc +bdtr: np.ufunc +bdtrc: np.ufunc +bdtri: np.ufunc +bdtrik: np.ufunc +bdtrin: np.ufunc +bei: np.ufunc +beip: np.ufunc +ber: np.ufunc +berp: np.ufunc +besselpoly: np.ufunc +beta: np.ufunc +betainc: np.ufunc +betaincc: np.ufunc +betainccinv: np.ufunc +betaincinv: np.ufunc +betaln: np.ufunc +binom: np.ufunc +boxcox1p: np.ufunc +boxcox: np.ufunc +btdtr: np.ufunc +btdtri: np.ufunc +btdtria: np.ufunc +btdtrib: np.ufunc +cbrt: np.ufunc +chdtr: np.ufunc +chdtrc: np.ufunc +chdtri: np.ufunc +chdtriv: np.ufunc +chndtr: np.ufunc +chndtridf: np.ufunc +chndtrinc: np.ufunc +chndtrix: np.ufunc +cosdg: np.ufunc +cosm1: np.ufunc +cotdg: np.ufunc +dawsn: np.ufunc +ellipe: np.ufunc +ellipeinc: np.ufunc +ellipj: np.ufunc +ellipk: np.ufunc +ellipkinc: np.ufunc +ellipkm1: np.ufunc +elliprc: np.ufunc +elliprd: np.ufunc +elliprf: np.ufunc +elliprg: np.ufunc +elliprj: np.ufunc +entr: np.ufunc +erf: np.ufunc +erfc: np.ufunc +erfcinv: np.ufunc +erfcx: np.ufunc +erfi: np.ufunc +erfinv: np.ufunc +eval_chebyc: np.ufunc +eval_chebys: 
np.ufunc +eval_chebyt: np.ufunc +eval_chebyu: np.ufunc +eval_gegenbauer: np.ufunc +eval_genlaguerre: np.ufunc +eval_hermite: np.ufunc +eval_hermitenorm: np.ufunc +eval_jacobi: np.ufunc +eval_laguerre: np.ufunc +eval_legendre: np.ufunc +eval_sh_chebyt: np.ufunc +eval_sh_chebyu: np.ufunc +eval_sh_jacobi: np.ufunc +eval_sh_legendre: np.ufunc +exp10: np.ufunc +exp1: np.ufunc +exp2: np.ufunc +expi: np.ufunc +expit: np.ufunc +expm1: np.ufunc +expn: np.ufunc +exprel: np.ufunc +fdtr: np.ufunc +fdtrc: np.ufunc +fdtri: np.ufunc +fdtridfd: np.ufunc +fresnel: np.ufunc +gamma: np.ufunc +gammainc: np.ufunc +gammaincc: np.ufunc +gammainccinv: np.ufunc +gammaincinv: np.ufunc +gammaln: np.ufunc +gammasgn: np.ufunc +gdtr: np.ufunc +gdtrc: np.ufunc +gdtria: np.ufunc +gdtrib: np.ufunc +gdtrix: np.ufunc +hankel1: np.ufunc +hankel1e: np.ufunc +hankel2: np.ufunc +hankel2e: np.ufunc +huber: np.ufunc +hyp0f1: np.ufunc +hyp1f1: np.ufunc +hyp2f1: np.ufunc +hyperu: np.ufunc +i0: np.ufunc +i0e: np.ufunc +i1: np.ufunc +i1e: np.ufunc +inv_boxcox1p: np.ufunc +inv_boxcox: np.ufunc +it2i0k0: np.ufunc +it2j0y0: np.ufunc +it2struve0: np.ufunc +itairy: np.ufunc +iti0k0: np.ufunc +itj0y0: np.ufunc +itmodstruve0: np.ufunc +itstruve0: np.ufunc +iv: np.ufunc +ive: np.ufunc +j0: np.ufunc +j1: np.ufunc +jn: np.ufunc +jv: np.ufunc +jve: np.ufunc +k0: np.ufunc +k0e: np.ufunc +k1: np.ufunc +k1e: np.ufunc +kei: np.ufunc +keip: np.ufunc +kelvin: np.ufunc +ker: np.ufunc +kerp: np.ufunc +kl_div: np.ufunc +kn: np.ufunc +kolmogi: np.ufunc +kolmogorov: np.ufunc +kv: np.ufunc +kve: np.ufunc +log1p: np.ufunc +log_expit: np.ufunc +log_ndtr: np.ufunc +loggamma: np.ufunc +logit: np.ufunc +lpmv: np.ufunc +mathieu_a: np.ufunc +mathieu_b: np.ufunc +mathieu_cem: np.ufunc +mathieu_modcem1: np.ufunc +mathieu_modcem2: np.ufunc +mathieu_modsem1: np.ufunc +mathieu_modsem2: np.ufunc +mathieu_sem: np.ufunc +modfresnelm: np.ufunc +modfresnelp: np.ufunc +modstruve: np.ufunc +nbdtr: np.ufunc +nbdtrc: np.ufunc +nbdtri: np.ufunc 
+nbdtrik: np.ufunc +nbdtrin: np.ufunc +ncfdtr: np.ufunc +ncfdtri: np.ufunc +ncfdtridfd: np.ufunc +ncfdtridfn: np.ufunc +ncfdtrinc: np.ufunc +nctdtr: np.ufunc +nctdtridf: np.ufunc +nctdtrinc: np.ufunc +nctdtrit: np.ufunc +ndtr: np.ufunc +ndtri: np.ufunc +ndtri_exp: np.ufunc +nrdtrimn: np.ufunc +nrdtrisd: np.ufunc +obl_ang1: np.ufunc +obl_ang1_cv: np.ufunc +obl_cv: np.ufunc +obl_rad1: np.ufunc +obl_rad1_cv: np.ufunc +obl_rad2: np.ufunc +obl_rad2_cv: np.ufunc +owens_t: np.ufunc +pbdv: np.ufunc +pbvv: np.ufunc +pbwa: np.ufunc +pdtr: np.ufunc +pdtrc: np.ufunc +pdtri: np.ufunc +pdtrik: np.ufunc +poch: np.ufunc +powm1: np.ufunc +pro_ang1: np.ufunc +pro_ang1_cv: np.ufunc +pro_cv: np.ufunc +pro_rad1: np.ufunc +pro_rad1_cv: np.ufunc +pro_rad2: np.ufunc +pro_rad2_cv: np.ufunc +pseudo_huber: np.ufunc +psi: np.ufunc +radian: np.ufunc +rel_entr: np.ufunc +rgamma: np.ufunc +round: np.ufunc +shichi: np.ufunc +sici: np.ufunc +sindg: np.ufunc +smirnov: np.ufunc +smirnovi: np.ufunc +spence: np.ufunc +sph_harm: np.ufunc +stdtr: np.ufunc +stdtridf: np.ufunc +stdtrit: np.ufunc +struve: np.ufunc +tandg: np.ufunc +tklmbda: np.ufunc +voigt_profile: np.ufunc +wofz: np.ufunc +wright_bessel: np.ufunc +wrightomega: np.ufunc +xlog1py: np.ufunc +xlogy: np.ufunc +y0: np.ufunc +y1: np.ufunc +yn: np.ufunc +yv: np.ufunc +yve: np.ufunc +zetac: np.ufunc + diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx b/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx new file mode 100644 index 0000000000000000000000000000000000000000..cc2b8a0528c6d3ad3076234256d89efe31541146 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx @@ -0,0 +1,181 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! 
+ +from libc.math cimport NAN + +include "_ufuncs_extra_code_common.pxi" + +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_ccospi "ccospi"(double complex) noexcept nogil +cdef void *_export_ccospi = _func_ccospi +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_lambertw_scalar "lambertw_scalar"(double complex, long, double) noexcept nogil +cdef void *_export_lambertw_scalar = _func_lambertw_scalar +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_csinpi "csinpi"(double complex) noexcept nogil +cdef void *_export_csinpi = _func_csinpi +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func__stirling2_inexact "_stirling2_inexact"(double, double) noexcept nogil +cdef void *_export__stirling2_inexact = _func__stirling2_inexact +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibeta_float "ibeta_float"(float, float, float) noexcept nogil +cdef void *_export_ibeta_float = _func_ibeta_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibeta_double "ibeta_double"(double, double, double) noexcept nogil +cdef void *_export_ibeta_double = _func_ibeta_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibetac_float "ibetac_float"(float, float, float) noexcept nogil +cdef void *_export_ibetac_float = _func_ibetac_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibetac_double "ibetac_double"(double, double, double) noexcept nogil +cdef void *_export_ibetac_double = _func_ibetac_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibetac_inv_float "ibetac_inv_float"(float, float, float) noexcept nogil +cdef void *_export_ibetac_inv_float = _func_ibetac_inv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibetac_inv_double "ibetac_inv_double"(double, double, double) noexcept nogil +cdef void *_export_ibetac_inv_double = _func_ibetac_inv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibeta_inv_float 
"ibeta_inv_float"(float, float, float) noexcept nogil +cdef void *_export_ibeta_inv_float = _func_ibeta_inv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibeta_inv_double "ibeta_inv_double"(double, double, double) noexcept nogil +cdef void *_export_ibeta_inv_double = _func_ibeta_inv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_binom "binom"(double, double) noexcept nogil +cdef void *_export_binom = _func_binom +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_dawsn "faddeeva_dawsn"(double) noexcept nogil +cdef void *_export_faddeeva_dawsn = _func_faddeeva_dawsn +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_dawsn_complex "faddeeva_dawsn_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_dawsn_complex = _func_faddeeva_dawsn_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RC "fellint_RC"(double, double) noexcept nogil +cdef void *_export_fellint_RC = _func_fellint_RC +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RC "cellint_RC"(double complex, double complex) noexcept nogil +cdef void *_export_cellint_RC = _func_cellint_RC +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RD "fellint_RD"(double, double, double) noexcept nogil +cdef void *_export_fellint_RD = _func_fellint_RD +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RD "cellint_RD"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RD = _func_cellint_RD +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RF "fellint_RF"(double, double, double) noexcept nogil +cdef void *_export_fellint_RF = _func_fellint_RF +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RF "cellint_RF"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RF = _func_cellint_RF +cdef extern from 
r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RG "fellint_RG"(double, double, double) noexcept nogil +cdef void *_export_fellint_RG = _func_fellint_RG +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RG "cellint_RG"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RG = _func_cellint_RG +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RJ "fellint_RJ"(double, double, double, double) noexcept nogil +cdef void *_export_fellint_RJ = _func_fellint_RJ +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RJ "cellint_RJ"(double complex, double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RJ = _func_cellint_RJ +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erf "faddeeva_erf"(double complex) noexcept nogil +cdef void *_export_faddeeva_erf = _func_faddeeva_erf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfc_complex "faddeeva_erfc_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfc_complex = _func_faddeeva_erfc_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_erfcx "faddeeva_erfcx"(double) noexcept nogil +cdef void *_export_faddeeva_erfcx = _func_faddeeva_erfcx +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfcx_complex "faddeeva_erfcx_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfcx_complex = _func_faddeeva_erfcx_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_erfi "faddeeva_erfi"(double) noexcept nogil +cdef void *_export_faddeeva_erfi = _func_faddeeva_erfi +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfi_complex "faddeeva_erfi_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfi_complex = _func_faddeeva_erfi_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef 
float _func_erfinv_float "erfinv_float"(float) noexcept nogil +cdef void *_export_erfinv_float = _func_erfinv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_erfinv_double "erfinv_double"(double) noexcept nogil +cdef void *_export_erfinv_double = _func_erfinv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_expit "expit"(double) noexcept nogil +cdef void *_export_expit = _func_expit +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_expitf "expitf"(float) noexcept nogil +cdef void *_export_expitf = _func_expitf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef long double _func_expitl "expitl"(long double) noexcept nogil +cdef void *_export_expitl = _func_expitl +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cgamma "cgamma"(double complex) noexcept nogil +cdef void *_export_cgamma = _func_cgamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hyp1f1_double "hyp1f1_double"(double, double, double) noexcept nogil +cdef void *_export_hyp1f1_double = _func_hyp1f1_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_log_expit "log_expit"(double) noexcept nogil +cdef void *_export_log_expit = _func_log_expit +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_log_expitf "log_expitf"(float) noexcept nogil +cdef void *_export_log_expitf = _func_log_expitf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef long double _func_log_expitl "log_expitl"(long double) noexcept nogil +cdef void *_export_log_expitl = _func_log_expitl +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_log_ndtr "faddeeva_log_ndtr"(double) noexcept nogil +cdef void *_export_faddeeva_log_ndtr = _func_faddeeva_log_ndtr +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_log_ndtr_complex "faddeeva_log_ndtr_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_log_ndtr_complex = _func_faddeeva_log_ndtr_complex +cdef extern from r"_ufuncs_cxx_defs.h": 
+ cdef double _func_loggamma_real "loggamma_real"(double) noexcept nogil +cdef void *_export_loggamma_real = _func_loggamma_real +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_loggamma "loggamma"(double complex) noexcept nogil +cdef void *_export_loggamma = _func_loggamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_logit "logit"(double) noexcept nogil +cdef void *_export_logit = _func_logit +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_logitf "logitf"(float) noexcept nogil +cdef void *_export_logitf = _func_logitf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef long double _func_logitl "logitl"(long double) noexcept nogil +cdef void *_export_logitl = _func_logitl +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_ndtr "faddeeva_ndtr"(double complex) noexcept nogil +cdef void *_export_faddeeva_ndtr = _func_faddeeva_ndtr +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_powm1_float "powm1_float"(float, float) noexcept nogil +cdef void *_export_powm1_float = _func_powm1_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_powm1_double "powm1_double"(double, double) noexcept nogil +cdef void *_export_powm1_double = _func_powm1_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cdigamma "cdigamma"(double complex) noexcept nogil +cdef void *_export_cdigamma = _func_cdigamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_digamma "digamma"(double) noexcept nogil +cdef void *_export_digamma = _func_digamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_crgamma "crgamma"(double complex) noexcept nogil +cdef void *_export_crgamma = _func_crgamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_voigt_profile "faddeeva_voigt_profile"(double, double, double) noexcept nogil +cdef void *_export_faddeeva_voigt_profile = _func_faddeeva_voigt_profile +cdef extern from r"_ufuncs_cxx_defs.h": + cdef 
double complex _func_faddeeva_w "faddeeva_w"(double complex) noexcept nogil +cdef void *_export_faddeeva_w = _func_faddeeva_w +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_wrightomega "wrightomega"(double complex) noexcept nogil +cdef void *_export_wrightomega = _func_wrightomega +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_wrightomega_real "wrightomega_real"(double) noexcept nogil +cdef void *_export_wrightomega_real = _func_wrightomega_real \ No newline at end of file diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs_defs.h b/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..bf634d76844b6f28b2ba8bd4696f933b139e444b --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/_ufuncs_defs.h @@ -0,0 +1,185 @@ +#ifndef UFUNCS_PROTO_H +#define UFUNCS_PROTO_H 1 +#include "_cosine.h" +npy_double cosine_cdf(npy_double); +npy_double cosine_invcdf(npy_double); +#include "cephes.h" +npy_double cospi(npy_double); +npy_double igam_fac(npy_double, npy_double); +npy_double kolmogc(npy_double); +npy_double kolmogci(npy_double); +npy_double kolmogp(npy_double); +npy_double lanczos_sum_expg_scaled(npy_double); +npy_double lgam1p(npy_double); +npy_double log1pmx(npy_double); +npy_double riemann_zeta(npy_double); +#include "scaled_exp1.h" +npy_double scaled_exp1(npy_double); +npy_double sinpi(npy_double); +npy_double smirnovc(npy_int, npy_double); +npy_double smirnovci(npy_int, npy_double); +npy_double smirnovp(npy_int, npy_double); +npy_double struve_asymp_large_z(npy_double, npy_double, npy_int, npy_double *); +npy_double struve_bessel_series(npy_double, npy_double, npy_int, npy_double *); +npy_double struve_power_series(npy_double, npy_double, npy_int, npy_double *); +npy_double zeta(npy_double, npy_double); +#include "amos_wrappers.h" +npy_int airy_wrap(npy_double, npy_double *, npy_double *, npy_double *, 
npy_double *); +npy_int cairy_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *); +npy_int cairy_wrap_e(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *); +npy_int cairy_wrap_e_real(npy_double, npy_double *, npy_double *, npy_double *, npy_double *); +npy_double bdtr(npy_double, npy_int, npy_double); +npy_double bdtrc(npy_double, npy_int, npy_double); +npy_double bdtri(npy_double, npy_int, npy_double); +#include "specfun_wrappers.h" +npy_double bei_wrap(npy_double); +npy_double beip_wrap(npy_double); +npy_double ber_wrap(npy_double); +npy_double berp_wrap(npy_double); +npy_double besselpoly(npy_double, npy_double, npy_double); +npy_double beta(npy_double, npy_double); +npy_double lbeta(npy_double, npy_double); +npy_double btdtr(npy_double, npy_double, npy_double); +npy_double incbi(npy_double, npy_double, npy_double); +npy_double cbrt(npy_double); +npy_double chdtr(npy_double, npy_double); +npy_double chdtrc(npy_double, npy_double); +npy_double chdtri(npy_double, npy_double); +npy_double cosdg(npy_double); +npy_double cosm1(npy_double); +npy_double cotdg(npy_double); +npy_double ellpe(npy_double); +npy_double ellie(npy_double, npy_double); +npy_int ellpj(npy_double, npy_double, npy_double *, npy_double *, npy_double *, npy_double *); +npy_double ellik(npy_double, npy_double); +npy_double ellpk(npy_double); +npy_double erf(npy_double); +npy_double erfc(npy_double); +npy_double erfcinv(npy_double); +npy_cdouble cexp1_wrap(npy_cdouble); +npy_double exp1_wrap(npy_double); +npy_double exp10(npy_double); +npy_double exp2(npy_double); +npy_cdouble cexpi_wrap(npy_cdouble); +npy_double expi_wrap(npy_double); +npy_double expm1(npy_double); +npy_double expn(npy_int, npy_double); +npy_double fdtr(npy_double, npy_double, npy_double); +npy_double fdtrc(npy_double, npy_double, npy_double); +npy_double fdtri(npy_double, npy_double, npy_double); +npy_int fresnl(npy_double, npy_double *, npy_double *); +npy_int 
cfresnl_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *); +npy_double Gamma(npy_double); +npy_double igam(npy_double, npy_double); +npy_double igamc(npy_double, npy_double); +npy_double igamci(npy_double, npy_double); +npy_double igami(npy_double, npy_double); +npy_double lgam(npy_double); +npy_double gammasgn(npy_double); +npy_double gdtr(npy_double, npy_double, npy_double); +npy_double gdtrc(npy_double, npy_double, npy_double); +npy_cdouble cbesh_wrap1(npy_double, npy_cdouble); +npy_cdouble cbesh_wrap1_e(npy_double, npy_cdouble); +npy_cdouble cbesh_wrap2(npy_double, npy_cdouble); +npy_cdouble cbesh_wrap2_e(npy_double, npy_cdouble); +npy_cdouble chyp1f1_wrap(npy_double, npy_double, npy_cdouble); +npy_double hyp2f1(npy_double, npy_double, npy_double, npy_double); +npy_double i0(npy_double); +npy_double i0e(npy_double); +npy_double i1(npy_double); +npy_double i1e(npy_double); +npy_int it2i0k0_wrap(npy_double, npy_double *, npy_double *); +npy_int it2j0y0_wrap(npy_double, npy_double *, npy_double *); +npy_double it2struve0_wrap(npy_double); +npy_int itairy_wrap(npy_double, npy_double *, npy_double *, npy_double *, npy_double *); +npy_int it1i0k0_wrap(npy_double, npy_double *, npy_double *); +npy_int it1j0y0_wrap(npy_double, npy_double *, npy_double *); +npy_double itmodstruve0_wrap(npy_double); +npy_double itstruve0_wrap(npy_double); +npy_cdouble cbesi_wrap(npy_double, npy_cdouble); +npy_double iv(npy_double, npy_double); +npy_cdouble cbesi_wrap_e(npy_double, npy_cdouble); +npy_double cbesi_wrap_e_real(npy_double, npy_double); +npy_double j0(npy_double); +npy_double j1(npy_double); +npy_cdouble cbesj_wrap(npy_double, npy_cdouble); +npy_double cbesj_wrap_real(npy_double, npy_double); +npy_cdouble cbesj_wrap_e(npy_double, npy_cdouble); +npy_double cbesj_wrap_e_real(npy_double, npy_double); +npy_double k0(npy_double); +npy_double k0e(npy_double); +npy_double k1(npy_double); +npy_double k1e(npy_double); +npy_double kei_wrap(npy_double); +npy_double keip_wrap(npy_double); 
+npy_int kelvin_wrap(npy_double, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *); +npy_double ker_wrap(npy_double); +npy_double kerp_wrap(npy_double); +npy_double cbesk_wrap_real_int(npy_int, npy_double); +npy_double kolmogi(npy_double); +npy_double kolmogorov(npy_double); +npy_cdouble cbesk_wrap(npy_double, npy_cdouble); +npy_double cbesk_wrap_real(npy_double, npy_double); +npy_cdouble cbesk_wrap_e(npy_double, npy_cdouble); +npy_double cbesk_wrap_e_real(npy_double, npy_double); +npy_double log1p(npy_double); +npy_double pmv_wrap(npy_double, npy_double, npy_double); +npy_double cem_cva_wrap(npy_double, npy_double); +npy_double sem_cva_wrap(npy_double, npy_double); +npy_int cem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int mcm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int mcm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int msm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int msm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int sem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int modified_fresnel_minus_wrap(npy_double, npy_cdouble *, npy_cdouble *); +npy_int modified_fresnel_plus_wrap(npy_double, npy_cdouble *, npy_cdouble *); +npy_double struve_l(npy_double, npy_double); +npy_double nbdtr(npy_int, npy_int, npy_double); +npy_double nbdtrc(npy_int, npy_int, npy_double); +npy_double nbdtri(npy_int, npy_int, npy_double); +npy_double ndtr(npy_double); +npy_double ndtri(npy_double); +npy_double oblate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int oblate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double oblate_segv_wrap(npy_double, npy_double, npy_double); +npy_double oblate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int 
oblate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double oblate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int oblate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double owens_t(npy_double, npy_double); +npy_int pbdv_wrap(npy_double, npy_double, npy_double *, npy_double *); +npy_int pbvv_wrap(npy_double, npy_double, npy_double *, npy_double *); +npy_int pbwa_wrap(npy_double, npy_double, npy_double *, npy_double *); +npy_double pdtr(npy_double, npy_double); +npy_double pdtrc(npy_double, npy_double); +npy_double pdtri(npy_int, npy_double); +npy_double poch(npy_double, npy_double); +npy_double prolate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int prolate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double prolate_segv_wrap(npy_double, npy_double, npy_double); +npy_double prolate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int prolate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double prolate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int prolate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double radian(npy_double, npy_double, npy_double); +npy_double rgamma(npy_double); +npy_double round(npy_double); +npy_int shichi(npy_double, npy_double *, npy_double *); +npy_int sici(npy_double, npy_double *, npy_double *); +npy_double sindg(npy_double); +npy_double smirnov(npy_int, npy_double); +npy_double smirnovi(npy_int, npy_double); +npy_double spence(npy_double); +npy_double struve_h(npy_double, npy_double); +npy_double tandg(npy_double); +npy_double tukeylambdacdf(npy_double, npy_double); +npy_double 
y0(npy_double); +npy_double y1(npy_double); +npy_double yn(npy_int, npy_double); +npy_cdouble cbesy_wrap(npy_double, npy_cdouble); +npy_double cbesy_wrap_real(npy_double, npy_double); +npy_cdouble cbesy_wrap_e(npy_double, npy_cdouble); +npy_double cbesy_wrap_e_real(npy_double, npy_double); +npy_double zetac(npy_double); +#endif diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/cython_special.pyx b/openflamingo/lib/python3.10/site-packages/scipy/special/cython_special.pyx new file mode 100644 index 0000000000000000000000000000000000000000..2ffe3a26ccdf229b59bcb4f2ef37d3b2dd431167 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/cython_special.pyx @@ -0,0 +1,3698 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! +""" +.. highlight:: cython + +Cython API for special functions +================================ + +Scalar, typed versions of many of the functions in ``scipy.special`` +can be accessed directly from Cython; the complete list is given +below. Functions are overloaded using Cython fused types so their +names match their Python counterpart. The module follows the following +conventions: + +- If a function's Python counterpart returns multiple values, then the + function returns its outputs via pointers in the final arguments. +- If a function's Python counterpart returns a single value, then the + function's output is returned directly. + +The module is usable from Cython via:: + + cimport scipy.special.cython_special + +Error handling +-------------- + +Functions can indicate an error by returning ``nan``; however they +cannot emit warnings like their counterparts in ``scipy.special``. 
+ +Available functions +------------------- + +- :py:func:`~scipy.special.voigt_profile`:: + + double voigt_profile(double, double, double) + +- :py:func:`~scipy.special.agm`:: + + double agm(double, double) + +- :py:func:`~scipy.special.airy`:: + + void airy(double, double *, double *, double *, double *) + void airy(double complex, double complex *, double complex *, double complex *, double complex *) + +- :py:func:`~scipy.special.airye`:: + + void airye(double complex, double complex *, double complex *, double complex *, double complex *) + void airye(double, double *, double *, double *, double *) + +- :py:func:`~scipy.special.bdtr`:: + + double bdtr(double, double, double) + double bdtr(double, long, double) + +- :py:func:`~scipy.special.bdtrc`:: + + double bdtrc(double, double, double) + double bdtrc(double, long, double) + +- :py:func:`~scipy.special.bdtri`:: + + double bdtri(double, double, double) + double bdtri(double, long, double) + +- :py:func:`~scipy.special.bdtrik`:: + + double bdtrik(double, double, double) + +- :py:func:`~scipy.special.bdtrin`:: + + double bdtrin(double, double, double) + +- :py:func:`~scipy.special.bei`:: + + double bei(double) + +- :py:func:`~scipy.special.beip`:: + + double beip(double) + +- :py:func:`~scipy.special.ber`:: + + double ber(double) + +- :py:func:`~scipy.special.berp`:: + + double berp(double) + +- :py:func:`~scipy.special.besselpoly`:: + + double besselpoly(double, double, double) + +- :py:func:`~scipy.special.beta`:: + + double beta(double, double) + +- :py:func:`~scipy.special.betainc`:: + + float betainc(float, float, float) + double betainc(double, double, double) + +- :py:func:`~scipy.special.betaincc`:: + + float betaincc(float, float, float) + double betaincc(double, double, double) + +- :py:func:`~scipy.special.betaincinv`:: + + float betaincinv(float, float, float) + double betaincinv(double, double, double) + +- :py:func:`~scipy.special.betainccinv`:: + + float betainccinv(float, float, float) + double 
betainccinv(double, double, double) + +- :py:func:`~scipy.special.betaln`:: + + double betaln(double, double) + +- :py:func:`~scipy.special.binom`:: + + double binom(double, double) + +- :py:func:`~scipy.special.boxcox`:: + + double boxcox(double, double) + +- :py:func:`~scipy.special.boxcox1p`:: + + double boxcox1p(double, double) + +- :py:func:`~scipy.special.btdtr`:: + + double btdtr(double, double, double) + +- :py:func:`~scipy.special.btdtri`:: + + double btdtri(double, double, double) + +- :py:func:`~scipy.special.btdtria`:: + + double btdtria(double, double, double) + +- :py:func:`~scipy.special.btdtrib`:: + + double btdtrib(double, double, double) + +- :py:func:`~scipy.special.cbrt`:: + + double cbrt(double) + +- :py:func:`~scipy.special.chdtr`:: + + double chdtr(double, double) + +- :py:func:`~scipy.special.chdtrc`:: + + double chdtrc(double, double) + +- :py:func:`~scipy.special.chdtri`:: + + double chdtri(double, double) + +- :py:func:`~scipy.special.chdtriv`:: + + double chdtriv(double, double) + +- :py:func:`~scipy.special.chndtr`:: + + double chndtr(double, double, double) + +- :py:func:`~scipy.special.chndtridf`:: + + double chndtridf(double, double, double) + +- :py:func:`~scipy.special.chndtrinc`:: + + double chndtrinc(double, double, double) + +- :py:func:`~scipy.special.chndtrix`:: + + double chndtrix(double, double, double) + +- :py:func:`~scipy.special.cosdg`:: + + double cosdg(double) + +- :py:func:`~scipy.special.cosm1`:: + + double cosm1(double) + +- :py:func:`~scipy.special.cotdg`:: + + double cotdg(double) + +- :py:func:`~scipy.special.dawsn`:: + + double dawsn(double) + double complex dawsn(double complex) + +- :py:func:`~scipy.special.ellipe`:: + + double ellipe(double) + +- :py:func:`~scipy.special.ellipeinc`:: + + double ellipeinc(double, double) + +- :py:func:`~scipy.special.ellipj`:: + + void ellipj(double, double, double *, double *, double *, double *) + +- :py:func:`~scipy.special.ellipkinc`:: + + double ellipkinc(double, double) 
+ +- :py:func:`~scipy.special.ellipkm1`:: + + double ellipkm1(double) + +- :py:func:`~scipy.special.ellipk`:: + + double ellipk(double) + +- :py:func:`~scipy.special.elliprc`:: + + double elliprc(double, double) + double complex elliprc(double complex, double complex) + +- :py:func:`~scipy.special.elliprd`:: + + double elliprd(double, double, double) + double complex elliprd(double complex, double complex, double complex) + +- :py:func:`~scipy.special.elliprf`:: + + double elliprf(double, double, double) + double complex elliprf(double complex, double complex, double complex) + +- :py:func:`~scipy.special.elliprg`:: + + double elliprg(double, double, double) + double complex elliprg(double complex, double complex, double complex) + +- :py:func:`~scipy.special.elliprj`:: + + double elliprj(double, double, double, double) + double complex elliprj(double complex, double complex, double complex, double complex) + +- :py:func:`~scipy.special.entr`:: + + double entr(double) + +- :py:func:`~scipy.special.erf`:: + + double complex erf(double complex) + double erf(double) + +- :py:func:`~scipy.special.erfc`:: + + double complex erfc(double complex) + double erfc(double) + +- :py:func:`~scipy.special.erfcx`:: + + double erfcx(double) + double complex erfcx(double complex) + +- :py:func:`~scipy.special.erfi`:: + + double erfi(double) + double complex erfi(double complex) + +- :py:func:`~scipy.special.erfinv`:: + + float erfinv(float) + double erfinv(double) + +- :py:func:`~scipy.special.erfcinv`:: + + double erfcinv(double) + +- :py:func:`~scipy.special.eval_chebyc`:: + + double complex eval_chebyc(double, double complex) + double eval_chebyc(double, double) + double eval_chebyc(long, double) + +- :py:func:`~scipy.special.eval_chebys`:: + + double complex eval_chebys(double, double complex) + double eval_chebys(double, double) + double eval_chebys(long, double) + +- :py:func:`~scipy.special.eval_chebyt`:: + + double complex eval_chebyt(double, double complex) + double 
eval_chebyt(double, double) + double eval_chebyt(long, double) + +- :py:func:`~scipy.special.eval_chebyu`:: + + double complex eval_chebyu(double, double complex) + double eval_chebyu(double, double) + double eval_chebyu(long, double) + +- :py:func:`~scipy.special.eval_gegenbauer`:: + + double complex eval_gegenbauer(double, double, double complex) + double eval_gegenbauer(double, double, double) + double eval_gegenbauer(long, double, double) + +- :py:func:`~scipy.special.eval_genlaguerre`:: + + double complex eval_genlaguerre(double, double, double complex) + double eval_genlaguerre(double, double, double) + double eval_genlaguerre(long, double, double) + +- :py:func:`~scipy.special.eval_hermite`:: + + double eval_hermite(long, double) + +- :py:func:`~scipy.special.eval_hermitenorm`:: + + double eval_hermitenorm(long, double) + +- :py:func:`~scipy.special.eval_jacobi`:: + + double complex eval_jacobi(double, double, double, double complex) + double eval_jacobi(double, double, double, double) + double eval_jacobi(long, double, double, double) + +- :py:func:`~scipy.special.eval_laguerre`:: + + double complex eval_laguerre(double, double complex) + double eval_laguerre(double, double) + double eval_laguerre(long, double) + +- :py:func:`~scipy.special.eval_legendre`:: + + double complex eval_legendre(double, double complex) + double eval_legendre(double, double) + double eval_legendre(long, double) + +- :py:func:`~scipy.special.eval_sh_chebyt`:: + + double complex eval_sh_chebyt(double, double complex) + double eval_sh_chebyt(double, double) + double eval_sh_chebyt(long, double) + +- :py:func:`~scipy.special.eval_sh_chebyu`:: + + double complex eval_sh_chebyu(double, double complex) + double eval_sh_chebyu(double, double) + double eval_sh_chebyu(long, double) + +- :py:func:`~scipy.special.eval_sh_jacobi`:: + + double complex eval_sh_jacobi(double, double, double, double complex) + double eval_sh_jacobi(double, double, double, double) + double eval_sh_jacobi(long, 
double, double, double) + +- :py:func:`~scipy.special.eval_sh_legendre`:: + + double complex eval_sh_legendre(double, double complex) + double eval_sh_legendre(double, double) + double eval_sh_legendre(long, double) + +- :py:func:`~scipy.special.exp1`:: + + double complex exp1(double complex) + double exp1(double) + +- :py:func:`~scipy.special.exp10`:: + + double exp10(double) + +- :py:func:`~scipy.special.exp2`:: + + double exp2(double) + +- :py:func:`~scipy.special.expi`:: + + double complex expi(double complex) + double expi(double) + +- :py:func:`~scipy.special.expit`:: + + double expit(double) + float expit(float) + long double expit(long double) + +- :py:func:`~scipy.special.expm1`:: + + double complex expm1(double complex) + double expm1(double) + +- :py:func:`~scipy.special.expn`:: + + double expn(double, double) + double expn(long, double) + +- :py:func:`~scipy.special.exprel`:: + + double exprel(double) + +- :py:func:`~scipy.special.fdtr`:: + + double fdtr(double, double, double) + +- :py:func:`~scipy.special.fdtrc`:: + + double fdtrc(double, double, double) + +- :py:func:`~scipy.special.fdtri`:: + + double fdtri(double, double, double) + +- :py:func:`~scipy.special.fdtridfd`:: + + double fdtridfd(double, double, double) + +- :py:func:`~scipy.special.fresnel`:: + + void fresnel(double, double *, double *) + void fresnel(double complex, double complex *, double complex *) + +- :py:func:`~scipy.special.gamma`:: + + double complex gamma(double complex) + double gamma(double) + +- :py:func:`~scipy.special.gammainc`:: + + double gammainc(double, double) + +- :py:func:`~scipy.special.gammaincc`:: + + double gammaincc(double, double) + +- :py:func:`~scipy.special.gammainccinv`:: + + double gammainccinv(double, double) + +- :py:func:`~scipy.special.gammaincinv`:: + + double gammaincinv(double, double) + +- :py:func:`~scipy.special.gammaln`:: + + double gammaln(double) + +- :py:func:`~scipy.special.gammasgn`:: + + double gammasgn(double) + +- 
:py:func:`~scipy.special.gdtr`:: + + double gdtr(double, double, double) + +- :py:func:`~scipy.special.gdtrc`:: + + double gdtrc(double, double, double) + +- :py:func:`~scipy.special.gdtria`:: + + double gdtria(double, double, double) + +- :py:func:`~scipy.special.gdtrib`:: + + double gdtrib(double, double, double) + +- :py:func:`~scipy.special.gdtrix`:: + + double gdtrix(double, double, double) + +- :py:func:`~scipy.special.hankel1`:: + + double complex hankel1(double, double complex) + +- :py:func:`~scipy.special.hankel1e`:: + + double complex hankel1e(double, double complex) + +- :py:func:`~scipy.special.hankel2`:: + + double complex hankel2(double, double complex) + +- :py:func:`~scipy.special.hankel2e`:: + + double complex hankel2e(double, double complex) + +- :py:func:`~scipy.special.huber`:: + + double huber(double, double) + +- :py:func:`~scipy.special.hyp0f1`:: + + double complex hyp0f1(double, double complex) + double hyp0f1(double, double) + +- :py:func:`~scipy.special.hyp1f1`:: + + double hyp1f1(double, double, double) + double complex hyp1f1(double, double, double complex) + +- :py:func:`~scipy.special.hyp2f1`:: + + double hyp2f1(double, double, double, double) + double complex hyp2f1(double, double, double, double complex) + +- :py:func:`~scipy.special.hyperu`:: + + double hyperu(double, double, double) + +- :py:func:`~scipy.special.i0`:: + + double i0(double) + +- :py:func:`~scipy.special.i0e`:: + + double i0e(double) + +- :py:func:`~scipy.special.i1`:: + + double i1(double) + +- :py:func:`~scipy.special.i1e`:: + + double i1e(double) + +- :py:func:`~scipy.special.inv_boxcox`:: + + double inv_boxcox(double, double) + +- :py:func:`~scipy.special.inv_boxcox1p`:: + + double inv_boxcox1p(double, double) + +- :py:func:`~scipy.special.it2i0k0`:: + + void it2i0k0(double, double *, double *) + +- :py:func:`~scipy.special.it2j0y0`:: + + void it2j0y0(double, double *, double *) + +- :py:func:`~scipy.special.it2struve0`:: + + double it2struve0(double) + +- 
:py:func:`~scipy.special.itairy`:: + + void itairy(double, double *, double *, double *, double *) + +- :py:func:`~scipy.special.iti0k0`:: + + void iti0k0(double, double *, double *) + +- :py:func:`~scipy.special.itj0y0`:: + + void itj0y0(double, double *, double *) + +- :py:func:`~scipy.special.itmodstruve0`:: + + double itmodstruve0(double) + +- :py:func:`~scipy.special.itstruve0`:: + + double itstruve0(double) + +- :py:func:`~scipy.special.iv`:: + + double complex iv(double, double complex) + double iv(double, double) + +- :py:func:`~scipy.special.ive`:: + + double complex ive(double, double complex) + double ive(double, double) + +- :py:func:`~scipy.special.j0`:: + + double j0(double) + +- :py:func:`~scipy.special.j1`:: + + double j1(double) + +- :py:func:`~scipy.special.jv`:: + + double complex jv(double, double complex) + double jv(double, double) + +- :py:func:`~scipy.special.jve`:: + + double complex jve(double, double complex) + double jve(double, double) + +- :py:func:`~scipy.special.k0`:: + + double k0(double) + +- :py:func:`~scipy.special.k0e`:: + + double k0e(double) + +- :py:func:`~scipy.special.k1`:: + + double k1(double) + +- :py:func:`~scipy.special.k1e`:: + + double k1e(double) + +- :py:func:`~scipy.special.kei`:: + + double kei(double) + +- :py:func:`~scipy.special.keip`:: + + double keip(double) + +- :py:func:`~scipy.special.kelvin`:: + + void kelvin(double, double complex *, double complex *, double complex *, double complex *) + +- :py:func:`~scipy.special.ker`:: + + double ker(double) + +- :py:func:`~scipy.special.kerp`:: + + double kerp(double) + +- :py:func:`~scipy.special.kl_div`:: + + double kl_div(double, double) + +- :py:func:`~scipy.special.kn`:: + + double kn(double, double) + double kn(long, double) + +- :py:func:`~scipy.special.kolmogi`:: + + double kolmogi(double) + +- :py:func:`~scipy.special.kolmogorov`:: + + double kolmogorov(double) + +- :py:func:`~scipy.special.kv`:: + + double complex kv(double, double complex) + double 
kv(double, double) + +- :py:func:`~scipy.special.kve`:: + + double complex kve(double, double complex) + double kve(double, double) + +- :py:func:`~scipy.special.log1p`:: + + double complex log1p(double complex) + double log1p(double) + +- :py:func:`~scipy.special.log_expit`:: + + double log_expit(double) + float log_expit(float) + long double log_expit(long double) + +- :py:func:`~scipy.special.log_ndtr`:: + + double log_ndtr(double) + double complex log_ndtr(double complex) + +- :py:func:`~scipy.special.loggamma`:: + + double loggamma(double) + double complex loggamma(double complex) + +- :py:func:`~scipy.special.logit`:: + + double logit(double) + float logit(float) + long double logit(long double) + +- :py:func:`~scipy.special.lpmv`:: + + double lpmv(double, double, double) + +- :py:func:`~scipy.special.mathieu_a`:: + + double mathieu_a(double, double) + +- :py:func:`~scipy.special.mathieu_b`:: + + double mathieu_b(double, double) + +- :py:func:`~scipy.special.mathieu_cem`:: + + void mathieu_cem(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modcem1`:: + + void mathieu_modcem1(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modcem2`:: + + void mathieu_modcem2(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modsem1`:: + + void mathieu_modsem1(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modsem2`:: + + void mathieu_modsem2(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_sem`:: + + void mathieu_sem(double, double, double, double *, double *) + +- :py:func:`~scipy.special.modfresnelm`:: + + void modfresnelm(double, double complex *, double complex *) + +- :py:func:`~scipy.special.modfresnelp`:: + + void modfresnelp(double, double complex *, double complex *) + +- :py:func:`~scipy.special.modstruve`:: + + double modstruve(double, double) + +- :py:func:`~scipy.special.nbdtr`:: + + double 
nbdtr(double, double, double) + double nbdtr(long, long, double) + +- :py:func:`~scipy.special.nbdtrc`:: + + double nbdtrc(double, double, double) + double nbdtrc(long, long, double) + +- :py:func:`~scipy.special.nbdtri`:: + + double nbdtri(double, double, double) + double nbdtri(long, long, double) + +- :py:func:`~scipy.special.nbdtrik`:: + + double nbdtrik(double, double, double) + +- :py:func:`~scipy.special.nbdtrin`:: + + double nbdtrin(double, double, double) + +- :py:func:`~scipy.special.ncfdtr`:: + + double ncfdtr(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtri`:: + + double ncfdtri(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtridfd`:: + + double ncfdtridfd(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtridfn`:: + + double ncfdtridfn(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtrinc`:: + + double ncfdtrinc(double, double, double, double) + +- :py:func:`~scipy.special.nctdtr`:: + + double nctdtr(double, double, double) + +- :py:func:`~scipy.special.nctdtridf`:: + + double nctdtridf(double, double, double) + +- :py:func:`~scipy.special.nctdtrinc`:: + + double nctdtrinc(double, double, double) + +- :py:func:`~scipy.special.nctdtrit`:: + + double nctdtrit(double, double, double) + +- :py:func:`~scipy.special.ndtr`:: + + double complex ndtr(double complex) + double ndtr(double) + +- :py:func:`~scipy.special.ndtri`:: + + double ndtri(double) + +- :py:func:`~scipy.special.nrdtrimn`:: + + double nrdtrimn(double, double, double) + +- :py:func:`~scipy.special.nrdtrisd`:: + + double nrdtrisd(double, double, double) + +- :py:func:`~scipy.special.obl_ang1`:: + + void obl_ang1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_ang1_cv`:: + + void obl_ang1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_cv`:: + + double obl_cv(double, double, double) + +- :py:func:`~scipy.special.obl_rad1`:: + + void 
obl_rad1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_rad1_cv`:: + + void obl_rad1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_rad2`:: + + void obl_rad2(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_rad2_cv`:: + + void obl_rad2_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.owens_t`:: + + double owens_t(double, double) + +- :py:func:`~scipy.special.pbdv`:: + + void pbdv(double, double, double *, double *) + +- :py:func:`~scipy.special.pbvv`:: + + void pbvv(double, double, double *, double *) + +- :py:func:`~scipy.special.pbwa`:: + + void pbwa(double, double, double *, double *) + +- :py:func:`~scipy.special.pdtr`:: + + double pdtr(double, double) + +- :py:func:`~scipy.special.pdtrc`:: + + double pdtrc(double, double) + +- :py:func:`~scipy.special.pdtri`:: + + double pdtri(double, double) + double pdtri(long, double) + +- :py:func:`~scipy.special.pdtrik`:: + + double pdtrik(double, double) + +- :py:func:`~scipy.special.poch`:: + + double poch(double, double) + +- :py:func:`~scipy.special.powm1`:: + + float powm1(float, float) + double powm1(double, double) + +- :py:func:`~scipy.special.pro_ang1`:: + + void pro_ang1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_ang1_cv`:: + + void pro_ang1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_cv`:: + + double pro_cv(double, double, double) + +- :py:func:`~scipy.special.pro_rad1`:: + + void pro_rad1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_rad1_cv`:: + + void pro_rad1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_rad2`:: + + void pro_rad2(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_rad2_cv`:: + + void pro_rad2_cv(double, 
double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pseudo_huber`:: + + double pseudo_huber(double, double) + +- :py:func:`~scipy.special.psi`:: + + double complex psi(double complex) + double psi(double) + +- :py:func:`~scipy.special.radian`:: + + double radian(double, double, double) + +- :py:func:`~scipy.special.rel_entr`:: + + double rel_entr(double, double) + +- :py:func:`~scipy.special.rgamma`:: + + double complex rgamma(double complex) + double rgamma(double) + +- :py:func:`~scipy.special.round`:: + + double round(double) + +- :py:func:`~scipy.special.shichi`:: + + void shichi(double complex, double complex *, double complex *) + void shichi(double, double *, double *) + +- :py:func:`~scipy.special.sici`:: + + void sici(double complex, double complex *, double complex *) + void sici(double, double *, double *) + +- :py:func:`~scipy.special.sindg`:: + + double sindg(double) + +- :py:func:`~scipy.special.smirnov`:: + + double smirnov(double, double) + double smirnov(long, double) + +- :py:func:`~scipy.special.smirnovi`:: + + double smirnovi(double, double) + double smirnovi(long, double) + +- :py:func:`~scipy.special.spence`:: + + double complex spence(double complex) + double spence(double) + +- :py:func:`~scipy.special.sph_harm`:: + + double complex sph_harm(double, double, double, double) + double complex sph_harm(long, long, double, double) + +- :py:func:`~scipy.special.stdtr`:: + + double stdtr(double, double) + +- :py:func:`~scipy.special.stdtridf`:: + + double stdtridf(double, double) + +- :py:func:`~scipy.special.stdtrit`:: + + double stdtrit(double, double) + +- :py:func:`~scipy.special.struve`:: + + double struve(double, double) + +- :py:func:`~scipy.special.tandg`:: + + double tandg(double) + +- :py:func:`~scipy.special.tklmbda`:: + + double tklmbda(double, double) + +- :py:func:`~scipy.special.wofz`:: + + double complex wofz(double complex) + +- :py:func:`~scipy.special.wrightomega`:: + + double complex 
wrightomega(double complex) + double wrightomega(double) + +- :py:func:`~scipy.special.xlog1py`:: + + double xlog1py(double, double) + double complex xlog1py(double complex, double complex) + +- :py:func:`~scipy.special.xlogy`:: + + double xlogy(double, double) + double complex xlogy(double complex, double complex) + +- :py:func:`~scipy.special.y0`:: + + double y0(double) + +- :py:func:`~scipy.special.y1`:: + + double y1(double) + +- :py:func:`~scipy.special.yn`:: + + double yn(double, double) + double yn(long, double) + +- :py:func:`~scipy.special.yv`:: + + double complex yv(double, double complex) + double yv(double, double) + +- :py:func:`~scipy.special.yve`:: + + double complex yve(double, double complex) + double yve(double, double) + +- :py:func:`~scipy.special.zetac`:: + + double zetac(double) + +- :py:func:`~scipy.special.wright_bessel`:: + + double wright_bessel(double, double, double) + +- :py:func:`~scipy.special.ndtri_exp`:: + + double ndtri_exp(double) + + +Custom functions +---------------- + +Some functions in ``scipy.special`` which are not ufuncs have custom +Cython wrappers. + +Spherical Bessel functions +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The optional ``derivative`` boolean argument is replaced with an +optional Cython ``bint``, leading to the following signatures. 
+ +- :py:func:`~scipy.special.spherical_jn`:: + + double complex spherical_jn(long, double complex) + double complex spherical_jn(long, double complex, bint) + double spherical_jn(long, double) + double spherical_jn(long, double, bint) + +- :py:func:`~scipy.special.spherical_yn`:: + + double complex spherical_yn(long, double complex) + double complex spherical_yn(long, double complex, bint) + double spherical_yn(long, double) + double spherical_yn(long, double, bint) + +- :py:func:`~scipy.special.spherical_in`:: + + double complex spherical_in(long, double complex) + double complex spherical_in(long, double complex, bint) + double spherical_in(long, double) + double spherical_in(long, double, bint) + +- :py:func:`~scipy.special.spherical_kn`:: + + double complex spherical_kn(long, double complex) + double complex spherical_kn(long, double complex, bint) + double spherical_kn(long, double) + double spherical_kn(long, double, bint) + +""" + +from libc.math cimport NAN + +include "_cython_special.pxi" +include "_cython_special_custom.pxi" + +from ._agm cimport agm as _func_agm +ctypedef double _proto_agm_t(double, double) noexcept nogil +cdef _proto_agm_t *_proto_agm_t_var = &_func_agm +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_airy_wrap "airy_wrap"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cairy_wrap "cairy_wrap"(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cairy_wrap_e "cairy_wrap_e"(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cairy_wrap_e_real "cairy_wrap_e_real"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +from ._legacy cimport bdtr_unsafe as _func_bdtr_unsafe +ctypedef double _proto_bdtr_unsafe_t(double, double, double) noexcept nogil +cdef 
_proto_bdtr_unsafe_t *_proto_bdtr_unsafe_t_var = &_func_bdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bdtr "bdtr"(npy_double, npy_int, npy_double)nogil +from ._legacy cimport bdtrc_unsafe as _func_bdtrc_unsafe +ctypedef double _proto_bdtrc_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtrc_unsafe_t *_proto_bdtrc_unsafe_t_var = &_func_bdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bdtrc "bdtrc"(npy_double, npy_int, npy_double)nogil +from ._legacy cimport bdtri_unsafe as _func_bdtri_unsafe +ctypedef double _proto_bdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtri_unsafe_t *_proto_bdtri_unsafe_t_var = &_func_bdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bdtri "bdtri"(npy_double, npy_int, npy_double)nogil +from ._cdflib_wrappers cimport bdtrik as _func_bdtrik +ctypedef double _proto_bdtrik_t(double, double, double) noexcept nogil +cdef _proto_bdtrik_t *_proto_bdtrik_t_var = &_func_bdtrik +from ._cdflib_wrappers cimport bdtrin as _func_bdtrin +ctypedef double _proto_bdtrin_t(double, double, double) noexcept nogil +cdef _proto_bdtrin_t *_proto_bdtrin_t_var = &_func_bdtrin +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bei_wrap "bei_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_beip_wrap "beip_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ber_wrap "ber_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_berp_wrap "berp_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_besselpoly "besselpoly"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_beta "beta"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_lbeta "lbeta"(npy_double, npy_double)nogil +from ._boxcox cimport boxcox as _func_boxcox +ctypedef double 
_proto_boxcox_t(double, double) noexcept nogil +cdef _proto_boxcox_t *_proto_boxcox_t_var = &_func_boxcox +from ._boxcox cimport boxcox1p as _func_boxcox1p +ctypedef double _proto_boxcox1p_t(double, double) noexcept nogil +cdef _proto_boxcox1p_t *_proto_boxcox1p_t_var = &_func_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_btdtr "btdtr"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_incbi "incbi"(npy_double, npy_double, npy_double)nogil +from ._cdflib_wrappers cimport btdtria as _func_btdtria +ctypedef double _proto_btdtria_t(double, double, double) noexcept nogil +cdef _proto_btdtria_t *_proto_btdtria_t_var = &_func_btdtria +from ._cdflib_wrappers cimport btdtrib as _func_btdtrib +ctypedef double _proto_btdtrib_t(double, double, double) noexcept nogil +cdef _proto_btdtrib_t *_proto_btdtrib_t_var = &_func_btdtrib +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbrt "cbrt"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_chdtr "chdtr"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_chdtrc "chdtrc"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_chdtri "chdtri"(npy_double, npy_double)nogil +from ._cdflib_wrappers cimport chdtriv as _func_chdtriv +ctypedef double _proto_chdtriv_t(double, double) noexcept nogil +cdef _proto_chdtriv_t *_proto_chdtriv_t_var = &_func_chdtriv +from ._cdflib_wrappers cimport chndtr as _func_chndtr +ctypedef double _proto_chndtr_t(double, double, double) noexcept nogil +cdef _proto_chndtr_t *_proto_chndtr_t_var = &_func_chndtr +from ._cdflib_wrappers cimport chndtridf as _func_chndtridf +ctypedef double _proto_chndtridf_t(double, double, double) noexcept nogil +cdef _proto_chndtridf_t *_proto_chndtridf_t_var = &_func_chndtridf +from ._cdflib_wrappers cimport chndtrinc as _func_chndtrinc +ctypedef double _proto_chndtrinc_t(double, double, 
double) noexcept nogil +cdef _proto_chndtrinc_t *_proto_chndtrinc_t_var = &_func_chndtrinc +from ._cdflib_wrappers cimport chndtrix as _func_chndtrix +ctypedef double _proto_chndtrix_t(double, double, double) noexcept nogil +cdef _proto_chndtrix_t *_proto_chndtrix_t_var = &_func_chndtrix +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cosdg "cosdg"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cosm1 "cosm1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cotdg "cotdg"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellpe "ellpe"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellie "ellie"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_ellpj "ellpj"(npy_double, npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellik "ellik"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellpk "ellpk"(npy_double)nogil +from ._ellipk cimport ellipk as _func_ellipk +ctypedef double _proto_ellipk_t(double) noexcept nogil +cdef _proto_ellipk_t *_proto_ellipk_t_var = &_func_ellipk +from ._convex_analysis cimport entr as _func_entr +ctypedef double _proto_entr_t(double) noexcept nogil +cdef _proto_entr_t *_proto_entr_t_var = &_func_entr +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_erf "erf"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_erfc "erfc"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_erfcinv "erfcinv"(npy_double)nogil +from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double complex _proto_eval_chebyc_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyc_double_complex__t *_proto_eval_chebyc_double_complex__t_var = &_func_eval_chebyc[double_complex] +from 
.orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double _proto_eval_chebyc_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyc_double__t *_proto_eval_chebyc_double__t_var = &_func_eval_chebyc[double] +from .orthogonal_eval cimport eval_chebyc_l as _func_eval_chebyc_l +ctypedef double _proto_eval_chebyc_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyc_l_t *_proto_eval_chebyc_l_t_var = &_func_eval_chebyc_l +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double complex _proto_eval_chebys_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebys_double_complex__t *_proto_eval_chebys_double_complex__t_var = &_func_eval_chebys[double_complex] +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double _proto_eval_chebys_double__t(double, double) noexcept nogil +cdef _proto_eval_chebys_double__t *_proto_eval_chebys_double__t_var = &_func_eval_chebys[double] +from .orthogonal_eval cimport eval_chebys_l as _func_eval_chebys_l +ctypedef double _proto_eval_chebys_l_t(long, double) noexcept nogil +cdef _proto_eval_chebys_l_t *_proto_eval_chebys_l_t_var = &_func_eval_chebys_l +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double complex _proto_eval_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyt_double_complex__t *_proto_eval_chebyt_double_complex__t_var = &_func_eval_chebyt[double_complex] +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double _proto_eval_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyt_double__t *_proto_eval_chebyt_double__t_var = &_func_eval_chebyt[double] +from .orthogonal_eval cimport eval_chebyt_l as _func_eval_chebyt_l +ctypedef double _proto_eval_chebyt_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyt_l_t *_proto_eval_chebyt_l_t_var = &_func_eval_chebyt_l +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu 
+ctypedef double complex _proto_eval_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyu_double_complex__t *_proto_eval_chebyu_double_complex__t_var = &_func_eval_chebyu[double_complex] +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu +ctypedef double _proto_eval_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyu_double__t *_proto_eval_chebyu_double__t_var = &_func_eval_chebyu[double] +from .orthogonal_eval cimport eval_chebyu_l as _func_eval_chebyu_l +ctypedef double _proto_eval_chebyu_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyu_l_t *_proto_eval_chebyu_l_t_var = &_func_eval_chebyu_l +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double complex _proto_eval_gegenbauer_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_gegenbauer_double_complex__t *_proto_eval_gegenbauer_double_complex__t_var = &_func_eval_gegenbauer[double_complex] +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double _proto_eval_gegenbauer_double__t(double, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_double__t *_proto_eval_gegenbauer_double__t_var = &_func_eval_gegenbauer[double] +from .orthogonal_eval cimport eval_gegenbauer_l as _func_eval_gegenbauer_l +ctypedef double _proto_eval_gegenbauer_l_t(long, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_l_t *_proto_eval_gegenbauer_l_t_var = &_func_eval_gegenbauer_l +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double complex _proto_eval_genlaguerre_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_genlaguerre_double_complex__t *_proto_eval_genlaguerre_double_complex__t_var = &_func_eval_genlaguerre[double_complex] +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double _proto_eval_genlaguerre_double__t(double, double, double) 
noexcept nogil +cdef _proto_eval_genlaguerre_double__t *_proto_eval_genlaguerre_double__t_var = &_func_eval_genlaguerre[double] +from .orthogonal_eval cimport eval_genlaguerre_l as _func_eval_genlaguerre_l +ctypedef double _proto_eval_genlaguerre_l_t(long, double, double) noexcept nogil +cdef _proto_eval_genlaguerre_l_t *_proto_eval_genlaguerre_l_t_var = &_func_eval_genlaguerre_l +from .orthogonal_eval cimport eval_hermite as _func_eval_hermite +ctypedef double _proto_eval_hermite_t(long, double) noexcept nogil +cdef _proto_eval_hermite_t *_proto_eval_hermite_t_var = &_func_eval_hermite +from .orthogonal_eval cimport eval_hermitenorm as _func_eval_hermitenorm +ctypedef double _proto_eval_hermitenorm_t(long, double) noexcept nogil +cdef _proto_eval_hermitenorm_t *_proto_eval_hermitenorm_t_var = &_func_eval_hermitenorm +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double complex _proto_eval_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_jacobi_double_complex__t *_proto_eval_jacobi_double_complex__t_var = &_func_eval_jacobi[double_complex] +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double _proto_eval_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_double__t *_proto_eval_jacobi_double__t_var = &_func_eval_jacobi[double] +from .orthogonal_eval cimport eval_jacobi_l as _func_eval_jacobi_l +ctypedef double _proto_eval_jacobi_l_t(long, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_l_t *_proto_eval_jacobi_l_t_var = &_func_eval_jacobi_l +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef double complex _proto_eval_laguerre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_laguerre_double_complex__t *_proto_eval_laguerre_double_complex__t_var = &_func_eval_laguerre[double_complex] +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef 
double _proto_eval_laguerre_double__t(double, double) noexcept nogil +cdef _proto_eval_laguerre_double__t *_proto_eval_laguerre_double__t_var = &_func_eval_laguerre[double] +from .orthogonal_eval cimport eval_laguerre_l as _func_eval_laguerre_l +ctypedef double _proto_eval_laguerre_l_t(long, double) noexcept nogil +cdef _proto_eval_laguerre_l_t *_proto_eval_laguerre_l_t_var = &_func_eval_laguerre_l +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double complex _proto_eval_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_legendre_double_complex__t *_proto_eval_legendre_double_complex__t_var = &_func_eval_legendre[double_complex] +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double _proto_eval_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_legendre_double__t *_proto_eval_legendre_double__t_var = &_func_eval_legendre[double] +from .orthogonal_eval cimport eval_legendre_l as _func_eval_legendre_l +ctypedef double _proto_eval_legendre_l_t(long, double) noexcept nogil +cdef _proto_eval_legendre_l_t *_proto_eval_legendre_l_t_var = &_func_eval_legendre_l +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double complex _proto_eval_sh_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyt_double_complex__t *_proto_eval_sh_chebyt_double_complex__t_var = &_func_eval_sh_chebyt[double_complex] +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double _proto_eval_sh_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyt_double__t *_proto_eval_sh_chebyt_double__t_var = &_func_eval_sh_chebyt[double] +from .orthogonal_eval cimport eval_sh_chebyt_l as _func_eval_sh_chebyt_l +ctypedef double _proto_eval_sh_chebyt_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_chebyt_l_t *_proto_eval_sh_chebyt_l_t_var = &_func_eval_sh_chebyt_l +from 
.orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double complex _proto_eval_sh_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyu_double_complex__t *_proto_eval_sh_chebyu_double_complex__t_var = &_func_eval_sh_chebyu[double_complex] +from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double _proto_eval_sh_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyu_double__t *_proto_eval_sh_chebyu_double__t_var = &_func_eval_sh_chebyu[double] +from .orthogonal_eval cimport eval_sh_chebyu_l as _func_eval_sh_chebyu_l +ctypedef double _proto_eval_sh_chebyu_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_chebyu_l_t *_proto_eval_sh_chebyu_l_t_var = &_func_eval_sh_chebyu_l +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double complex _proto_eval_sh_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_sh_jacobi_double_complex__t *_proto_eval_sh_jacobi_double_complex__t_var = &_func_eval_sh_jacobi[double_complex] +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double _proto_eval_sh_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_double__t *_proto_eval_sh_jacobi_double__t_var = &_func_eval_sh_jacobi[double] +from .orthogonal_eval cimport eval_sh_jacobi_l as _func_eval_sh_jacobi_l +ctypedef double _proto_eval_sh_jacobi_l_t(long, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_l_t *_proto_eval_sh_jacobi_l_t_var = &_func_eval_sh_jacobi_l +from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre +ctypedef double complex _proto_eval_sh_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_legendre_double_complex__t *_proto_eval_sh_legendre_double_complex__t_var = &_func_eval_sh_legendre[double_complex] +from .orthogonal_eval cimport 
eval_sh_legendre as _func_eval_sh_legendre +ctypedef double _proto_eval_sh_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_legendre_double__t *_proto_eval_sh_legendre_double__t_var = &_func_eval_sh_legendre[double] +from .orthogonal_eval cimport eval_sh_legendre_l as _func_eval_sh_legendre_l +ctypedef double _proto_eval_sh_legendre_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_legendre_l_t *_proto_eval_sh_legendre_l_t_var = &_func_eval_sh_legendre_l +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cexp1_wrap "cexp1_wrap"(npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_exp1_wrap "exp1_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_exp10 "exp10"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_exp2 "exp2"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cexpi_wrap "cexpi_wrap"(npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_expi_wrap "expi_wrap"(npy_double)nogil +from ._cunity cimport cexpm1 as _func_cexpm1 +ctypedef double complex _proto_cexpm1_t(double complex) noexcept nogil +cdef _proto_cexpm1_t *_proto_cexpm1_t_var = &_func_cexpm1 +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_expm1 "expm1"(npy_double)nogil +from ._legacy cimport expn_unsafe as _func_expn_unsafe +ctypedef double _proto_expn_unsafe_t(double, double) noexcept nogil +cdef _proto_expn_unsafe_t *_proto_expn_unsafe_t_var = &_func_expn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_expn "expn"(npy_int, npy_double)nogil +from ._exprel cimport exprel as _func_exprel +ctypedef double _proto_exprel_t(double) noexcept nogil +cdef _proto_exprel_t *_proto_exprel_t_var = &_func_exprel +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_fdtr "fdtr"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_fdtrc 
"fdtrc"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_fdtri "fdtri"(npy_double, npy_double, npy_double)nogil +from ._cdflib_wrappers cimport fdtridfd as _func_fdtridfd +ctypedef double _proto_fdtridfd_t(double, double, double) noexcept nogil +cdef _proto_fdtridfd_t *_proto_fdtridfd_t_var = &_func_fdtridfd +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_fresnl "fresnl"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cfresnl_wrap "cfresnl_wrap"(npy_cdouble, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_Gamma "Gamma"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igam "igam"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igamc "igamc"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igamci "igamci"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igami "igami"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_lgam "lgam"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_gammasgn "gammasgn"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_gdtr "gdtr"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_gdtrc "gdtrc"(npy_double, npy_double, npy_double)nogil +from ._cdflib_wrappers cimport gdtria as _func_gdtria +ctypedef double _proto_gdtria_t(double, double, double) noexcept nogil +cdef _proto_gdtria_t *_proto_gdtria_t_var = &_func_gdtria +from ._cdflib_wrappers cimport gdtrib as _func_gdtrib +ctypedef double _proto_gdtrib_t(double, double, double) noexcept nogil +cdef _proto_gdtrib_t *_proto_gdtrib_t_var = &_func_gdtrib +from ._cdflib_wrappers cimport gdtrix as _func_gdtrix +ctypedef double 
_proto_gdtrix_t(double, double, double) noexcept nogil +cdef _proto_gdtrix_t *_proto_gdtrix_t_var = &_func_gdtrix +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap1 "cbesh_wrap1"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap1_e "cbesh_wrap1_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap2 "cbesh_wrap2"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap2_e "cbesh_wrap2_e"(npy_double, npy_cdouble)nogil +from ._convex_analysis cimport huber as _func_huber +ctypedef double _proto_huber_t(double, double) noexcept nogil +cdef _proto_huber_t *_proto_huber_t_var = &_func_huber +from ._hyp0f1 cimport _hyp0f1_cmplx as _func__hyp0f1_cmplx +ctypedef double complex _proto__hyp0f1_cmplx_t(double, double complex) noexcept nogil +cdef _proto__hyp0f1_cmplx_t *_proto__hyp0f1_cmplx_t_var = &_func__hyp0f1_cmplx +from ._hyp0f1 cimport _hyp0f1_real as _func__hyp0f1_real +ctypedef double _proto__hyp0f1_real_t(double, double) noexcept nogil +cdef _proto__hyp0f1_real_t *_proto__hyp0f1_real_t_var = &_func__hyp0f1_real +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_chyp1f1_wrap "chyp1f1_wrap"(npy_double, npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_hyp2f1 "hyp2f1"(npy_double, npy_double, npy_double, npy_double)nogil +from ._hyp2f1 cimport hyp2f1_complex as _func_hyp2f1_complex +ctypedef double complex _proto_hyp2f1_complex_t(double, double, double, double complex) noexcept nogil +cdef _proto_hyp2f1_complex_t *_proto_hyp2f1_complex_t_var = &_func_hyp2f1_complex +from ._hypergeometric cimport hyperu as _func_hyperu +ctypedef double _proto_hyperu_t(double, double, double) noexcept nogil +cdef _proto_hyperu_t *_proto_hyperu_t_var = &_func_hyperu +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_i0 "i0"(npy_double)nogil +cdef extern from 
r"_ufuncs_defs.h": + cdef npy_double _func_i0e "i0e"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_i1 "i1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_i1e "i1e"(npy_double)nogil +from ._boxcox cimport inv_boxcox as _func_inv_boxcox +ctypedef double _proto_inv_boxcox_t(double, double) noexcept nogil +cdef _proto_inv_boxcox_t *_proto_inv_boxcox_t_var = &_func_inv_boxcox +from ._boxcox cimport inv_boxcox1p as _func_inv_boxcox1p +ctypedef double _proto_inv_boxcox1p_t(double, double) noexcept nogil +cdef _proto_inv_boxcox1p_t *_proto_inv_boxcox1p_t_var = &_func_inv_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it2i0k0_wrap "it2i0k0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it2j0y0_wrap "it2j0y0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_it2struve0_wrap "it2struve0_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_itairy_wrap "itairy_wrap"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it1i0k0_wrap "it1i0k0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it1j0y0_wrap "it1j0y0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_itmodstruve0_wrap "itmodstruve0_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_itstruve0_wrap "itstruve0_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesi_wrap "cbesi_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_iv "iv"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesi_wrap_e "cbesi_wrap_e"(npy_double, npy_cdouble)nogil 
+cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesi_wrap_e_real "cbesi_wrap_e_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_j0 "j0"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_j1 "j1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesj_wrap "cbesj_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesj_wrap_real "cbesj_wrap_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesj_wrap_e "cbesj_wrap_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesj_wrap_e_real "cbesj_wrap_e_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k0 "k0"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k0e "k0e"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k1 "k1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k1e "k1e"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kei_wrap "kei_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_keip_wrap "keip_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_kelvin_wrap "kelvin_wrap"(npy_double, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ker_wrap "ker_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kerp_wrap "kerp_wrap"(npy_double)nogil +from ._convex_analysis cimport kl_div as _func_kl_div +ctypedef double _proto_kl_div_t(double, double) noexcept nogil +cdef _proto_kl_div_t *_proto_kl_div_t_var = &_func_kl_div +from ._legacy cimport kn_unsafe as _func_kn_unsafe +ctypedef double _proto_kn_unsafe_t(double, double) noexcept nogil +cdef 
_proto_kn_unsafe_t *_proto_kn_unsafe_t_var = &_func_kn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesk_wrap_real_int "cbesk_wrap_real_int"(npy_int, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kolmogi "kolmogi"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kolmogorov "kolmogorov"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesk_wrap "cbesk_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesk_wrap_real "cbesk_wrap_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesk_wrap_e "cbesk_wrap_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesk_wrap_e_real "cbesk_wrap_e_real"(npy_double, npy_double)nogil +from ._cunity cimport clog1p as _func_clog1p +ctypedef double complex _proto_clog1p_t(double complex) noexcept nogil +cdef _proto_clog1p_t *_proto_clog1p_t_var = &_func_clog1p +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_log1p "log1p"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pmv_wrap "pmv_wrap"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cem_cva_wrap "cem_cva_wrap"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_sem_cva_wrap "sem_cva_wrap"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cem_wrap "cem_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_mcm1_wrap "mcm1_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_mcm2_wrap "mcm2_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef 
npy_int _func_msm1_wrap "msm1_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_msm2_wrap "msm2_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_sem_wrap "sem_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_modified_fresnel_minus_wrap "modified_fresnel_minus_wrap"(npy_double, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_modified_fresnel_plus_wrap "modified_fresnel_plus_wrap"(npy_double, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_struve_l "struve_l"(npy_double, npy_double)nogil +from ._legacy cimport nbdtr_unsafe as _func_nbdtr_unsafe +ctypedef double _proto_nbdtr_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtr_unsafe_t *_proto_nbdtr_unsafe_t_var = &_func_nbdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_nbdtr "nbdtr"(npy_int, npy_int, npy_double)nogil +from ._legacy cimport nbdtrc_unsafe as _func_nbdtrc_unsafe +ctypedef double _proto_nbdtrc_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtrc_unsafe_t *_proto_nbdtrc_unsafe_t_var = &_func_nbdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_nbdtrc "nbdtrc"(npy_int, npy_int, npy_double)nogil +from ._legacy cimport nbdtri_unsafe as _func_nbdtri_unsafe +ctypedef double _proto_nbdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtri_unsafe_t *_proto_nbdtri_unsafe_t_var = &_func_nbdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_nbdtri "nbdtri"(npy_int, npy_int, npy_double)nogil +from ._cdflib_wrappers cimport nbdtrik as _func_nbdtrik +ctypedef double _proto_nbdtrik_t(double, double, double) noexcept nogil +cdef _proto_nbdtrik_t *_proto_nbdtrik_t_var = 
&_func_nbdtrik +from ._cdflib_wrappers cimport nbdtrin as _func_nbdtrin +ctypedef double _proto_nbdtrin_t(double, double, double) noexcept nogil +cdef _proto_nbdtrin_t *_proto_nbdtrin_t_var = &_func_nbdtrin +from ._cdflib_wrappers cimport ncfdtr as _func_ncfdtr +ctypedef double _proto_ncfdtr_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtr_t *_proto_ncfdtr_t_var = &_func_ncfdtr +from ._cdflib_wrappers cimport ncfdtri as _func_ncfdtri +ctypedef double _proto_ncfdtri_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtri_t *_proto_ncfdtri_t_var = &_func_ncfdtri +from ._cdflib_wrappers cimport ncfdtridfd as _func_ncfdtridfd +ctypedef double _proto_ncfdtridfd_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfd_t *_proto_ncfdtridfd_t_var = &_func_ncfdtridfd +from ._cdflib_wrappers cimport ncfdtridfn as _func_ncfdtridfn +ctypedef double _proto_ncfdtridfn_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfn_t *_proto_ncfdtridfn_t_var = &_func_ncfdtridfn +from ._cdflib_wrappers cimport ncfdtrinc as _func_ncfdtrinc +ctypedef double _proto_ncfdtrinc_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtrinc_t *_proto_ncfdtrinc_t_var = &_func_ncfdtrinc +from ._cdflib_wrappers cimport nctdtr as _func_nctdtr +ctypedef double _proto_nctdtr_t(double, double, double) noexcept nogil +cdef _proto_nctdtr_t *_proto_nctdtr_t_var = &_func_nctdtr +from ._cdflib_wrappers cimport nctdtridf as _func_nctdtridf +ctypedef double _proto_nctdtridf_t(double, double, double) noexcept nogil +cdef _proto_nctdtridf_t *_proto_nctdtridf_t_var = &_func_nctdtridf +from ._cdflib_wrappers cimport nctdtrinc as _func_nctdtrinc +ctypedef double _proto_nctdtrinc_t(double, double, double) noexcept nogil +cdef _proto_nctdtrinc_t *_proto_nctdtrinc_t_var = &_func_nctdtrinc +from ._cdflib_wrappers cimport nctdtrit as _func_nctdtrit +ctypedef double _proto_nctdtrit_t(double, double, double) noexcept nogil +cdef _proto_nctdtrit_t 
*_proto_nctdtrit_t_var = &_func_nctdtrit +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ndtr "ndtr"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ndtri "ndtri"(npy_double)nogil +from ._cdflib_wrappers cimport nrdtrimn as _func_nrdtrimn +ctypedef double _proto_nrdtrimn_t(double, double, double) noexcept nogil +cdef _proto_nrdtrimn_t *_proto_nrdtrimn_t_var = &_func_nrdtrimn +from ._cdflib_wrappers cimport nrdtrisd as _func_nrdtrisd +ctypedef double _proto_nrdtrisd_t(double, double, double) noexcept nogil +cdef _proto_nrdtrisd_t *_proto_nrdtrisd_t_var = &_func_nrdtrisd +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_aswfa_nocv_wrap "oblate_aswfa_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_oblate_aswfa_wrap "oblate_aswfa_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_segv_wrap "oblate_segv_wrap"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_radial1_nocv_wrap "oblate_radial1_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_oblate_radial1_wrap "oblate_radial1_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_radial2_nocv_wrap "oblate_radial2_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_oblate_radial2_wrap "oblate_radial2_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_owens_t "owens_t"(npy_double, npy_double)nogil +cdef extern from 
r"_ufuncs_defs.h": + cdef npy_int _func_pbdv_wrap "pbdv_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_pbvv_wrap "pbvv_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_pbwa_wrap "pbwa_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pdtr "pdtr"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pdtrc "pdtrc"(npy_double, npy_double)nogil +from ._legacy cimport pdtri_unsafe as _func_pdtri_unsafe +ctypedef double _proto_pdtri_unsafe_t(double, double) noexcept nogil +cdef _proto_pdtri_unsafe_t *_proto_pdtri_unsafe_t_var = &_func_pdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pdtri "pdtri"(npy_int, npy_double)nogil +from ._cdflib_wrappers cimport pdtrik as _func_pdtrik +ctypedef double _proto_pdtrik_t(double, double) noexcept nogil +cdef _proto_pdtrik_t *_proto_pdtrik_t_var = &_func_pdtrik +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_poch "poch"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_aswfa_nocv_wrap "prolate_aswfa_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_prolate_aswfa_wrap "prolate_aswfa_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_segv_wrap "prolate_segv_wrap"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_radial1_nocv_wrap "prolate_radial1_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_prolate_radial1_wrap "prolate_radial1_wrap"(npy_double, 
npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_radial2_nocv_wrap "prolate_radial2_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_prolate_radial2_wrap "prolate_radial2_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +from ._convex_analysis cimport pseudo_huber as _func_pseudo_huber +ctypedef double _proto_pseudo_huber_t(double, double) noexcept nogil +cdef _proto_pseudo_huber_t *_proto_pseudo_huber_t_var = &_func_pseudo_huber +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_radian "radian"(npy_double, npy_double, npy_double)nogil +from ._convex_analysis cimport rel_entr as _func_rel_entr +ctypedef double _proto_rel_entr_t(double, double) noexcept nogil +cdef _proto_rel_entr_t *_proto_rel_entr_t_var = &_func_rel_entr +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_rgamma "rgamma"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_round "round"(npy_double)nogil +from ._sici cimport cshichi as _func_cshichi +ctypedef int _proto_cshichi_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_cshichi_t *_proto_cshichi_t_var = &_func_cshichi +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_shichi "shichi"(npy_double, npy_double *, npy_double *)nogil +from ._sici cimport csici as _func_csici +ctypedef int _proto_csici_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_csici_t *_proto_csici_t_var = &_func_csici +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_sici "sici"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_sindg "sindg"(npy_double)nogil +from ._legacy cimport smirnov_unsafe as _func_smirnov_unsafe +ctypedef double 
_proto_smirnov_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnov_unsafe_t *_proto_smirnov_unsafe_t_var = &_func_smirnov_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_smirnov "smirnov"(npy_int, npy_double)nogil +from ._legacy cimport smirnovi_unsafe as _func_smirnovi_unsafe +ctypedef double _proto_smirnovi_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovi_unsafe_t *_proto_smirnovi_unsafe_t_var = &_func_smirnovi_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_smirnovi "smirnovi"(npy_int, npy_double)nogil +from ._spence cimport cspence as _func_cspence +ctypedef double complex _proto_cspence_t(double complex) noexcept nogil +cdef _proto_cspence_t *_proto_cspence_t_var = &_func_cspence +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_spence "spence"(npy_double)nogil +from ._legacy cimport sph_harmonic_unsafe as _func_sph_harmonic_unsafe +ctypedef double complex _proto_sph_harmonic_unsafe_t(double, double, double, double) noexcept nogil +cdef _proto_sph_harmonic_unsafe_t *_proto_sph_harmonic_unsafe_t_var = &_func_sph_harmonic_unsafe +from .sph_harm cimport sph_harmonic as _func_sph_harmonic +ctypedef double complex _proto_sph_harmonic_t(int, int, double, double) noexcept nogil +cdef _proto_sph_harmonic_t *_proto_sph_harmonic_t_var = &_func_sph_harmonic +from ._cdflib_wrappers cimport stdtr as _func_stdtr +ctypedef double _proto_stdtr_t(double, double) noexcept nogil +cdef _proto_stdtr_t *_proto_stdtr_t_var = &_func_stdtr +from ._cdflib_wrappers cimport stdtridf as _func_stdtridf +ctypedef double _proto_stdtridf_t(double, double) noexcept nogil +cdef _proto_stdtridf_t *_proto_stdtridf_t_var = &_func_stdtridf +from ._cdflib_wrappers cimport stdtrit as _func_stdtrit +ctypedef double _proto_stdtrit_t(double, double) noexcept nogil +cdef _proto_stdtrit_t *_proto_stdtrit_t_var = &_func_stdtrit +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_struve_h "struve_h"(npy_double, 
npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_tandg "tandg"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_tukeylambdacdf "tukeylambdacdf"(npy_double, npy_double)nogil +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double _proto_xlog1py_double__t(double, double) noexcept nogil +cdef _proto_xlog1py_double__t *_proto_xlog1py_double__t_var = &_func_xlog1py[double] +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double complex _proto_xlog1py_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlog1py_double_complex__t *_proto_xlog1py_double_complex__t_var = &_func_xlog1py[double_complex] +from ._xlogy cimport xlogy as _func_xlogy +ctypedef double _proto_xlogy_double__t(double, double) noexcept nogil +cdef _proto_xlogy_double__t *_proto_xlogy_double__t_var = &_func_xlogy[double] +from ._xlogy cimport xlogy as _func_xlogy +ctypedef double complex _proto_xlogy_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlogy_double_complex__t *_proto_xlogy_double_complex__t_var = &_func_xlogy[double_complex] +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_y0 "y0"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_y1 "y1"(npy_double)nogil +from ._legacy cimport yn_unsafe as _func_yn_unsafe +ctypedef double _proto_yn_unsafe_t(double, double) noexcept nogil +cdef _proto_yn_unsafe_t *_proto_yn_unsafe_t_var = &_func_yn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_yn "yn"(npy_int, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesy_wrap "cbesy_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesy_wrap_real "cbesy_wrap_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesy_wrap_e "cbesy_wrap_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + 
cdef npy_double _func_cbesy_wrap_e_real "cbesy_wrap_e_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_zetac "zetac"(npy_double)nogil +from ._wright_bessel cimport wright_bessel_scalar as _func_wright_bessel_scalar +ctypedef double _proto_wright_bessel_scalar_t(double, double, double) noexcept nogil +cdef _proto_wright_bessel_scalar_t *_proto_wright_bessel_scalar_t_var = &_func_wright_bessel_scalar +from ._ndtri_exp cimport ndtri_exp as _func_ndtri_exp +ctypedef double _proto_ndtri_exp_t(double) noexcept nogil +cdef _proto_ndtri_exp_t *_proto_ndtri_exp_t_var = &_func_ndtri_exp + +cpdef double voigt_profile(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.voigt_profile""" + return (scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile)(x0, x1, x2) + +cpdef double agm(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.agm""" + return _func_agm(x0, x1) + +cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil: + """See the documentation for scipy.special.airy""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + cdef npy_cdouble tmp2 + cdef npy_cdouble tmp3 + if Dd_number_t is double: + _func_airy_wrap(x0, y0, y1, y2, y3) + elif Dd_number_t is double_complex: + _func_cairy_wrap(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1, &tmp2, &tmp3) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2) + y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + +def _airy_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + cdef Dd_number_t y2 + cdef Dd_number_t y3 + 
airy(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil: + """See the documentation for scipy.special.airye""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + cdef npy_cdouble tmp2 + cdef npy_cdouble tmp3 + if Dd_number_t is double_complex: + _func_cairy_wrap_e(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1, &tmp2, &tmp3) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2) + y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3) + elif Dd_number_t is double: + _func_cairy_wrap_e_real(x0, y0, y1, y2, y3) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + +def _airye_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + cdef Dd_number_t y2 + cdef Dd_number_t y3 + airye(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cpdef double bdtr(double x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtr""" + if dl_number_t is double: + return _func_bdtr_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_bdtr(x0, x1, x2) + else: + return NAN + +cpdef double bdtrc(double x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtrc""" + if dl_number_t is double: + return _func_bdtrc_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_bdtrc(x0, x1, x2) + else: + return NAN + +cpdef double bdtri(double x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtri""" + if dl_number_t is double: + return _func_bdtri_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_bdtri(x0, x1, x2) + else: + return NAN + +cpdef double bdtrik(double x0, 
double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtrik""" + return _func_bdtrik(x0, x1, x2) + +cpdef double bdtrin(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtrin""" + return _func_bdtrin(x0, x1, x2) + +cpdef double bei(double x0) noexcept nogil: + """See the documentation for scipy.special.bei""" + return _func_bei_wrap(x0) + +cpdef double beip(double x0) noexcept nogil: + """See the documentation for scipy.special.beip""" + return _func_beip_wrap(x0) + +cpdef double ber(double x0) noexcept nogil: + """See the documentation for scipy.special.ber""" + return _func_ber_wrap(x0) + +cpdef double berp(double x0) noexcept nogil: + """See the documentation for scipy.special.berp""" + return _func_berp_wrap(x0) + +cpdef double besselpoly(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.besselpoly""" + return _func_besselpoly(x0, x1, x2) + +cpdef double beta(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.beta""" + return _func_beta(x0, x1) + +cpdef df_number_t betainc(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betainc""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibeta_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibeta_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef df_number_t betaincc(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betaincc""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibetac_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibetac_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef df_number_t betaincinv(df_number_t x0, 
df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betaincinv""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibeta_inv_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibeta_inv_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef df_number_t betainccinv(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betainccinv""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibetac_inv_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibetac_inv_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef double betaln(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.betaln""" + return _func_lbeta(x0, x1) + +cpdef double binom(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.binom""" + return (scipy.special._ufuncs_cxx._export_binom)(x0, x1) + +cpdef double boxcox(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.boxcox""" + return _func_boxcox(x0, x1) + +cpdef double boxcox1p(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.boxcox1p""" + return _func_boxcox1p(x0, x1) + +cpdef double btdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.btdtr""" + return _func_btdtr(x0, x1, x2) + +cpdef double btdtri(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.btdtri""" + return _func_incbi(x0, x1, x2) + +cpdef double btdtria(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.btdtria""" + return _func_btdtria(x0, x1, x2) + +cpdef double btdtrib(double x0, double x1, double x2) noexcept nogil: + """See the 
documentation for scipy.special.btdtrib""" + return _func_btdtrib(x0, x1, x2) + +cpdef double cbrt(double x0) noexcept nogil: + """See the documentation for scipy.special.cbrt""" + return _func_cbrt(x0) + +cpdef double chdtr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtr""" + return _func_chdtr(x0, x1) + +cpdef double chdtrc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtrc""" + return _func_chdtrc(x0, x1) + +cpdef double chdtri(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtri""" + return _func_chdtri(x0, x1) + +cpdef double chdtriv(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtriv""" + return _func_chdtriv(x0, x1) + +cpdef double chndtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtr""" + return _func_chndtr(x0, x1, x2) + +cpdef double chndtridf(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtridf""" + return _func_chndtridf(x0, x1, x2) + +cpdef double chndtrinc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtrinc""" + return _func_chndtrinc(x0, x1, x2) + +cpdef double chndtrix(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtrix""" + return _func_chndtrix(x0, x1, x2) + +cpdef double cosdg(double x0) noexcept nogil: + """See the documentation for scipy.special.cosdg""" + return _func_cosdg(x0) + +cpdef double cosm1(double x0) noexcept nogil: + """See the documentation for scipy.special.cosm1""" + return _func_cosm1(x0) + +cpdef double cotdg(double x0) noexcept nogil: + """See the documentation for scipy.special.cotdg""" + return _func_cotdg(x0) + +cpdef Dd_number_t dawsn(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.dawsn""" + if Dd_number_t is double: + return 
(scipy.special._ufuncs_cxx._export_faddeeva_dawsn)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double ellipe(double x0) noexcept nogil: + """See the documentation for scipy.special.ellipe""" + return _func_ellpe(x0) + +cpdef double ellipeinc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.ellipeinc""" + return _func_ellie(x0, x1) + +cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) noexcept nogil: + """See the documentation for scipy.special.ellipj""" + _func_ellpj(x0, x1, y0, y1, y2, y3) + +def _ellipj_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + cdef double y2 + cdef double y3 + ellipj(x0, x1, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cpdef double ellipkinc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.ellipkinc""" + return _func_ellik(x0, x1) + +cpdef double ellipkm1(double x0) noexcept nogil: + """See the documentation for scipy.special.ellipkm1""" + return _func_ellpk(x0) + +cpdef double ellipk(double x0) noexcept nogil: + """See the documentation for scipy.special.ellipk""" + return _func_ellipk(x0) + +cpdef Dd_number_t elliprc(Dd_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.elliprc""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RC)(x0, x1) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RC)(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprd(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.elliprd""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RD)(x0, x1, x2) + elif Dd_number_t is 
double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RD)(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprf(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.elliprf""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RF)(x0, x1, x2) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RF)(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprg(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.elliprg""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RG)(x0, x1, x2) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RG)(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprj(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2, Dd_number_t x3) noexcept nogil: + """See the documentation for scipy.special.elliprj""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RJ)(x0, x1, x2, x3) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RJ)(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double entr(double x0) noexcept nogil: + """See the documentation for scipy.special.entr""" + return _func_entr(x0) + +cpdef Dd_number_t erf(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erf""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erf)(x0) + elif Dd_number_t is double: + return _func_erf(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t erfc(Dd_number_t x0) noexcept nogil: + 
"""See the documentation for scipy.special.erfc""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex)(x0) + elif Dd_number_t is double: + return _func_erfc(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t erfcx(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erfcx""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfcx)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t erfi(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erfi""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfi)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef df_number_t erfinv(df_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erfinv""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_erfinv_float)(x0) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_erfinv_double)(x0) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef double erfcinv(double x0) noexcept nogil: + """See the documentation for scipy.special.erfcinv""" + return _func_erfcinv(x0) + +cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebyc""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebyc[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebyc[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return 
_func_eval_chebyc_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebys""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebys[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebys[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_chebys_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebyt""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebyt[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebyt[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_chebyt_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebyu""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebyu[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebyu[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_chebyu_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.eval_gegenbauer""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_gegenbauer[double_complex](x0, x1, x2) + elif dl_number_t is double and Dd_number_t 
is double: + return _func_eval_gegenbauer[double](x0, x1, x2) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_gegenbauer_l(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.eval_genlaguerre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_genlaguerre[double_complex](x0, x1, x2) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_genlaguerre[double](x0, x1, x2) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_genlaguerre_l(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double eval_hermite(long x0, double x1) noexcept nogil: + """See the documentation for scipy.special.eval_hermite""" + return _func_eval_hermite(x0, x1) + +cpdef double eval_hermitenorm(long x0, double x1) noexcept nogil: + """See the documentation for scipy.special.eval_hermitenorm""" + return _func_eval_hermitenorm(x0, x1) + +cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil: + """See the documentation for scipy.special.eval_jacobi""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_jacobi[double_complex](x0, x1, x2, x3) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_jacobi[double](x0, x1, x2, x3) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_jacobi_l(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_laguerre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_laguerre[double_complex](x0, x1) + elif dl_number_t is 
double and Dd_number_t is double: + return _func_eval_laguerre[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_laguerre_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_legendre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_legendre[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_legendre[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_legendre_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_sh_chebyt""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_chebyt[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_chebyt[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_chebyt_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_sh_chebyu""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_chebyu[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_chebyu[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_chebyu_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil: + """See the documentation for 
scipy.special.eval_sh_jacobi""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_jacobi[double_complex](x0, x1, x2, x3) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_jacobi[double](x0, x1, x2, x3) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_jacobi_l(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_sh_legendre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_legendre[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_legendre[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_legendre_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t exp1(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.exp1""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cexp1_wrap(_complexstuff.npy_cdouble_from_double_complex(x0))) + elif Dd_number_t is double: + return _func_exp1_wrap(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double exp10(double x0) noexcept nogil: + """See the documentation for scipy.special.exp10""" + return _func_exp10(x0) + +cpdef double exp2(double x0) noexcept nogil: + """See the documentation for scipy.special.exp2""" + return _func_exp2(x0) + +cpdef Dd_number_t expi(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.expi""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cexpi_wrap(_complexstuff.npy_cdouble_from_double_complex(x0))) + elif Dd_number_t is double: + return _func_expi_wrap(x0) + else: + if 
Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef dfg_number_t expit(dfg_number_t x0) noexcept nogil: + """See the documentation for scipy.special.expit""" + if dfg_number_t is double: + return (scipy.special._ufuncs_cxx._export_expit)(x0) + elif dfg_number_t is float: + return (scipy.special._ufuncs_cxx._export_expitf)(x0) + elif dfg_number_t is long_double: + return (scipy.special._ufuncs_cxx._export_expitl)(x0) + else: + if dfg_number_t is double: + return NAN + elif dfg_number_t is float: + return NAN + else: + return NAN + +cpdef Dd_number_t expm1(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.expm1""" + if Dd_number_t is double_complex: + return _func_cexpm1(x0) + elif Dd_number_t is double: + return _func_expm1(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double expn(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.expn""" + if dl_number_t is double: + return _func_expn_unsafe(x0, x1) + elif dl_number_t is long: + return _func_expn(x0, x1) + else: + return NAN + +cpdef double exprel(double x0) noexcept nogil: + """See the documentation for scipy.special.exprel""" + return _func_exprel(x0) + +cpdef double fdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtr""" + return _func_fdtr(x0, x1, x2) + +cpdef double fdtrc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtrc""" + return _func_fdtrc(x0, x1, x2) + +cpdef double fdtri(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtri""" + return _func_fdtri(x0, x1, x2) + +cpdef double fdtridfd(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtridfd""" + return _func_fdtridfd(x0, x1, x2) + +cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil: + """See the 
documentation for scipy.special.fresnel""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + if Dd_number_t is double: + _func_fresnl(x0, y0, y1) + elif Dd_number_t is double_complex: + _func_cfresnl_wrap(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + +def _fresnel_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + fresnel(x0, &y0, &y1) + return y0, y1 + +cpdef Dd_number_t gamma(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.gamma""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cgamma)(x0) + elif Dd_number_t is double: + return _func_Gamma(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double gammainc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammainc""" + return _func_igam(x0, x1) + +cpdef double gammaincc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammaincc""" + return _func_igamc(x0, x1) + +cpdef double gammainccinv(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammainccinv""" + return _func_igamci(x0, x1) + +cpdef double gammaincinv(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammaincinv""" + return _func_igami(x0, x1) + +cpdef double gammaln(double x0) noexcept nogil: + """See the documentation for scipy.special.gammaln""" + return _func_lgam(x0) + +cpdef double gammasgn(double x0) noexcept nogil: + """See the documentation for scipy.special.gammasgn""" + return _func_gammasgn(x0) + +cpdef double gdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtr""" + return _func_gdtr(x0, x1, x2) 
+ +cpdef double gdtrc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtrc""" + return _func_gdtrc(x0, x1, x2) + +cpdef double gdtria(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtria""" + return _func_gdtria(x0, x1, x2) + +cpdef double gdtrib(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtrib""" + return _func_gdtrib(x0, x1, x2) + +cpdef double gdtrix(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtrix""" + return _func_gdtrix(x0, x1, x2) + +cpdef double complex hankel1(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel1""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap1(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double complex hankel1e(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel1e""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap1_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double complex hankel2(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel2""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap2(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double complex hankel2e(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel2e""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap2_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double huber(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.huber""" + return _func_huber(x0, x1) + +cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.hyp0f1""" + if Dd_number_t is 
double_complex: + return _func__hyp0f1_cmplx(x0, x1) + elif Dd_number_t is double: + return _func__hyp0f1_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.hyp1f1""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_hyp1f1_double)(x0, x1, x2) + elif Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_chyp1f1_wrap(x0, x1, _complexstuff.npy_cdouble_from_double_complex(x2))) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) noexcept nogil: + """See the documentation for scipy.special.hyp2f1""" + if Dd_number_t is double: + return _func_hyp2f1(x0, x1, x2, x3) + elif Dd_number_t is double_complex: + return _func_hyp2f1_complex(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double hyperu(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.hyperu""" + return _func_hyperu(x0, x1, x2) + +cpdef double i0(double x0) noexcept nogil: + """See the documentation for scipy.special.i0""" + return _func_i0(x0) + +cpdef double i0e(double x0) noexcept nogil: + """See the documentation for scipy.special.i0e""" + return _func_i0e(x0) + +cpdef double i1(double x0) noexcept nogil: + """See the documentation for scipy.special.i1""" + return _func_i1(x0) + +cpdef double i1e(double x0) noexcept nogil: + """See the documentation for scipy.special.i1e""" + return _func_i1e(x0) + +cpdef double inv_boxcox(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.inv_boxcox""" + return _func_inv_boxcox(x0, x1) + +cpdef double inv_boxcox1p(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.inv_boxcox1p""" + return 
_func_inv_boxcox1p(x0, x1) + +cdef void it2i0k0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.it2i0k0""" + _func_it2i0k0_wrap(x0, y0, y1) + +def _it2i0k0_pywrap(double x0): + cdef double y0 + cdef double y1 + it2i0k0(x0, &y0, &y1) + return y0, y1 + +cdef void it2j0y0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.it2j0y0""" + _func_it2j0y0_wrap(x0, y0, y1) + +def _it2j0y0_pywrap(double x0): + cdef double y0 + cdef double y1 + it2j0y0(x0, &y0, &y1) + return y0, y1 + +cpdef double it2struve0(double x0) noexcept nogil: + """See the documentation for scipy.special.it2struve0""" + return _func_it2struve0_wrap(x0) + +cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) noexcept nogil: + """See the documentation for scipy.special.itairy""" + _func_itairy_wrap(x0, y0, y1, y2, y3) + +def _itairy_pywrap(double x0): + cdef double y0 + cdef double y1 + cdef double y2 + cdef double y3 + itairy(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cdef void iti0k0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.iti0k0""" + _func_it1i0k0_wrap(x0, y0, y1) + +def _iti0k0_pywrap(double x0): + cdef double y0 + cdef double y1 + iti0k0(x0, &y0, &y1) + return y0, y1 + +cdef void itj0y0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.itj0y0""" + _func_it1j0y0_wrap(x0, y0, y1) + +def _itj0y0_pywrap(double x0): + cdef double y0 + cdef double y1 + itj0y0(x0, &y0, &y1) + return y0, y1 + +cpdef double itmodstruve0(double x0) noexcept nogil: + """See the documentation for scipy.special.itmodstruve0""" + return _func_itmodstruve0_wrap(x0) + +cpdef double itstruve0(double x0) noexcept nogil: + """See the documentation for scipy.special.itstruve0""" + return _func_itstruve0_wrap(x0) + +cpdef Dd_number_t iv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for 
scipy.special.iv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesi_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_iv(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t ive(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.ive""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesi_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesi_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double j0(double x0) noexcept nogil: + """See the documentation for scipy.special.j0""" + return _func_j0(x0) + +cpdef double j1(double x0) noexcept nogil: + """See the documentation for scipy.special.j1""" + return _func_j1(x0) + +cpdef Dd_number_t jv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.jv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesj_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesj_wrap_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t jve(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.jve""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesj_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesj_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double k0(double x0) noexcept nogil: + """See the documentation for scipy.special.k0""" + return _func_k0(x0) + +cpdef double 
k0e(double x0) noexcept nogil: + """See the documentation for scipy.special.k0e""" + return _func_k0e(x0) + +cpdef double k1(double x0) noexcept nogil: + """See the documentation for scipy.special.k1""" + return _func_k1(x0) + +cpdef double k1e(double x0) noexcept nogil: + """See the documentation for scipy.special.k1e""" + return _func_k1e(x0) + +cpdef double kei(double x0) noexcept nogil: + """See the documentation for scipy.special.kei""" + return _func_kei_wrap(x0) + +cpdef double keip(double x0) noexcept nogil: + """See the documentation for scipy.special.keip""" + return _func_keip_wrap(x0) + +cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) noexcept nogil: + """See the documentation for scipy.special.kelvin""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + cdef npy_cdouble tmp2 + cdef npy_cdouble tmp3 + _func_kelvin_wrap(x0, &tmp0, &tmp1, &tmp2, &tmp3) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2) + y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3) + +def _kelvin_pywrap(double x0): + cdef double complex y0 + cdef double complex y1 + cdef double complex y2 + cdef double complex y3 + kelvin(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cpdef double ker(double x0) noexcept nogil: + """See the documentation for scipy.special.ker""" + return _func_ker_wrap(x0) + +cpdef double kerp(double x0) noexcept nogil: + """See the documentation for scipy.special.kerp""" + return _func_kerp_wrap(x0) + +cpdef double kl_div(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.kl_div""" + return _func_kl_div(x0, x1) + +cpdef double kn(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.kn""" + if dl_number_t is double: + return _func_kn_unsafe(x0, x1) + elif dl_number_t is long: + return 
_func_cbesk_wrap_real_int(x0, x1) + else: + return NAN + +cpdef double kolmogi(double x0) noexcept nogil: + """See the documentation for scipy.special.kolmogi""" + return _func_kolmogi(x0) + +cpdef double kolmogorov(double x0) noexcept nogil: + """See the documentation for scipy.special.kolmogorov""" + return _func_kolmogorov(x0) + +cpdef Dd_number_t kv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.kv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesk_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesk_wrap_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t kve(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.kve""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesk_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesk_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t log1p(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.log1p""" + if Dd_number_t is double_complex: + return _func_clog1p(x0) + elif Dd_number_t is double: + return _func_log1p(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef dfg_number_t log_expit(dfg_number_t x0) noexcept nogil: + """See the documentation for scipy.special.log_expit""" + if dfg_number_t is double: + return (scipy.special._ufuncs_cxx._export_log_expit)(x0) + elif dfg_number_t is float: + return (scipy.special._ufuncs_cxx._export_log_expitf)(x0) + elif dfg_number_t is long_double: + return (scipy.special._ufuncs_cxx._export_log_expitl)(x0) + else: + if dfg_number_t is double: + return NAN + elif dfg_number_t is float: + return NAN + 
else: + return NAN + +cpdef Dd_number_t log_ndtr(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.log_ndtr""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t loggamma(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.loggamma""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_loggamma_real)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_loggamma)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef dfg_number_t logit(dfg_number_t x0) noexcept nogil: + """See the documentation for scipy.special.logit""" + if dfg_number_t is double: + return (scipy.special._ufuncs_cxx._export_logit)(x0) + elif dfg_number_t is float: + return (scipy.special._ufuncs_cxx._export_logitf)(x0) + elif dfg_number_t is long_double: + return (scipy.special._ufuncs_cxx._export_logitl)(x0) + else: + if dfg_number_t is double: + return NAN + elif dfg_number_t is float: + return NAN + else: + return NAN + +cpdef double lpmv(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.lpmv""" + return _func_pmv_wrap(x0, x1, x2) + +cpdef double mathieu_a(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.mathieu_a""" + return _func_cem_cva_wrap(x0, x1) + +cpdef double mathieu_b(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.mathieu_b""" + return _func_sem_cva_wrap(x0, x1) + +cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_cem""" + _func_cem_wrap(x0, x1, x2, y0, y1) + +def 
_mathieu_cem_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_cem(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modcem1""" + _func_mcm1_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modcem1_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modcem1(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modcem2""" + _func_mcm2_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modcem2_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modcem2(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modsem1""" + _func_msm1_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modsem1_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modsem1(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modsem2""" + _func_msm2_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modsem2_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modsem2(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_sem""" + _func_sem_wrap(x0, x1, x2, y0, y1) + +def _mathieu_sem_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_sem(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void modfresnelm(double x0, double complex *y0, double complex *y1) 
noexcept nogil: + """See the documentation for scipy.special.modfresnelm""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + _func_modified_fresnel_minus_wrap(x0, &tmp0, &tmp1) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + +def _modfresnelm_pywrap(double x0): + cdef double complex y0 + cdef double complex y1 + modfresnelm(x0, &y0, &y1) + return y0, y1 + +cdef void modfresnelp(double x0, double complex *y0, double complex *y1) noexcept nogil: + """See the documentation for scipy.special.modfresnelp""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + _func_modified_fresnel_plus_wrap(x0, &tmp0, &tmp1) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + +def _modfresnelp_pywrap(double x0): + cdef double complex y0 + cdef double complex y1 + modfresnelp(x0, &y0, &y1) + return y0, y1 + +cpdef double modstruve(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.modstruve""" + return _func_struve_l(x0, x1) + +cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtr""" + if dl_number_t is double: + return _func_nbdtr_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_nbdtr(x0, x1, x2) + else: + return NAN + +cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtrc""" + if dl_number_t is double: + return _func_nbdtrc_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_nbdtrc(x0, x1, x2) + else: + return NAN + +cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtri""" + if dl_number_t is double: + return _func_nbdtri_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_nbdtri(x0, x1, x2) + else: + return NAN + +cpdef double nbdtrik(double x0, 
double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtrik""" + return _func_nbdtrik(x0, x1, x2) + +cpdef double nbdtrin(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtrin""" + return _func_nbdtrin(x0, x1, x2) + +cpdef double ncfdtr(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtr""" + return _func_ncfdtr(x0, x1, x2, x3) + +cpdef double ncfdtri(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtri""" + return _func_ncfdtri(x0, x1, x2, x3) + +cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtridfd""" + return _func_ncfdtridfd(x0, x1, x2, x3) + +cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtridfn""" + return _func_ncfdtridfn(x0, x1, x2, x3) + +cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtrinc""" + return _func_ncfdtrinc(x0, x1, x2, x3) + +cpdef double nctdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtr""" + return _func_nctdtr(x0, x1, x2) + +cpdef double nctdtridf(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtridf""" + return _func_nctdtridf(x0, x1, x2) + +cpdef double nctdtrinc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtrinc""" + return _func_nctdtrinc(x0, x1, x2) + +cpdef double nctdtrit(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtrit""" + return _func_nctdtrit(x0, x1, x2) + +cpdef Dd_number_t ndtr(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.ndtr""" + if 
Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_ndtr)(x0) + elif Dd_number_t is double: + return _func_ndtr(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double ndtri(double x0) noexcept nogil: + """See the documentation for scipy.special.ndtri""" + return _func_ndtri(x0) + +cpdef double nrdtrimn(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nrdtrimn""" + return _func_nrdtrimn(x0, x1, x2) + +cpdef double nrdtrisd(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nrdtrisd""" + return _func_nrdtrisd(x0, x1, x2) + +cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_ang1""" + y0[0] = _func_oblate_aswfa_nocv_wrap(x0, x1, x2, x3, y1) + +def _obl_ang1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + obl_ang1(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_ang1_cv""" + _func_oblate_aswfa_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _obl_ang1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + obl_ang1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cpdef double obl_cv(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.obl_cv""" + return _func_oblate_segv_wrap(x0, x1, x2) + +cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad1""" + y0[0] = _func_oblate_radial1_nocv_wrap(x0, x1, x2, x3, y1) + +def _obl_rad1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + obl_rad1(x0, x1, x2, 
x3, &y0, &y1) + return y0, y1 + +cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad1_cv""" + _func_oblate_radial1_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _obl_rad1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + obl_rad1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad2""" + y0[0] = _func_oblate_radial2_nocv_wrap(x0, x1, x2, x3, y1) + +def _obl_rad2_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + obl_rad2(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad2_cv""" + _func_oblate_radial2_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _obl_rad2_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + obl_rad2_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cpdef double owens_t(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.owens_t""" + return _func_owens_t(x0, x1) + +cdef void pbdv(double x0, double x1, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pbdv""" + _func_pbdv_wrap(x0, x1, y0, y1) + +def _pbdv_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + pbdv(x0, x1, &y0, &y1) + return y0, y1 + +cdef void pbvv(double x0, double x1, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pbvv""" + _func_pbvv_wrap(x0, x1, y0, y1) + +def _pbvv_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + pbvv(x0, x1, &y0, &y1) + return y0, y1 + +cdef void pbwa(double x0, double x1, 
double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pbwa""" + _func_pbwa_wrap(x0, x1, y0, y1) + +def _pbwa_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + pbwa(x0, x1, &y0, &y1) + return y0, y1 + +cpdef double pdtr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtr""" + return _func_pdtr(x0, x1) + +cpdef double pdtrc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtrc""" + return _func_pdtrc(x0, x1) + +cpdef double pdtri(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtri""" + if dl_number_t is double: + return _func_pdtri_unsafe(x0, x1) + elif dl_number_t is long: + return _func_pdtri(x0, x1) + else: + return NAN + +cpdef double pdtrik(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtrik""" + return _func_pdtrik(x0, x1) + +cpdef double poch(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.poch""" + return _func_poch(x0, x1) + +cpdef df_number_t powm1(df_number_t x0, df_number_t x1) noexcept nogil: + """See the documentation for scipy.special.powm1""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_powm1_float)(x0, x1) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_powm1_double)(x0, x1) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_ang1""" + y0[0] = _func_prolate_aswfa_nocv_wrap(x0, x1, x2, x3, y1) + +def _pro_ang1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + pro_ang1(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for 
scipy.special.pro_ang1_cv""" + _func_prolate_aswfa_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _pro_ang1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + pro_ang1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cpdef double pro_cv(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.pro_cv""" + return _func_prolate_segv_wrap(x0, x1, x2) + +cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad1""" + y0[0] = _func_prolate_radial1_nocv_wrap(x0, x1, x2, x3, y1) + +def _pro_rad1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + pro_rad1(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad1_cv""" + _func_prolate_radial1_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _pro_rad1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + pro_rad1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad2""" + y0[0] = _func_prolate_radial2_nocv_wrap(x0, x1, x2, x3, y1) + +def _pro_rad2_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + pro_rad2(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad2_cv""" + _func_prolate_radial2_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _pro_rad2_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + pro_rad2_cv(x0, x1, x2, x3, x4, 
&y0, &y1) + return y0, y1 + +cpdef double pseudo_huber(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pseudo_huber""" + return _func_pseudo_huber(x0, x1) + +cpdef Dd_number_t psi(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.psi""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cdigamma)(x0) + elif Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_digamma)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double radian(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.radian""" + return _func_radian(x0, x1, x2) + +cpdef double rel_entr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.rel_entr""" + return _func_rel_entr(x0, x1) + +cpdef Dd_number_t rgamma(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.rgamma""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_crgamma)(x0) + elif Dd_number_t is double: + return _func_rgamma(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double round(double x0) noexcept nogil: + """See the documentation for scipy.special.round""" + return _func_round(x0) + +cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil: + """See the documentation for scipy.special.shichi""" + if Dd_number_t is double_complex: + _func_cshichi(x0, y0, y1) + elif Dd_number_t is double: + _func_shichi(x0, y0, y1) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + +def _shichi_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + shichi(x0, &y0, &y1) + return y0, y1 + +cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil: + """See the documentation for scipy.special.sici""" + if Dd_number_t is 
double_complex: + _func_csici(x0, y0, y1) + elif Dd_number_t is double: + _func_sici(x0, y0, y1) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + +def _sici_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + sici(x0, &y0, &y1) + return y0, y1 + +cpdef double sindg(double x0) noexcept nogil: + """See the documentation for scipy.special.sindg""" + return _func_sindg(x0) + +cpdef double smirnov(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.smirnov""" + if dl_number_t is double: + return _func_smirnov_unsafe(x0, x1) + elif dl_number_t is long: + return _func_smirnov(x0, x1) + else: + return NAN + +cpdef double smirnovi(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.smirnovi""" + if dl_number_t is double: + return _func_smirnovi_unsafe(x0, x1) + elif dl_number_t is long: + return _func_smirnovi(x0, x1) + else: + return NAN + +cpdef Dd_number_t spence(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.spence""" + if Dd_number_t is double_complex: + return _func_cspence(x0) + elif Dd_number_t is double: + return _func_spence(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.sph_harm""" + if dl_number_t is double: + return _func_sph_harmonic_unsafe(x0, x1, x2, x3) + elif dl_number_t is long: + return _func_sph_harmonic(x0, x1, x2, x3) + else: + return NAN + +cpdef double stdtr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.stdtr""" + return _func_stdtr(x0, x1) + +cpdef double stdtridf(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.stdtridf""" + return _func_stdtridf(x0, x1) + +cpdef double stdtrit(double x0, double x1) noexcept nogil: + 
"""See the documentation for scipy.special.stdtrit""" + return _func_stdtrit(x0, x1) + +cpdef double struve(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.struve""" + return _func_struve_h(x0, x1) + +cpdef double tandg(double x0) noexcept nogil: + """See the documentation for scipy.special.tandg""" + return _func_tandg(x0) + +cpdef double tklmbda(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.tklmbda""" + return _func_tukeylambdacdf(x0, x1) + +cpdef double complex wofz(double complex x0) noexcept nogil: + """See the documentation for scipy.special.wofz""" + return (scipy.special._ufuncs_cxx._export_faddeeva_w)(x0) + +cpdef Dd_number_t wrightomega(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.wrightomega""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_wrightomega)(x0) + elif Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_wrightomega_real)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.xlog1py""" + if Dd_number_t is double: + return _func_xlog1py[double](x0, x1) + elif Dd_number_t is double_complex: + return _func_xlog1py[double_complex](x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.xlogy""" + if Dd_number_t is double: + return _func_xlogy[double](x0, x1) + elif Dd_number_t is double_complex: + return _func_xlogy[double_complex](x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double y0(double x0) noexcept nogil: + """See the documentation for scipy.special.y0""" + return _func_y0(x0) + +cpdef double y1(double x0) noexcept nogil: + """See the documentation 
for scipy.special.y1""" + return _func_y1(x0) + +cpdef double yn(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.yn""" + if dl_number_t is double: + return _func_yn_unsafe(x0, x1) + elif dl_number_t is long: + return _func_yn(x0, x1) + else: + return NAN + +cpdef Dd_number_t yv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.yv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesy_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesy_wrap_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t yve(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.yve""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesy_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesy_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double zetac(double x0) noexcept nogil: + """See the documentation for scipy.special.zetac""" + return _func_zetac(x0) + +cpdef double wright_bessel(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.wright_bessel""" + return _func_wright_bessel_scalar(x0, x1, x2) + +cpdef double ndtri_exp(double x0) noexcept nogil: + """See the documentation for scipy.special.ndtri_exp""" + return _func_ndtri_exp(x0) + +def _bench_airy_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.airy(x0) + +def _bench_airy_d_cy(int N, double x0): + cdef int n + cdef double y0 + cdef double y1 + cdef double y2 + cdef double y3 + for n in range(N): + airy(x0, &y0, &y1, &y2, &y3) + +def _bench_airy_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.airy(x0) + +def 
_bench_airy_D_cy(int N, double complex x0): + cdef int n + cdef double complex y0 + cdef double complex y1 + cdef double complex y2 + cdef double complex y3 + for n in range(N): + airy(x0, &y0, &y1, &y2, &y3) + +def _bench_beta_dd_py(int N, double x0, double x1): + cdef int n + for n in range(N): + _ufuncs.beta(x0, x1) + +def _bench_beta_dd_cy(int N, double x0, double x1): + cdef int n + for n in range(N): + beta(x0, x1) + +def _bench_erf_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.erf(x0) + +def _bench_erf_d_cy(int N, double x0): + cdef int n + for n in range(N): + erf(x0) + +def _bench_erf_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.erf(x0) + +def _bench_erf_D_cy(int N, double complex x0): + cdef int n + for n in range(N): + erf(x0) + +def _bench_exprel_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.exprel(x0) + +def _bench_exprel_d_cy(int N, double x0): + cdef int n + for n in range(N): + exprel(x0) + +def _bench_gamma_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.gamma(x0) + +def _bench_gamma_d_cy(int N, double x0): + cdef int n + for n in range(N): + gamma(x0) + +def _bench_gamma_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.gamma(x0) + +def _bench_gamma_D_cy(int N, double complex x0): + cdef int n + for n in range(N): + gamma(x0) + +def _bench_jv_dd_py(int N, double x0, double x1): + cdef int n + for n in range(N): + _ufuncs.jv(x0, x1) + +def _bench_jv_dd_cy(int N, double x0, double x1): + cdef int n + for n in range(N): + jv(x0, x1) + +def _bench_jv_dD_py(int N, double x0, double complex x1): + cdef int n + for n in range(N): + _ufuncs.jv(x0, x1) + +def _bench_jv_dD_cy(int N, double x0, double complex x1): + cdef int n + for n in range(N): + jv(x0, x1) + +def _bench_loggamma_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.loggamma(x0) + +def _bench_loggamma_D_cy(int N, double complex x0): + cdef int n + 
for n in range(N): + loggamma(x0) + +def _bench_logit_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.logit(x0) + +def _bench_logit_d_cy(int N, double x0): + cdef int n + for n in range(N): + logit(x0) + +def _bench_psi_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.psi(x0) + +def _bench_psi_d_cy(int N, double x0): + cdef int n + for n in range(N): + psi(x0) + +def _bench_psi_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.psi(x0) + +def _bench_psi_D_cy(int N, double complex x0): + cdef int n + for n in range(N): + psi(x0) \ No newline at end of file diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf37fc13f73c57ab80960fc425280c0105ac7690 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a0aa765ac1e62ea0c0cc5dd15ded44e5d404707 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7e11e4b865acd18fd01932912797d65efe1cb4d Binary files /dev/null and 
b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd6984a7657b58d1e8ade82f2896bc141273041b Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logit.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81d20f711ca571d5076dffe4434c04016c078330 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logit.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b0cd68dbe27cd76a4188612ec344848de12ced9 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sici.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sici.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b762dc4794add2669e843fef727cdf445e710f55 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sici.cpython-310.pyc differ 
diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76f8e0184c988547e737efaaefb8567f3f6ae9ba Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88220d53261f58d7b16bdc510ac45a709ee80a63 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0796c6fbdb08bcb604c3386126540598cb5d399 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..606979ef909ab82848c40798b7207e24ea6874e4 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc differ diff --git 
a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_basic.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..d941ef16254e1e796b75421e76c61d4b8506cdc0 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_basic.py @@ -0,0 +1,4324 @@ +# this program corresponds to special.py + +### Means test is not done yet +# E Means test is giving error (E) +# F Means test is failing (F) +# EF Means test is giving error and Failing +#! Means test is segfaulting +# 8 Means test runs forever + +### test_besselpoly +### test_mathieu_a +### test_mathieu_even_coef +### test_mathieu_odd_coef +### test_modfresnelp +### test_modfresnelm +# test_pbdv_seq +### test_pbvv_seq +### test_sph_harm + +import functools +import itertools +import operator +import platform +import sys + +import numpy as np +from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, + log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, double, + array_equal) + +import pytest +from pytest import raises as assert_raises +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_equal, assert_array_almost_equal, assert_approx_equal, + assert_, assert_allclose, assert_array_almost_equal_nulp, + suppress_warnings) + +from scipy import special +import scipy.special._ufuncs as cephes +from scipy.special import ellipe, ellipk, ellipkm1 +from scipy.special import elliprc, elliprd, elliprf, elliprg, elliprj +from scipy.special import mathieu_odd_coef, mathieu_even_coef, stirling2 +from scipy._lib.deprecation import _NoValue +from scipy._lib._util import np_long, np_ulong + +from scipy.special._basic import _FACTORIALK_LIMITS_64BITS, \ + _FACTORIALK_LIMITS_32BITS +from scipy.special._testutils import with_special_errors, \ + assert_func_equal, FuncData + +import math + + +class TestCephes: + def test_airy(self): + cephes.airy(0) + + def 
test_airye(self): + cephes.airye(0) + + def test_binom(self): + n = np.array([0.264, 4, 5.2, 17]) + k = np.array([2, 0.4, 7, 3.3]) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389, + -0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846], + [10.92, 2.22993515861399, -0.00585728, 10.468891352063146], + [136, 3.5252179590758828, 19448, 1024.5526916174495]]) + assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13) + + # Test branches in implementation + np.random.seed(1234) + n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500] + k = np.arange(0, 102) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + + assert_func_equal(cephes.binom, + cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)), + nk, + atol=1e-10, rtol=1e-10) + + def test_binom_2(self): + # Test branches in implementation + np.random.seed(1234) + n = np.r_[np.logspace(1, 300, 20)] + k = np.arange(0, 102) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + + assert_func_equal(cephes.binom, + cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)), + nk, + atol=1e-10, rtol=1e-10) + + def test_binom_exact(self): + @np.vectorize + def binom_int(n, k): + n = int(n) + k = int(k) + num = 1 + den = 1 + for i in range(1, k+1): + num *= i + n - k + den *= i + return float(num/den) + + np.random.seed(1234) + n = np.arange(1, 15) + k = np.arange(0, 15) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + nk = nk[nk[:,0] >= nk[:,1]] + assert_func_equal(cephes.binom, + binom_int(nk[:,0], nk[:,1]), + nk, + atol=0, rtol=0) + + def test_binom_nooverflow_8346(self): + # Test (binom(n, k) doesn't overflow prematurely */ + dataset = [ + (1000, 500, 2.70288240945436551e+299), + (1002, 501, 1.08007396880791225e+300), + (1004, 502, 4.31599279169058121e+300), + (1006, 503, 1.72468101616263781e+301), + (1008, 504, 
6.89188009236419153e+301), + (1010, 505, 2.75402257948335448e+302), + (1012, 506, 1.10052048531923757e+303), + (1014, 507, 4.39774063758732849e+303), + (1016, 508, 1.75736486108312519e+304), + (1018, 509, 7.02255427788423734e+304), + (1020, 510, 2.80626776829962255e+305), + (1022, 511, 1.12140876377061240e+306), + (1024, 512, 4.48125455209897109e+306), + (1026, 513, 1.79075474304149900e+307), + (1028, 514, 7.15605105487789676e+307) + ] + dataset = np.asarray(dataset) + FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check() + + def test_bdtr(self): + assert_equal(cephes.bdtr(1,1,0.5),1.0) + + def test_bdtri(self): + assert_equal(cephes.bdtri(1,3,0.5),0.5) + + def test_bdtrc(self): + assert_equal(cephes.bdtrc(1,3,0.5),0.5) + + def test_bdtrin(self): + assert_equal(cephes.bdtrin(1,0,1),5.0) + + def test_bdtrik(self): + cephes.bdtrik(1,3,0.5) + + def test_bei(self): + assert_equal(cephes.bei(0),0.0) + + def test_beip(self): + assert_equal(cephes.beip(0),0.0) + + def test_ber(self): + assert_equal(cephes.ber(0),1.0) + + def test_berp(self): + assert_equal(cephes.berp(0),0.0) + + def test_besselpoly(self): + assert_equal(cephes.besselpoly(0,0,0),1.0) + + def test_btdtr(self): + with pytest.deprecated_call(match='deprecated in SciPy 1.12.0'): + y = special.btdtr(1, 1, 1) + assert_equal(y, 1.0) + + def test_btdtri(self): + with pytest.deprecated_call(match='deprecated in SciPy 1.12.0'): + y = special.btdtri(1, 1, 1) + assert_equal(y, 1.0) + + def test_btdtria(self): + assert_equal(cephes.btdtria(1,1,1),5.0) + + def test_btdtrib(self): + assert_equal(cephes.btdtrib(1,1,1),5.0) + + def test_cbrt(self): + assert_approx_equal(cephes.cbrt(1),1.0) + + def test_chdtr(self): + assert_equal(cephes.chdtr(1,0),0.0) + + def test_chdtrc(self): + assert_equal(cephes.chdtrc(1,0),1.0) + + def test_chdtri(self): + assert_equal(cephes.chdtri(1,1),0.0) + + def test_chdtriv(self): + assert_equal(cephes.chdtriv(0,0),5.0) + + def test_chndtr(self): + 
assert_equal(cephes.chndtr(0,1,0),0.0) + + # Each row holds (x, nu, lam, expected_value) + # These values were computed using Wolfram Alpha with + # CDF[NoncentralChiSquareDistribution[nu, lam], x] + values = np.array([ + [25.00, 20.0, 400, 4.1210655112396197139e-57], + [25.00, 8.00, 250, 2.3988026526832425878e-29], + [0.001, 8.00, 40., 5.3761806201366039084e-24], + [0.010, 8.00, 40., 5.45396231055999457039e-20], + [20.00, 2.00, 107, 1.39390743555819597802e-9], + [22.50, 2.00, 107, 7.11803307138105870671e-9], + [25.00, 2.00, 107, 3.11041244829864897313e-8], + [3.000, 2.00, 1.0, 0.62064365321954362734], + [350.0, 300., 10., 0.93880128006276407710], + [100.0, 13.5, 10., 0.99999999650104210949], + [700.0, 20.0, 400, 0.99999999925680650105], + [150.0, 13.5, 10., 0.99999999999999983046], + [160.0, 13.5, 10., 0.99999999999999999518], # 1.0 + ]) + cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2]) + assert_allclose(cdf, values[:, 3], rtol=1e-12) + + assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0) + assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0) + assert_(np.isnan(cephes.chndtr(np.nan, 1, 2))) + assert_(np.isnan(cephes.chndtr(5, np.nan, 2))) + assert_(np.isnan(cephes.chndtr(5, 1, np.nan))) + + def test_chndtridf(self): + assert_equal(cephes.chndtridf(0,0,1),5.0) + + def test_chndtrinc(self): + assert_equal(cephes.chndtrinc(0,1,0),5.0) + + def test_chndtrix(self): + assert_equal(cephes.chndtrix(0,1,0),0.0) + + def test_cosdg(self): + assert_equal(cephes.cosdg(0),1.0) + + def test_cosm1(self): + assert_equal(cephes.cosm1(0),0.0) + + def test_cotdg(self): + assert_almost_equal(cephes.cotdg(45),1.0) + + def test_dawsn(self): + assert_equal(cephes.dawsn(0),0.0) + assert_allclose(cephes.dawsn(1.23), 0.50053727749081767) + + def test_diric(self): + # Test behavior near multiples of 2pi. Regression test for issue + # described in gh-4001. 
+ n_odd = [1, 5, 25] + x = np.array(2*np.pi + 5e-5).astype(np.float32) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7) + x = np.array(2*np.pi + 1e-9).astype(np.float64) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15) + x = np.array(2*np.pi + 1e-15).astype(np.float64) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15) + if hasattr(np, 'float128'): + # No float128 available in 32-bit numpy + x = np.array(2*np.pi + 1e-12).astype(np.float128) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19) + + n_even = [2, 4, 24] + x = np.array(2*np.pi + 1e-9).astype(np.float64) + assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15) + + # Test at some values not near a multiple of pi + x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi) + octave_result = [0.872677996249965, 0.539344662916632, + 0.127322003750035, -0.206011329583298] + assert_almost_equal(special.diric(x, 3), octave_result, decimal=15) + + def test_diric_broadcasting(self): + x = np.arange(5) + n = np.array([1, 3, 7]) + assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size)) + + def test_ellipe(self): + assert_equal(cephes.ellipe(1),1.0) + + def test_ellipeinc(self): + assert_equal(cephes.ellipeinc(0,1),0.0) + + def test_ellipj(self): + cephes.ellipj(0,1) + + def test_ellipk(self): + assert_allclose(ellipk(0), pi/2) + + def test_ellipkinc(self): + assert_equal(cephes.ellipkinc(0,0),0.0) + + def test_erf(self): + assert_equal(cephes.erf(0), 0.0) + + def test_erf_symmetry(self): + x = 5.905732037710919 + assert_equal(cephes.erf(x) + cephes.erf(-x), 0.0) + + def test_erfc(self): + assert_equal(cephes.erfc(0), 1.0) + + def test_exp10(self): + assert_approx_equal(cephes.exp10(2),100.0) + + def test_exp2(self): + assert_equal(cephes.exp2(2),4.0) + + def test_expm1(self): + assert_equal(cephes.expm1(0),0.0) + assert_equal(cephes.expm1(np.inf), np.inf) + assert_equal(cephes.expm1(-np.inf), -1) + assert_equal(cephes.expm1(np.nan), np.nan) + + def 
test_expm1_complex(self): + expm1 = cephes.expm1 + assert_equal(expm1(0 + 0j), 0 + 0j) + assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0)) + assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf)) + assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf)) + assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf)) + assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf)) + assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan)) + assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0)) + assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0)) + assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan)) + assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan)) + + @pytest.mark.xfail(reason='The real part of expm1(z) bad at these points') + def test_expm1_complex_hard(self): + # The real part of this function is difficult to evaluate when + # z.real = -log(cos(z.imag)). + y = np.array([0.1, 0.2, 0.3, 5, 11, 20]) + x = -np.log(np.cos(y)) + z = x + 1j*y + + # evaluate using mpmath.expm1 with dps=1000 + expected = np.array([-5.5507901846769623e-17+0.10033467208545054j, + 2.4289354732893695e-18+0.20271003550867248j, + 4.5235500262585768e-17+0.30933624960962319j, + 7.8234305217489006e-17-3.3805150062465863j, + -1.3685191953697676e-16-225.95084645419513j, + 8.7175620481291045e-17+2.2371609442247422j]) + found = cephes.expm1(z) + # this passes. + assert_array_almost_equal_nulp(found.imag, expected.imag, 3) + # this fails. 
+ assert_array_almost_equal_nulp(found.real, expected.real, 20) + + def test_fdtr(self): + assert_equal(cephes.fdtr(1, 1, 0), 0.0) + # Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10] + assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488, + rtol=1e-12) + + def test_fdtrc(self): + assert_equal(cephes.fdtrc(1, 1, 0), 1.0) + # Computed using Wolfram Alpha: + # 1 - CDF[FRatioDistribution[2, 1/10], 1e10] + assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512, + rtol=1e-12) + + def test_fdtri(self): + assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]), + array([0.9937365, 1.00630298]), rtol=1e-6) + # From Wolfram Alpha: + # CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874... + p = 0.8756751669632105666874 + assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12) + + @pytest.mark.xfail(reason='Returns nan on i686.') + def test_fdtri_mysterious_failure(self): + assert_allclose(cephes.fdtri(1, 1, 0.5), 1) + + def test_fdtridfd(self): + assert_equal(cephes.fdtridfd(1,0,0),5.0) + + def test_fresnel(self): + assert_equal(cephes.fresnel(0),(0.0,0.0)) + + def test_gamma(self): + assert_equal(cephes.gamma(5),24.0) + + def test_gammainccinv(self): + assert_equal(cephes.gammainccinv(5,1),0.0) + + def test_gammaln(self): + cephes.gammaln(10) + + def test_gammasgn(self): + vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64) + assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals))) + + def test_gdtr(self): + assert_equal(cephes.gdtr(1,1,0),0.0) + + def test_gdtr_inf(self): + assert_equal(cephes.gdtr(1,1,np.inf),1.0) + + def test_gdtrc(self): + assert_equal(cephes.gdtrc(1,1,0),1.0) + + def test_gdtria(self): + assert_equal(cephes.gdtria(0,1,1),0.0) + + def test_gdtrib(self): + cephes.gdtrib(1,0,1) + # assert_equal(cephes.gdtrib(1,0,1),5.0) + + def test_gdtrix(self): + cephes.gdtrix(1,1,.1) + + def test_hankel1(self): + cephes.hankel1(1,1) + + def test_hankel1e(self): + cephes.hankel1e(1,1) + + def test_hankel2(self): 
+ cephes.hankel2(1,1) + + def test_hankel2e(self): + cephes.hankel2e(1,1) + + def test_hyp1f1(self): + assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0)) + assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095) + cephes.hyp1f1(1,1,1) + + def test_hyp2f1(self): + assert_equal(cephes.hyp2f1(1,1,1,0),1.0) + + def test_i0(self): + assert_equal(cephes.i0(0),1.0) + + def test_i0e(self): + assert_equal(cephes.i0e(0),1.0) + + def test_i1(self): + assert_equal(cephes.i1(0),0.0) + + def test_i1e(self): + assert_equal(cephes.i1e(0),0.0) + + def test_it2i0k0(self): + cephes.it2i0k0(1) + + def test_it2j0y0(self): + cephes.it2j0y0(1) + + def test_it2struve0(self): + cephes.it2struve0(1) + + def test_itairy(self): + cephes.itairy(1) + + def test_iti0k0(self): + assert_equal(cephes.iti0k0(0),(0.0,0.0)) + + def test_itj0y0(self): + assert_equal(cephes.itj0y0(0),(0.0,0.0)) + + def test_itmodstruve0(self): + assert_equal(cephes.itmodstruve0(0),0.0) + + def test_itstruve0(self): + assert_equal(cephes.itstruve0(0),0.0) + + def test_iv(self): + assert_equal(cephes.iv(1,0),0.0) + + def test_ive(self): + assert_equal(cephes.ive(1,0),0.0) + + def test_j0(self): + assert_equal(cephes.j0(0),1.0) + + def test_j1(self): + assert_equal(cephes.j1(0),0.0) + + def test_jn(self): + assert_equal(cephes.jn(0,0),1.0) + + def test_jv(self): + assert_equal(cephes.jv(0,0),1.0) + + def test_jve(self): + assert_equal(cephes.jve(0,0),1.0) + + def test_k0(self): + cephes.k0(2) + + def test_k0e(self): + cephes.k0e(2) + + def test_k1(self): + cephes.k1(2) + + def test_k1e(self): + cephes.k1e(2) + + def test_kei(self): + cephes.kei(2) + + def test_keip(self): + assert_equal(cephes.keip(0),0.0) + + def test_ker(self): + cephes.ker(2) + + def test_kerp(self): + cephes.kerp(2) + + def test_kelvin(self): + cephes.kelvin(2) + + def test_kn(self): + cephes.kn(1,1) + + def test_kolmogi(self): + assert_equal(cephes.kolmogi(1),0.0) + assert_(np.isnan(cephes.kolmogi(np.nan))) + + def test_kolmogorov(self): 
+ assert_equal(cephes.kolmogorov(0), 1.0) + + def test_kolmogp(self): + assert_equal(cephes._kolmogp(0), -0.0) + + def test_kolmogc(self): + assert_equal(cephes._kolmogc(0), 0.0) + + def test_kolmogci(self): + assert_equal(cephes._kolmogci(0), 0.0) + assert_(np.isnan(cephes._kolmogci(np.nan))) + + def test_kv(self): + cephes.kv(1,1) + + def test_kve(self): + cephes.kve(1,1) + + def test_log1p(self): + log1p = cephes.log1p + assert_equal(log1p(0), 0.0) + assert_equal(log1p(-1), -np.inf) + assert_equal(log1p(-2), np.nan) + assert_equal(log1p(np.inf), np.inf) + + def test_log1p_complex(self): + log1p = cephes.log1p + c = complex + assert_equal(log1p(0 + 0j), 0 + 0j) + assert_equal(log1p(c(-1, 0)), c(-np.inf, 0)) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2)) + assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan)) + assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi)) + assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0)) + assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4)) + assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4)) + assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan)) + assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan)) + assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan)) + assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan)) + assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan)) + + def test_lpmv(self): + assert_equal(cephes.lpmv(0,0,1),1.0) + + def test_mathieu_a(self): + assert_equal(cephes.mathieu_a(1,0),1.0) + + def test_mathieu_b(self): + assert_equal(cephes.mathieu_b(1,0),1.0) + + def test_mathieu_cem(self): + assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0)) + + # Test AMS 20.2.27 + @np.vectorize + def ce_smallq(m, q, z): + z *= np.pi/180 + if m == 0: + # + O(q^2) + return 2**(-0.5) * (1 - .5*q*cos(2*z)) + elif m == 1: + # + O(q^2) + return cos(z) - q/8 * cos(3*z) + elif m == 2: + # + 
O(q^2) + return cos(2*z) - q*(cos(4*z)/12 - 1/4) + else: + # + O(q^2) + return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) + m = np.arange(0, 100) + q = np.r_[0, np.logspace(-30, -9, 10)] + assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0], + ce_smallq(m[:,None], q[None,:], 0.123), + rtol=1e-14, atol=0) + + def test_mathieu_sem(self): + assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0)) + + # Test AMS 20.2.27 + @np.vectorize + def se_smallq(m, q, z): + z *= np.pi/180 + if m == 1: + # + O(q^2) + return sin(z) - q/8 * sin(3*z) + elif m == 2: + # + O(q^2) + return sin(2*z) - q*sin(4*z)/12 + else: + # + O(q^2) + return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) + m = np.arange(1, 100) + q = np.r_[0, np.logspace(-30, -9, 10)] + assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0], + se_smallq(m[:,None], q[None,:], 0.123), + rtol=1e-14, atol=0) + + def test_mathieu_modcem1(self): + assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0)) + + def test_mathieu_modcem2(self): + cephes.mathieu_modcem2(1,1,1) + + # Test reflection relation AMS 20.6.19 + m = np.arange(0, 4)[:,None,None] + q = np.r_[np.logspace(-2, 2, 10)][None,:,None] + z = np.linspace(0, 1, 7)[None,None,:] + + y1 = cephes.mathieu_modcem2(m, q, -z)[0] + + fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0] + y2 = (-cephes.mathieu_modcem2(m, q, z)[0] + - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]) + + assert_allclose(y1, y2, rtol=1e-10) + + def test_mathieu_modsem1(self): + assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0)) + + def test_mathieu_modsem2(self): + cephes.mathieu_modsem2(1,1,1) + + # Test reflection relation AMS 20.6.20 + m = np.arange(1, 4)[:,None,None] + q = np.r_[np.logspace(-2, 2, 10)][None,:,None] + z = np.linspace(0, 1, 7)[None,None,:] + + y1 = cephes.mathieu_modsem2(m, q, -z)[0] + fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1] + y2 = (cephes.mathieu_modsem2(m, q, z)[0] + 
- 2*fr*cephes.mathieu_modsem1(m, q, z)[0]) + assert_allclose(y1, y2, rtol=1e-10) + + def test_mathieu_overflow(self): + # Check that these return NaNs instead of causing a SEGV + assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan)) + + def test_mathieu_ticket_1847(self): + # Regression test --- this call had some out-of-bounds access + # and could return nan occasionally + for k in range(60): + v = cephes.mathieu_modsem2(2, 100, -1) + # Values from ACM TOMS 804 (derivate by numerical differentiation) + assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10) + assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4) + + def test_modfresnelm(self): + cephes.modfresnelm(0) + + def test_modfresnelp(self): + cephes.modfresnelp(0) + + def test_modstruve(self): + assert_equal(cephes.modstruve(1,0),0.0) + + def test_nbdtr(self): + assert_equal(cephes.nbdtr(1,1,1),1.0) + + def test_nbdtrc(self): + assert_equal(cephes.nbdtrc(1,1,1),0.0) + + def test_nbdtri(self): + assert_equal(cephes.nbdtri(1,1,1),1.0) + + def test_nbdtrik(self): + cephes.nbdtrik(1,.4,.5) + + def test_nbdtrin(self): + assert_equal(cephes.nbdtrin(1,0,0),5.0) + + def test_ncfdtr(self): + assert_equal(cephes.ncfdtr(1,1,1,0),0.0) + + def test_ncfdtri(self): + assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0) + f = [0.5, 1, 1.5] + p = cephes.ncfdtr(2, 3, 1.5, f) + assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f) + + def test_ncfdtridfd(self): + dfd = [1, 2, 3] + p = cephes.ncfdtr(2, dfd, 0.25, 15) + 
assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd) + + def test_ncfdtridfn(self): + dfn = [0.1, 1, 2, 3, 1e4] + p = cephes.ncfdtr(dfn, 2, 0.25, 15) + assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5) + + def test_ncfdtrinc(self): + nc = [0.5, 1.5, 2.0] + p = cephes.ncfdtr(2, 3, nc, 15) + assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc) + + def test_nctdtr(self): + assert_equal(cephes.nctdtr(1,0,0),0.5) + assert_equal(cephes.nctdtr(9, 65536, 45), 0.0) + + assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5) + assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.))) + assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.) + + assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.))) + assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.))) + assert_(np.isnan(cephes.nctdtr(2., 1., np.nan))) + + def test_nctdtridf(self): + cephes.nctdtridf(1,0.5,0) + + def test_nctdtrinc(self): + cephes.nctdtrinc(1,0,0) + + def test_nctdtrit(self): + cephes.nctdtrit(.1,0.2,.5) + + def test_nrdtrimn(self): + assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0) + + def test_nrdtrisd(self): + assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0, + atol=0, rtol=0) + + def test_obl_ang1(self): + cephes.obl_ang1(1,1,1,0) + + def test_obl_ang1_cv(self): + result = cephes.obl_ang1_cv(1,1,1,1,0) + assert_almost_equal(result[0],1.0) + assert_almost_equal(result[1],0.0) + + def test_obl_cv(self): + assert_equal(cephes.obl_cv(1,1,0),2.0) + + def test_obl_rad1(self): + cephes.obl_rad1(1,1,1,0) + + def test_obl_rad1_cv(self): + cephes.obl_rad1_cv(1,1,1,1,0) + + def test_obl_rad2(self): + cephes.obl_rad2(1,1,1,0) + + def test_obl_rad2_cv(self): + cephes.obl_rad2_cv(1,1,1,1,0) + + def test_pbdv(self): + assert_equal(cephes.pbdv(1,0),(0.0,1.0)) + + def test_pbvv(self): + cephes.pbvv(1,0) + + def test_pbwa(self): + cephes.pbwa(1,0) + + def test_pdtr(self): + val = cephes.pdtr(0, 1) + assert_almost_equal(val, np.exp(-1)) + # Edge case: m = 0. 
+ val = cephes.pdtr([0, 1, 2], 0) + assert_array_equal(val, [1, 1, 1]) + + def test_pdtrc(self): + val = cephes.pdtrc(0, 1) + assert_almost_equal(val, 1 - np.exp(-1)) + # Edge case: m = 0. + val = cephes.pdtrc([0, 1, 2], 0.0) + assert_array_equal(val, [0, 0, 0]) + + def test_pdtri(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "floating point number truncated to an integer") + cephes.pdtri(0.5,0.5) + + def test_pdtrik(self): + k = cephes.pdtrik(0.5, 1) + assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5) + # Edge case: m = 0 or very small. + k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6]) + assert_array_equal(k, np.zeros((3, 3))) + + def test_pro_ang1(self): + cephes.pro_ang1(1,1,1,0) + + def test_pro_ang1_cv(self): + assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0), + array((1.0,0.0))) + + def test_pro_cv(self): + assert_equal(cephes.pro_cv(1,1,0),2.0) + + def test_pro_rad1(self): + cephes.pro_rad1(1,1,1,0.1) + + def test_pro_rad1_cv(self): + cephes.pro_rad1_cv(1,1,1,1,0) + + def test_pro_rad2(self): + cephes.pro_rad2(1,1,1,0) + + def test_pro_rad2_cv(self): + cephes.pro_rad2_cv(1,1,1,1,0) + + def test_psi(self): + cephes.psi(1) + + def test_radian(self): + assert_equal(cephes.radian(0,0,0),0) + + def test_rgamma(self): + assert_equal(cephes.rgamma(1),1.0) + + def test_round(self): + assert_equal(cephes.round(3.4),3.0) + assert_equal(cephes.round(-3.4),-3.0) + assert_equal(cephes.round(3.6),4.0) + assert_equal(cephes.round(-3.6),-4.0) + assert_equal(cephes.round(3.5),4.0) + assert_equal(cephes.round(-3.5),-4.0) + + def test_shichi(self): + cephes.shichi(1) + + def test_sici(self): + cephes.sici(1) + + s, c = cephes.sici(np.inf) + assert_almost_equal(s, np.pi * 0.5) + assert_almost_equal(c, 0) + + s, c = cephes.sici(-np.inf) + assert_almost_equal(s, -np.pi * 0.5) + assert_(np.isnan(c), "cosine integral(-inf) is not nan") + + def test_sindg(self): + assert_equal(cephes.sindg(90),1.0) + + def test_smirnov(self): + 
assert_equal(cephes.smirnov(1,.1),0.9) + assert_(np.isnan(cephes.smirnov(1,np.nan))) + + def test_smirnovp(self): + assert_equal(cephes._smirnovp(1, .1), -1) + assert_equal(cephes._smirnovp(2, 0.75), -2*(0.25)**(2-1)) + assert_equal(cephes._smirnovp(3, 0.75), -3*(0.25)**(3-1)) + assert_(np.isnan(cephes._smirnovp(1, np.nan))) + + def test_smirnovc(self): + assert_equal(cephes._smirnovc(1,.1),0.1) + assert_(np.isnan(cephes._smirnovc(1,np.nan))) + x10 = np.linspace(0, 1, 11, endpoint=True) + assert_almost_equal(cephes._smirnovc(3, x10), 1-cephes.smirnov(3, x10)) + x4 = np.linspace(0, 1, 5, endpoint=True) + assert_almost_equal(cephes._smirnovc(4, x4), 1-cephes.smirnov(4, x4)) + + def test_smirnovi(self): + assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4) + assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6) + assert_(np.isnan(cephes.smirnovi(1,np.nan))) + + def test_smirnovci(self): + assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.4)),0.4) + assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.6)),0.6) + assert_(np.isnan(cephes._smirnovci(1,np.nan))) + + def test_spence(self): + assert_equal(cephes.spence(1),0.0) + + def test_stdtr(self): + assert_equal(cephes.stdtr(1,0),0.5) + assert_almost_equal(cephes.stdtr(1,1), 0.75) + assert_almost_equal(cephes.stdtr(1,2), 0.852416382349) + + def test_stdtridf(self): + cephes.stdtridf(0.7,1) + + def test_stdtrit(self): + cephes.stdtrit(1,0.7) + + def test_struve(self): + assert_equal(cephes.struve(0,0),0.0) + + def test_tandg(self): + assert_equal(cephes.tandg(45),1.0) + + def test_tklmbda(self): + assert_almost_equal(cephes.tklmbda(1,1),1.0) + + def test_y0(self): + cephes.y0(1) + + def test_y1(self): + cephes.y1(1) + + def test_yn(self): + cephes.yn(1,1) + + def test_yv(self): + cephes.yv(1,1) + + def test_yve(self): + cephes.yve(1,1) + + def test_wofz(self): + z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.), + complex(-1.,1.), complex(-1.,-9.), 
complex(-1.,9.), + complex(-0.0000000234545,1.1234), complex(-3.,5.1), + complex(-53,30.1), complex(0.0,0.12345), + complex(11,1), complex(-22,-2), complex(9,-28), + complex(21,-33), complex(1e5,1e5), complex(1e14,1e14) + ] + w = [ + complex(-3.78270245518980507452677445620103199303131110e-7, + 0.000903861276433172057331093754199933411710053155), + complex(0.1764906227004816847297495349730234591778719532788, + -0.02146550539468457616788719893991501311573031095617), + complex(0.2410250715772692146133539023007113781272362309451, + 0.06087579663428089745895459735240964093522265589350), + complex(0.30474420525691259245713884106959496013413834051768, + -0.20821893820283162728743734725471561394145872072738), + complex(7.317131068972378096865595229600561710140617977e34, + 8.321873499714402777186848353320412813066170427e34), + complex(0.0615698507236323685519612934241429530190806818395, + -0.00676005783716575013073036218018565206070072304635), + complex(0.3960793007699874918961319170187598400134746631, + -5.593152259116644920546186222529802777409274656e-9), + complex(0.08217199226739447943295069917990417630675021771804, + -0.04701291087643609891018366143118110965272615832184), + complex(0.00457246000350281640952328010227885008541748668738, + -0.00804900791411691821818731763401840373998654987934), + complex(0.8746342859608052666092782112565360755791467973338452, + 0.), + complex(0.00468190164965444174367477874864366058339647648741, + 0.0510735563901306197993676329845149741675029197050), + complex(-0.0023193175200187620902125853834909543869428763219, + -0.025460054739731556004902057663500272721780776336), + complex(9.11463368405637174660562096516414499772662584e304, + 3.97101807145263333769664875189354358563218932e305), + complex(-4.4927207857715598976165541011143706155432296e281, + -2.8019591213423077494444700357168707775769028e281), + complex(2.820947917809305132678577516325951485807107151e-6, + 2.820947917668257736791638444590253942253354058e-6), + 
complex(2.82094791773878143474039725787438662716372268e-15, + 2.82094791773878143474039725773333923127678361e-15) + ] + assert_func_equal(cephes.wofz, w, z, rtol=1e-13) + + +class TestAiry: + def test_airy(self): + # This tests the airy function to ensure 8 place accuracy in computation + + x = special.airy(.99) + assert_array_almost_equal( + x, + array([0.13689066,-0.16050153,1.19815925,0.92046818]), + 8, + ) + x = special.airy(.41) + assert_array_almost_equal( + x, + array([0.25238916,-.23480512,0.80686202,0.51053919]), + 8, + ) + x = special.airy(-.36) + assert_array_almost_equal( + x, + array([0.44508477,-0.23186773,0.44939534,0.48105354]), + 8, + ) + + def test_airye(self): + a = special.airye(0.01) + b = special.airy(0.01) + b1 = [None]*4 + for n in range(2): + b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01)) + for n in range(2,4): + b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01)))) + assert_array_almost_equal(a,b1,6) + + def test_bi_zeros(self): + bi = special.bi_zeros(2) + bia = (array([-1.17371322, -3.2710930]), + array([-2.29443968, -4.07315509]), + array([-0.45494438, 0.39652284]), + array([0.60195789, -0.76031014])) + assert_array_almost_equal(bi,bia,4) + + bi = special.bi_zeros(5) + assert_array_almost_equal(bi[0],array([-1.173713222709127, + -3.271093302836352, + -4.830737841662016, + -6.169852128310251, + -7.376762079367764]),11) + + assert_array_almost_equal(bi[1],array([-2.294439682614122, + -4.073155089071828, + -5.512395729663599, + -6.781294445990305, + -7.940178689168587]),10) + + assert_array_almost_equal(bi[2],array([-0.454944383639657, + 0.396522836094465, + -0.367969161486959, + 0.349499116831805, + -0.336026240133662]),11) + + assert_array_almost_equal(bi[3],array([0.601957887976239, + -0.760310141492801, + 0.836991012619261, + -0.88947990142654, + 0.929983638568022]),10) + + def test_ai_zeros(self): + ai = special.ai_zeros(1) + assert_array_almost_equal(ai,(array([-2.33810741]), + array([-1.01879297]), + array([0.5357]), + 
array([0.7012])),4) + + def test_ai_zeros_big(self): + z, zp, ai_zpx, aip_zx = special.ai_zeros(50000) + ai_z, aip_z, _, _ = special.airy(z) + ai_zp, aip_zp, _, _ = special.airy(zp) + + ai_envelope = 1/abs(z)**(1./4) + aip_envelope = abs(zp)**(1./4) + + # Check values + assert_allclose(ai_zpx, ai_zp, rtol=1e-10) + assert_allclose(aip_zx, aip_z, rtol=1e-10) + + # Check they are zeros + assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0) + assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0) + + # Check first zeros, DLMF 9.9.1 + assert_allclose(z[:6], + [-2.3381074105, -4.0879494441, -5.5205598281, + -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10) + assert_allclose(zp[:6], + [-1.0187929716, -3.2481975822, -4.8200992112, + -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10) + + def test_bi_zeros_big(self): + z, zp, bi_zpx, bip_zx = special.bi_zeros(50000) + _, _, bi_z, bip_z = special.airy(z) + _, _, bi_zp, bip_zp = special.airy(zp) + + bi_envelope = 1/abs(z)**(1./4) + bip_envelope = abs(zp)**(1./4) + + # Check values + assert_allclose(bi_zpx, bi_zp, rtol=1e-10) + assert_allclose(bip_zx, bip_z, rtol=1e-10) + + # Check they are zeros + assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0) + assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0) + + # Check first zeros, DLMF 9.9.2 + assert_allclose(z[:6], + [-1.1737132227, -3.2710933028, -4.8307378417, + -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10) + assert_allclose(zp[:6], + [-2.2944396826, -4.0731550891, -5.5123957297, + -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10) + + +class TestAssocLaguerre: + def test_assoc_laguerre(self): + a1 = special.genlaguerre(11,1) + a2 = special.assoc_laguerre(.2,11,1) + assert_array_almost_equal(a2,a1(.2),8) + a2 = special.assoc_laguerre(1,11,1) + assert_array_almost_equal(a2,a1(1),8) + + +class TestBesselpoly: + def test_besselpoly(self): + pass + + +class TestKelvin: + def test_bei(self): + mbei = special.bei(2) + 
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact + + def test_beip(self): + mbeip = special.beip(2) + assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact + + def test_ber(self): + mber = special.ber(2) + assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact + + def test_berp(self): + mberp = special.berp(2) + assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact + + def test_bei_zeros(self): + # Abramowitz & Stegun, Table 9.12 + bi = special.bei_zeros(5) + assert_array_almost_equal(bi,array([5.02622, + 9.45541, + 13.89349, + 18.33398, + 22.77544]),4) + + def test_beip_zeros(self): + bip = special.beip_zeros(5) + assert_array_almost_equal(bip,array([3.772673304934953, + 8.280987849760042, + 12.742147523633703, + 17.193431752512542, + 21.641143941167325]),8) + + def test_ber_zeros(self): + ber = special.ber_zeros(5) + assert_array_almost_equal(ber,array([2.84892, + 7.23883, + 11.67396, + 16.11356, + 20.55463]),4) + + def test_berp_zeros(self): + brp = special.berp_zeros(5) + assert_array_almost_equal(brp,array([6.03871, + 10.51364, + 14.96844, + 19.41758, + 23.86430]),4) + + def test_kelvin(self): + mkelv = special.kelvin(2) + assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j, + special.ker(2) + special.kei(2)*1j, + special.berp(2) + special.beip(2)*1j, + special.kerp(2) + special.keip(2)*1j),8) + + def test_kei(self): + mkei = special.kei(2) + assert_almost_equal(mkei,-0.20240006776470432,5) + + def test_keip(self): + mkeip = special.keip(2) + assert_almost_equal(mkeip,0.21980790991960536,5) + + def test_ker(self): + mker = special.ker(2) + assert_almost_equal(mker,-0.041664513991509472,5) + + def test_kerp(self): + mkerp = special.kerp(2) + assert_almost_equal(mkerp,-0.10660096588105264,5) + + def test_kei_zeros(self): + kei = special.kei_zeros(5) + assert_array_almost_equal(kei,array([3.91467, + 8.34422, + 12.78256, + 17.22314, + 21.66464]),4) + + def 
test_keip_zeros(self): + keip = special.keip_zeros(5) + assert_array_almost_equal(keip,array([4.93181, + 9.40405, + 13.85827, + 18.30717, + 22.75379]),4) + + # numbers come from 9.9 of A&S pg. 381 + def test_kelvin_zeros(self): + tmp = special.kelvin_zeros(5) + berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp + assert_array_almost_equal(berz,array([2.84892, + 7.23883, + 11.67396, + 16.11356, + 20.55463]),4) + assert_array_almost_equal(beiz,array([5.02622, + 9.45541, + 13.89349, + 18.33398, + 22.77544]),4) + assert_array_almost_equal(kerz,array([1.71854, + 6.12728, + 10.56294, + 15.00269, + 19.44382]),4) + assert_array_almost_equal(keiz,array([3.91467, + 8.34422, + 12.78256, + 17.22314, + 21.66464]),4) + assert_array_almost_equal(berpz,array([6.03871, + 10.51364, + 14.96844, + 19.41758, + 23.86430]),4) + assert_array_almost_equal(beipz,array([3.77267, + # table from 1927 had 3.77320 + # but this is more accurate + 8.28099, + 12.74215, + 17.19343, + 21.64114]),4) + assert_array_almost_equal(kerpz,array([2.66584, + 7.17212, + 11.63218, + 16.08312, + 20.53068]),4) + assert_array_almost_equal(keipz,array([4.93181, + 9.40405, + 13.85827, + 18.30717, + 22.75379]),4) + + def test_ker_zeros(self): + ker = special.ker_zeros(5) + assert_array_almost_equal(ker,array([1.71854, + 6.12728, + 10.56294, + 15.00269, + 19.44381]),4) + + def test_kerp_zeros(self): + kerp = special.kerp_zeros(5) + assert_array_almost_equal(kerp,array([2.66584, + 7.17212, + 11.63218, + 16.08312, + 20.53068]),4) + + +class TestBernoulli: + def test_bernoulli(self): + brn = special.bernoulli(5) + assert_array_almost_equal(brn,array([1.0000, + -0.5000, + 0.1667, + 0.0000, + -0.0333, + 0.0000]),4) + + +class TestBeta: + """ + Test beta and betaln. 
+ """ + + def test_beta(self): + assert_equal(special.beta(1, 1), 1.0) + assert_allclose(special.beta(-100.3, 1e-200), special.gamma(1e-200)) + assert_allclose(special.beta(0.0342, 171), 24.070498359873497, + rtol=1e-13, atol=0) + + bet = special.beta(2, 4) + betg = (special.gamma(2)*special.gamma(4))/special.gamma(6) + assert_allclose(bet, betg, rtol=1e-13) + + def test_beta_inf(self): + assert_(np.isinf(special.beta(-1, 2))) + + def test_betaln(self): + assert_equal(special.betaln(1, 1), 0.0) + assert_allclose(special.betaln(-100.3, 1e-200), + special.gammaln(1e-200)) + assert_allclose(special.betaln(0.0342, 170), 3.1811881124242447, + rtol=1e-14, atol=0) + + betln = special.betaln(2, 4) + bet = log(abs(special.beta(2, 4))) + assert_allclose(betln, bet, rtol=1e-13) + + +class TestBetaInc: + """ + Tests for betainc, betaincinv, betaincc, betainccinv. + """ + + def test_a1_b1(self): + # betainc(1, 1, x) is x. + x = np.array([0, 0.25, 1]) + assert_equal(special.betainc(1, 1, x), x) + assert_equal(special.betaincinv(1, 1, x), x) + assert_equal(special.betaincc(1, 1, x), 1 - x) + assert_equal(special.betainccinv(1, 1, x), 1 - x) + + # Nontrivial expected values computed with mpmath: + # from mpmath import mp + # mp.dps = 100 + # p = mp.betainc(a, b, 0, x, regularized=True) + # + # or, e.g., + # + # p = 0.25 + # a, b = 0.0342, 171 + # x = mp.findroot( + # lambda t: mp.betainc(a, b, 0, t, regularized=True) - p, + # (8e-21, 9e-21), + # solver='anderson', + # ) + # + @pytest.mark.parametrize( + 'a, b, x, p', + [(2, 4, 0.3138101704556974, 0.5), + (0.0342, 171.0, 1e-10, 0.552699169018070910641), + # gh-3761: + (0.0342, 171, 8.42313169354797e-21, 0.25), + # gh-4244: + (0.0002742794749792665, 289206.03125, 1.639984034231756e-56, + 0.9688708782196045), + # gh-12796: + (4, 99997, 0.0001947841578892121, 0.999995)]) + def test_betainc_betaincinv(self, a, b, x, p): + p1 = special.betainc(a, b, x) + assert_allclose(p1, p, rtol=1e-15) + x1 = special.betaincinv(a, b, p) + 
assert_allclose(x1, x, rtol=5e-13) + + # Expected values computed with mpmath: + # from mpmath import mp + # mp.dps = 100 + # p = mp.betainc(a, b, x, 1, regularized=True) + @pytest.mark.parametrize('a, b, x, p', + [(2.5, 3.0, 0.25, 0.833251953125), + (7.5, 13.25, 0.375, 0.43298734645560368593), + (0.125, 7.5, 0.425, 0.0006688257851314237), + (0.125, 18.0, 1e-6, 0.72982359145096327654), + (0.125, 18.0, 0.996, 7.2745875538380150586e-46), + (0.125, 24.0, 0.75, 3.70853404816862016966e-17), + (16.0, 0.75, 0.99999999975, + 5.4408759277418629909e-07), + # gh-4677 (numbers from stackoverflow question): + (0.4211959643503401, 16939.046996018118, + 0.000815296167195521, 1e-7)]) + def test_betaincc_betainccinv(self, a, b, x, p): + p1 = special.betaincc(a, b, x) + assert_allclose(p1, p, rtol=5e-15) + x1 = special.betainccinv(a, b, p) + assert_allclose(x1, x, rtol=8e-15) + + @pytest.mark.parametrize( + 'a, b, y, ref', + [(14.208308325339239, 14.208308325339239, 7.703145458496392e-307, + 8.566004561846704e-23), + (14.0, 14.5, 1e-280, 2.9343915006642424e-21), + (3.5, 15.0, 4e-95, 1.3290751429289227e-28), + (10.0, 1.25, 2e-234, 3.982659092143654e-24), + (4.0, 99997.0, 5e-88, 3.309800566862242e-27)] + ) + def test_betaincinv_tiny_y(self, a, b, y, ref): + # Test with extremely small y values. This test includes + # a regression test for an issue in the boost code; + # see https://github.com/boostorg/math/issues/961 + # + # The reference values were computed with mpmath. 
For example, + # + # from mpmath import mp + # mp.dps = 1000 + # a = 14.208308325339239 + # p = 7.703145458496392e-307 + # x = mp.findroot(lambda t: mp.betainc(a, a, 0, t, + # regularized=True) - p, + # x0=8.566e-23) + # print(float(x)) + # + x = special.betaincinv(a, b, y) + assert_allclose(x, ref, rtol=1e-14) + + @pytest.mark.parametrize('func', [special.betainc, special.betaincinv, + special.betaincc, special.betainccinv]) + @pytest.mark.parametrize('args', [(-1.0, 2, 0.5), (0, 2, 0.5), + (1.5, -2.0, 0.5), (1.5, 0, 0.5), + (1.5, 2.0, -0.3), (1.5, 2.0, 1.1)]) + def test_betainc_domain_errors(self, func, args): + with special.errstate(domain='raise'): + with pytest.raises(special.SpecialFunctionError, match='domain'): + special.betainc(*args) + + +class TestCombinatorics: + def test_comb(self): + assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.]) + assert_almost_equal(special.comb(10, 3), 120.) + assert_equal(special.comb(10, 3, exact=True), 120) + assert_equal(special.comb(10, 3, exact=True, repetition=True), 220) + + assert_allclose([special.comb(20, k, exact=True) for k in range(21)], + special.comb(20, list(range(21))), atol=1e-15) + + ii = np.iinfo(int).max + 1 + assert_equal(special.comb(ii, ii-1, exact=True), ii) + + expected = 100891344545564193334812497256 + assert special.comb(100, 50, exact=True) == expected + + @pytest.mark.parametrize("repetition", [True, False]) + @pytest.mark.parametrize("legacy", [True, False, _NoValue]) + @pytest.mark.parametrize("k", [3.5, 3]) + @pytest.mark.parametrize("N", [4.5, 4]) + def test_comb_legacy(self, N, k, legacy, repetition): + # test is only relevant for exact=True + if legacy is not _NoValue: + with pytest.warns( + DeprecationWarning, + match=r"Using 'legacy' keyword is deprecated" + ): + result = special.comb(N, k, exact=True, legacy=legacy, + repetition=repetition) + else: + result = special.comb(N, k, exact=True, legacy=legacy, + repetition=repetition) + if legacy: + # for exact=True and 
legacy=True, cast input arguments, else don't + if repetition: + # the casting in legacy mode happens AFTER transforming N & k, + # so rounding can change (e.g. both floats, but sum to int); + # hence we need to emulate the repetition-transformation here + N, k = int(N + k - 1), int(k) + repetition = False + else: + N, k = int(N), int(k) + # expected result is the same as with exact=False + with suppress_warnings() as sup: + if legacy is not _NoValue: + sup.filter(DeprecationWarning) + expected = special.comb(N, k, legacy=legacy, repetition=repetition) + assert_equal(result, expected) + + def test_comb_with_np_int64(self): + n = 70 + k = 30 + np_n = np.int64(n) + np_k = np.int64(k) + res_np = special.comb(np_n, np_k, exact=True) + res_py = special.comb(n, k, exact=True) + assert res_np == res_py + + def test_comb_zeros(self): + assert_equal(special.comb(2, 3, exact=True), 0) + assert_equal(special.comb(-1, 3, exact=True), 0) + assert_equal(special.comb(2, -1, exact=True), 0) + assert_equal(special.comb(2, -1, exact=False), 0) + assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]), + [0., 0., 0., 120.]) + + def test_perm(self): + assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.]) + assert_almost_equal(special.perm(10, 3), 720.) 
+ assert_equal(special.perm(10, 3, exact=True), 720) + + def test_perm_zeros(self): + assert_equal(special.perm(2, 3, exact=True), 0) + assert_equal(special.perm(-1, 3, exact=True), 0) + assert_equal(special.perm(2, -1, exact=True), 0) + assert_equal(special.perm(2, -1, exact=False), 0) + assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]), + [0., 0., 0., 720.]) + + def test_positional_deprecation(self): + with pytest.deprecated_call(match="use keyword arguments"): + # from test_comb + special.comb([10, 10], [3, 4], False, False) + + +class TestTrigonometric: + def test_cbrt(self): + cb = special.cbrt(27) + cbrl = 27**(1.0/3.0) + assert_approx_equal(cb,cbrl) + + def test_cbrtmore(self): + cb1 = special.cbrt(27.9) + cbrl1 = 27.9**(1.0/3.0) + assert_almost_equal(cb1,cbrl1,8) + + def test_cosdg(self): + cdg = special.cosdg(90) + cdgrl = cos(pi/2.0) + assert_almost_equal(cdg,cdgrl,8) + + def test_cosdgmore(self): + cdgm = special.cosdg(30) + cdgmrl = cos(pi/6.0) + assert_almost_equal(cdgm,cdgmrl,8) + + def test_cosm1(self): + cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10)) + csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1) + assert_array_almost_equal(cs,csrl,8) + + def test_cotdg(self): + ct = special.cotdg(30) + ctrl = tan(pi/6.0)**(-1) + assert_almost_equal(ct,ctrl,8) + + def test_cotdgmore(self): + ct1 = special.cotdg(45) + ctrl1 = tan(pi/4.0)**(-1) + assert_almost_equal(ct1,ctrl1,8) + + def test_specialpoints(self): + assert_almost_equal(special.cotdg(45), 1.0, 14) + assert_almost_equal(special.cotdg(-45), -1.0, 14) + assert_almost_equal(special.cotdg(90), 0.0, 14) + assert_almost_equal(special.cotdg(-90), 0.0, 14) + assert_almost_equal(special.cotdg(135), -1.0, 14) + assert_almost_equal(special.cotdg(-135), 1.0, 14) + assert_almost_equal(special.cotdg(225), 1.0, 14) + assert_almost_equal(special.cotdg(-225), -1.0, 14) + assert_almost_equal(special.cotdg(270), 0.0, 14) + assert_almost_equal(special.cotdg(-270), 0.0, 14) + 
assert_almost_equal(special.cotdg(315), -1.0, 14) + assert_almost_equal(special.cotdg(-315), 1.0, 14) + assert_almost_equal(special.cotdg(765), 1.0, 14) + + def test_sinc(self): + # the sinc implementation and more extensive sinc tests are in numpy + assert_array_equal(special.sinc([0]), 1) + assert_equal(special.sinc(0.0), 1.0) + + def test_sindg(self): + sn = special.sindg(90) + assert_equal(sn,1.0) + + def test_sindgmore(self): + snm = special.sindg(30) + snmrl = sin(pi/6.0) + assert_almost_equal(snm,snmrl,8) + snm1 = special.sindg(45) + snmrl1 = sin(pi/4.0) + assert_almost_equal(snm1,snmrl1,8) + + +class TestTandg: + + def test_tandg(self): + tn = special.tandg(30) + tnrl = tan(pi/6.0) + assert_almost_equal(tn,tnrl,8) + + def test_tandgmore(self): + tnm = special.tandg(45) + tnmrl = tan(pi/4.0) + assert_almost_equal(tnm,tnmrl,8) + tnm1 = special.tandg(60) + tnmrl1 = tan(pi/3.0) + assert_almost_equal(tnm1,tnmrl1,8) + + def test_specialpoints(self): + assert_almost_equal(special.tandg(0), 0.0, 14) + assert_almost_equal(special.tandg(45), 1.0, 14) + assert_almost_equal(special.tandg(-45), -1.0, 14) + assert_almost_equal(special.tandg(135), -1.0, 14) + assert_almost_equal(special.tandg(-135), 1.0, 14) + assert_almost_equal(special.tandg(180), 0.0, 14) + assert_almost_equal(special.tandg(-180), 0.0, 14) + assert_almost_equal(special.tandg(225), 1.0, 14) + assert_almost_equal(special.tandg(-225), -1.0, 14) + assert_almost_equal(special.tandg(315), -1.0, 14) + assert_almost_equal(special.tandg(-315), 1.0, 14) + + +class TestEllip: + def test_ellipj_nan(self): + """Regression test for #912.""" + special.ellipj(0.5, np.nan) + + def test_ellipj(self): + el = special.ellipj(0.2,0) + rel = [sin(0.2),cos(0.2),1.0,0.20] + assert_array_almost_equal(el,rel,13) + + def test_ellipk(self): + elk = special.ellipk(.2) + assert_almost_equal(elk,1.659623598610528,11) + + assert_equal(special.ellipkm1(0.0), np.inf) + assert_equal(special.ellipkm1(1.0), pi/2) + 
assert_equal(special.ellipkm1(np.inf), 0.0) + assert_equal(special.ellipkm1(np.nan), np.nan) + assert_equal(special.ellipkm1(-1), np.nan) + assert_allclose(special.ellipk(-10), 0.7908718902387385) + + def test_ellipkinc(self): + elkinc = special.ellipkinc(pi/2,.2) + elk = special.ellipk(0.2) + assert_almost_equal(elkinc,elk,15) + alpha = 20*pi/180 + phi = 45*pi/180 + m = sin(alpha)**2 + elkinc = special.ellipkinc(phi,m) + assert_almost_equal(elkinc,0.79398143,8) + # From pg. 614 of A & S + + assert_equal(special.ellipkinc(pi/2, 0.0), pi/2) + assert_equal(special.ellipkinc(pi/2, 1.0), np.inf) + assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0) + assert_equal(special.ellipkinc(pi/2, np.nan), np.nan) + assert_equal(special.ellipkinc(pi/2, 2), np.nan) + assert_equal(special.ellipkinc(0, 0.5), 0.0) + assert_equal(special.ellipkinc(np.inf, 0.5), np.inf) + assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf) + assert_equal(special.ellipkinc(np.inf, np.inf), np.nan) + assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan) + assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan) + assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan) + assert_equal(special.ellipkinc(np.nan, 0.5), np.nan) + assert_equal(special.ellipkinc(np.nan, np.nan), np.nan) + + assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14) + assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946) + + def test_ellipkinc_2(self): + # Regression test for gh-3550 + # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value + mbad = 0.68359375000000011 + phi = 0.9272952180016123 + m = np.nextafter(mbad, 0) + mvals = [] + for j in range(10): + mvals.append(m) + m = np.nextafter(m, 1) + f = special.ellipkinc(phi, mvals) + assert_array_almost_equal_nulp(f, np.full_like(f, 1.0259330100195334), 1) + # this bug also appears at phi + n * pi for at least small n + f1 = special.ellipkinc(phi + pi, mvals) + assert_array_almost_equal_nulp(f1, np.full_like(f1, 
5.1296650500976675), 2) + + def test_ellipkinc_singular(self): + # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2) + xlog = np.logspace(-300, -17, 25) + xlin = np.linspace(1e-17, 0.1, 25) + xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False) + + assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), + rtol=1e14) + assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), + rtol=1e14) + assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), + rtol=1e14) + assert_equal(special.ellipkinc(np.pi/2, 1), np.inf) + assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), + rtol=1e14) + assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), + rtol=1e14) + assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), + rtol=1e14) + assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf) + + def test_ellipe(self): + ele = special.ellipe(.2) + assert_almost_equal(ele,1.4890350580958529,8) + + assert_equal(special.ellipe(0.0), pi/2) + assert_equal(special.ellipe(1.0), 1.0) + assert_equal(special.ellipe(-np.inf), np.inf) + assert_equal(special.ellipe(np.nan), np.nan) + assert_equal(special.ellipe(2), np.nan) + assert_allclose(special.ellipe(-10), 3.6391380384177689) + + def test_ellipeinc(self): + eleinc = special.ellipeinc(pi/2,.2) + ele = special.ellipe(0.2) + assert_almost_equal(eleinc,ele,14) + # pg 617 of A & S + alpha, phi = 52*pi/180,35*pi/180 + m = sin(alpha)**2 + eleinc = special.ellipeinc(phi,m) + assert_almost_equal(eleinc, 0.58823065, 8) + + assert_equal(special.ellipeinc(pi/2, 0.0), pi/2) + assert_equal(special.ellipeinc(pi/2, 1.0), 1.0) + assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf) + assert_equal(special.ellipeinc(pi/2, np.nan), np.nan) + assert_equal(special.ellipeinc(pi/2, 2), np.nan) + assert_equal(special.ellipeinc(0, 0.5), 0.0) + assert_equal(special.ellipeinc(np.inf, 0.5), np.inf) + assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf) 
+ assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf) + assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf) + assert_equal(special.ellipeinc(np.inf, np.inf), np.nan) + assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan) + assert_equal(special.ellipeinc(np.nan, 0.5), np.nan) + assert_equal(special.ellipeinc(np.nan, np.nan), np.nan) + assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876) + + def test_ellipeinc_2(self): + # Regression test for gh-3550 + # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value + mbad = 0.68359375000000011 + phi = 0.9272952180016123 + m = np.nextafter(mbad, 0) + mvals = [] + for j in range(10): + mvals.append(m) + m = np.nextafter(m, 1) + f = special.ellipeinc(phi, mvals) + assert_array_almost_equal_nulp(f, np.full_like(f, 0.84442884574781019), 2) + # this bug also appears at phi + n * pi for at least small n + f1 = special.ellipeinc(phi + pi, mvals) + assert_array_almost_equal_nulp(f1, np.full_like(f1, 3.3471442287390509), 4) + + +class TestEllipCarlson: + """Test for Carlson elliptic integrals ellipr[cdfgj]. + The special values used in these tests can be found in Sec. 
3 of Carlson + (1994), https://arxiv.org/abs/math/9409227 + """ + def test_elliprc(self): + assert_allclose(elliprc(1, 1), 1) + assert elliprc(1, inf) == 0.0 + assert isnan(elliprc(1, 0)) + assert elliprc(1, complex(1, inf)) == 0.0 + args = array([[0.0, 0.25], + [2.25, 2.0], + [0.0, 1.0j], + [-1.0j, 1.0j], + [0.25, -2.0], + [1.0j, -1.0]]) + expected_results = array([np.pi, + np.log(2.0), + 1.1107207345396 * (1.0-1.0j), + 1.2260849569072-0.34471136988768j, + np.log(2.0) / 3.0, + 0.77778596920447+0.19832484993429j]) + for i, arr in enumerate(args): + assert_allclose(elliprc(*arr), expected_results[i]) + + def test_elliprd(self): + assert_allclose(elliprd(1, 1, 1), 1) + assert_allclose(elliprd(0, 2, 1) / 3.0, 0.59907011736779610371) + assert elliprd(1, 1, inf) == 0.0 + assert np.isinf(elliprd(1, 1, 0)) + assert np.isinf(elliprd(1, 1, complex(0, 0))) + assert np.isinf(elliprd(0, 1, complex(0, 0))) + assert isnan(elliprd(1, 1, -np.finfo(np.float64).tiny / 2.0)) + assert isnan(elliprd(1, 1, complex(-1, 0))) + args = array([[0.0, 2.0, 1.0], + [2.0, 3.0, 4.0], + [1.0j, -1.0j, 2.0], + [0.0, 1.0j, -1.0j], + [0.0, -1.0+1.0j, 1.0j], + [-2.0-1.0j, -1.0j, -1.0+1.0j]]) + expected_results = array([1.7972103521034, + 0.16510527294261, + 0.65933854154220, + 1.2708196271910+2.7811120159521j, + -1.8577235439239-0.96193450888839j, + 1.8249027393704-1.2218475784827j]) + for i, arr in enumerate(args): + assert_allclose(elliprd(*arr), expected_results[i]) + + def test_elliprf(self): + assert_allclose(elliprf(1, 1, 1), 1) + assert_allclose(elliprf(0, 1, 2), 1.31102877714605990523) + assert elliprf(1, inf, 1) == 0.0 + assert np.isinf(elliprf(0, 1, 0)) + assert isnan(elliprf(1, 1, -1)) + assert elliprf(complex(inf), 0, 1) == 0.0 + assert isnan(elliprf(1, 1, complex(-inf, 1))) + args = array([[1.0, 2.0, 0.0], + [1.0j, -1.0j, 0.0], + [0.5, 1.0, 0.0], + [-1.0+1.0j, 1.0j, 0.0], + [2.0, 3.0, 4.0], + [1.0j, -1.0j, 2.0], + [-1.0+1.0j, 1.0j, 1.0-1.0j]]) + expected_results = array([1.3110287771461, + 
1.8540746773014, + 1.8540746773014, + 0.79612586584234-1.2138566698365j, + 0.58408284167715, + 1.0441445654064, + 0.93912050218619-0.53296252018635j]) + for i, arr in enumerate(args): + assert_allclose(elliprf(*arr), expected_results[i]) + + def test_elliprg(self): + assert_allclose(elliprg(1, 1, 1), 1) + assert_allclose(elliprg(0, 0, 1), 0.5) + assert_allclose(elliprg(0, 0, 0), 0) + assert np.isinf(elliprg(1, inf, 1)) + assert np.isinf(elliprg(complex(inf), 1, 1)) + args = array([[0.0, 16.0, 16.0], + [2.0, 3.0, 4.0], + [0.0, 1.0j, -1.0j], + [-1.0+1.0j, 1.0j, 0.0], + [-1.0j, -1.0+1.0j, 1.0j], + [0.0, 0.0796, 4.0]]) + expected_results = array([np.pi, + 1.7255030280692, + 0.42360654239699, + 0.44660591677018+0.70768352357515j, + 0.36023392184473+0.40348623401722j, + 1.0284758090288]) + for i, arr in enumerate(args): + assert_allclose(elliprg(*arr), expected_results[i]) + + def test_elliprj(self): + assert_allclose(elliprj(1, 1, 1, 1), 1) + assert elliprj(1, 1, inf, 1) == 0.0 + assert isnan(elliprj(1, 0, 0, 0)) + assert isnan(elliprj(-1, 1, 1, 1)) + assert elliprj(1, 1, 1, inf) == 0.0 + args = array([[0.0, 1.0, 2.0, 3.0], + [2.0, 3.0, 4.0, 5.0], + [2.0, 3.0, 4.0, -1.0+1.0j], + [1.0j, -1.0j, 0.0, 2.0], + [-1.0+1.0j, -1.0-1.0j, 1.0, 2.0], + [1.0j, -1.0j, 0.0, 1.0-1.0j], + [-1.0+1.0j, -1.0-1.0j, 1.0, -3.0+1.0j], + [2.0, 3.0, 4.0, -0.5], # Cauchy principal value + [2.0, 3.0, 4.0, -5.0]]) # Cauchy principal value + expected_results = array([0.77688623778582, + 0.14297579667157, + 0.13613945827771-0.38207561624427j, + 1.6490011662711, + 0.94148358841220, + 1.8260115229009+1.2290661908643j, + -0.61127970812028-1.0684038390007j, + 0.24723819703052, # Cauchy principal value + -0.12711230042964]) # Caucny principal value + for i, arr in enumerate(args): + assert_allclose(elliprj(*arr), expected_results[i]) + + @pytest.mark.xfail(reason="Insufficient accuracy on 32-bit") + def test_elliprj_hard(self): + assert_allclose(elliprj(6.483625725195452e-08, + 1.1649136528196886e-27, + 
3.6767340167168e+13, + 0.493704617023468), + 8.63426920644241857617477551054e-6, + rtol=5e-15, atol=1e-20) + assert_allclose(elliprj(14.375105857849121, + 9.993988969725365e-11, + 1.72844262269944e-26, + 5.898871222598245e-06), + 829774.1424801627252574054378691828, + rtol=5e-15, atol=1e-20) + + +class TestEllipLegendreCarlsonIdentities: + """Test identities expressing the Legendre elliptic integrals in terms + of Carlson's symmetric integrals. These identities can be found + in the DLMF https://dlmf.nist.gov/19.25#i . + """ + + def setup_class(self): + self.m_n1_1 = np.arange(-1., 1., 0.01) + # For double, this is -(2**1024) + self.max_neg = finfo(double).min + # Lots of very negative numbers + self.very_neg_m = -1. * 2.**arange(-1 + + np.log2(-self.max_neg), 0., + -1.) + self.ms_up_to_1 = np.concatenate(([self.max_neg], + self.very_neg_m, + self.m_n1_1)) + + def test_k(self): + """Test identity: + K(m) = R_F(0, 1-m, 1) + """ + m = self.ms_up_to_1 + assert_allclose(ellipk(m), elliprf(0., 1.-m, 1.)) + + def test_km1(self): + """Test identity: + K(m) = R_F(0, 1-m, 1) + But with the ellipkm1 function + """ + # For double, this is 2**-1022 + tiny = finfo(double).tiny + # All these small powers of 2, up to 2**-1 + m1 = tiny * 2.**arange(0., -np.log2(tiny)) + assert_allclose(ellipkm1(m1), elliprf(0., m1, 1.)) + + def test_e(self): + """Test identity: + E(m) = 2*R_G(0, 1-k^2, 1) + """ + m = self.ms_up_to_1 + assert_allclose(ellipe(m), 2.*elliprg(0., 1.-m, 1.)) + + +class TestErf: + + def test_erf(self): + er = special.erf(.25) + assert_almost_equal(er,0.2763263902,8) + + def test_erf_zeros(self): + erz = special.erf_zeros(5) + erzr = array([1.45061616+1.88094300j, + 2.24465928+2.61657514j, + 2.83974105+3.17562810j, + 3.33546074+3.64617438j, + 3.76900557+4.06069723j]) + assert_array_almost_equal(erz,erzr,4) + + def _check_variant_func(self, func, other_func, rtol, atol=0): + np.random.seed(1234) + n = 10000 + x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 
1) + y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1) + z = x + 1j*y + + with np.errstate(all='ignore'): + w = other_func(z) + w_real = other_func(x).real + + mask = np.isfinite(w) + w = w[mask] + z = z[mask] + + mask = np.isfinite(w_real) + w_real = w_real[mask] + x = x[mask] + + # test both real and complex variants + assert_func_equal(func, w, z, rtol=rtol, atol=atol) + assert_func_equal(func, w_real, x, rtol=rtol, atol=atol) + + def test_erfc_consistent(self): + self._check_variant_func( + cephes.erfc, + lambda z: 1 - cephes.erf(z), + rtol=1e-12, + atol=1e-14 # <- the test function loses precision + ) + + def test_erfcx_consistent(self): + self._check_variant_func( + cephes.erfcx, + lambda z: np.exp(z*z) * cephes.erfc(z), + rtol=1e-12 + ) + + def test_erfi_consistent(self): + self._check_variant_func( + cephes.erfi, + lambda z: -1j * cephes.erf(1j*z), + rtol=1e-12 + ) + + def test_dawsn_consistent(self): + self._check_variant_func( + cephes.dawsn, + lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z), + rtol=1e-12 + ) + + def test_erf_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -1, 1] + assert_allclose(special.erf(vals), expected, rtol=1e-15) + + def test_erfc_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, 2, 0] + assert_allclose(special.erfc(vals), expected, rtol=1e-15) + + def test_erfcx_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, np.inf, 0] + assert_allclose(special.erfcx(vals), expected, rtol=1e-15) + + def test_erfi_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -np.inf, np.inf] + assert_allclose(special.erfi(vals), expected, rtol=1e-15) + + def test_dawsn_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -0.0, 0.0] + assert_allclose(special.dawsn(vals), expected, rtol=1e-15) + + def test_wofz_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j] + 
assert_allclose(special.wofz(vals), expected, rtol=1e-15) + + +class TestEuler: + def test_euler(self): + eu0 = special.euler(0) + eu1 = special.euler(1) + eu2 = special.euler(2) # just checking segfaults + assert_allclose(eu0, [1], rtol=1e-15) + assert_allclose(eu1, [1, 0], rtol=1e-15) + assert_allclose(eu2, [1, 0, -1], rtol=1e-15) + eu24 = special.euler(24) + mathworld = [1,1,5,61,1385,50521,2702765,199360981, + 19391512145,2404879675441, + 370371188237525,69348874393137901, + 15514534163557086905] + correct = zeros((25,),'d') + for k in range(0,13): + if (k % 2): + correct[2*k] = -float(mathworld[k]) + else: + correct[2*k] = float(mathworld[k]) + with np.errstate(all='ignore'): + err = nan_to_num((eu24-correct)/correct) + errmax = max(err) + assert_almost_equal(errmax, 0.0, 14) + + +class TestExp: + def test_exp2(self): + ex = special.exp2(2) + exrl = 2**2 + assert_equal(ex,exrl) + + def test_exp2more(self): + exm = special.exp2(2.5) + exmrl = 2**(2.5) + assert_almost_equal(exm,exmrl,8) + + def test_exp10(self): + ex = special.exp10(2) + exrl = 10**2 + assert_approx_equal(ex,exrl) + + def test_exp10more(self): + exm = special.exp10(2.5) + exmrl = 10**(2.5) + assert_almost_equal(exm,exmrl,8) + + def test_expm1(self): + ex = (special.expm1(2),special.expm1(3),special.expm1(4)) + exrl = (exp(2)-1,exp(3)-1,exp(4)-1) + assert_array_almost_equal(ex,exrl,8) + + def test_expm1more(self): + ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2)) + exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1) + assert_array_almost_equal(ex1,exrl1,8) + + +class TestFactorialFunctions: + @pytest.mark.parametrize("exact", [True, False]) + def test_factorialx_scalar_return_type(self, exact): + assert np.isscalar(special.factorial(1, exact=exact)) + assert np.isscalar(special.factorial2(1, exact=exact)) + assert np.isscalar(special.factorialk(1, 3, exact=exact)) + + @pytest.mark.parametrize("n", [-1, -2, -3]) + @pytest.mark.parametrize("exact", [True, False]) + def 
test_factorialx_negative(self, exact, n): + assert_equal(special.factorial(n, exact=exact), 0) + assert_equal(special.factorial2(n, exact=exact), 0) + assert_equal(special.factorialk(n, 3, exact=exact), 0) + + @pytest.mark.parametrize("exact", [True, False]) + def test_factorialx_negative_array(self, exact): + assert_func = assert_array_equal if exact else assert_allclose + # Consistent output for n < 0 + assert_func(special.factorial([-5, -4, 0, 1], exact=exact), + [0, 0, 1, 1]) + assert_func(special.factorial2([-5, -4, 0, 1], exact=exact), + [0, 0, 1, 1]) + assert_func(special.factorialk([-5, -4, 0, 1], 3, exact=exact), + [0, 0, 1, 1]) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("content", [np.nan, None, np.datetime64('nat')], + ids=["NaN", "None", "NaT"]) + def test_factorialx_nan(self, content, exact): + # scalar + assert special.factorial(content, exact=exact) is np.nan + assert special.factorial2(content, exact=exact) is np.nan + assert special.factorialk(content, 3, exact=exact) is np.nan + # array-like (initializes np.array with default dtype) + if content is not np.nan: + # None causes object dtype, which is not supported; as is datetime + with pytest.raises(ValueError, match="Unsupported datatype.*"): + special.factorial([content], exact=exact) + elif exact: + with pytest.raises(ValueError, match="factorial with `exact=Tr.*"): + special.factorial([content], exact=exact) + else: + assert np.isnan(special.factorial([content], exact=exact)[0]) + # factorial{2,k} don't support array case due to dtype constraints + with pytest.raises(ValueError, match="factorial2 does not support.*"): + special.factorial2([content], exact=exact) + with pytest.raises(ValueError, match="factorialk does not support.*"): + special.factorialk([content], 3, exact=exact) + # array-case also tested in test_factorial{,2,k}_corner_cases + + @pytest.mark.parametrize("levels", range(1, 5)) + @pytest.mark.parametrize("exact", [True, False]) + def 
test_factorialx_array_shape(self, levels, exact): + def _nest_me(x, k=1): + """ + Double x and nest it k times + + For example: + >>> _nest_me([3, 4], 2) + [[[3, 4], [3, 4]], [[3, 4], [3, 4]]] + """ + if k == 0: + return x + else: + return _nest_me([x, x], k-1) + + def _check(res, nucleus): + exp = np.array(_nest_me(nucleus, k=levels), dtype=object) + # test that ndarray shape is maintained + # need to cast to float due to numpy/numpy#21220 + assert_allclose(res.astype(np.float64), exp.astype(np.float64)) + + n = np.array(_nest_me([5, 25], k=levels)) + exp_nucleus = {1: [120, math.factorial(25)], + # correctness of factorial{2,k}() is tested elsewhere + 2: [15, special.factorial2(25, exact=True)], + 3: [10, special.factorialk(25, 3, exact=True)]} + + _check(special.factorial(n, exact=exact), exp_nucleus[1]) + _check(special.factorial2(n, exact=exact), exp_nucleus[2]) + _check(special.factorialk(n, 3, exact=exact), exp_nucleus[3]) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dtype", [ + None, int, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64 + ]) + @pytest.mark.parametrize("dim", range(0, 5)) + def test_factorialx_array_dimension(self, dim, dtype, exact): + n = np.array(5, dtype=dtype, ndmin=dim) + exp = {1: 120, 2: 15, 3: 10} + assert_allclose(special.factorial(n, exact=exact), + np.array(exp[1], ndmin=dim)) + assert_allclose(special.factorial2(n, exact=exact), + np.array(exp[2], ndmin=dim)) + assert_allclose(special.factorialk(n, 3, exact=exact), + np.array(exp[3], ndmin=dim)) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("level", range(1, 5)) + def test_factorialx_array_like(self, level, exact): + def _nest_me(x, k=1): + if k == 0: + return x + else: + return _nest_me([x], k-1) + + n = _nest_me([5], k=level-1) # nested list + exp_nucleus = {1: 120, 2: 15, 3: 10} + assert_func = assert_array_equal if exact else assert_allclose + 
assert_func(special.factorial(n, exact=exact), + np.array(exp_nucleus[1], ndmin=level)) + assert_func(special.factorial2(n, exact=exact), + np.array(exp_nucleus[2], ndmin=level)) + assert_func(special.factorialk(n, 3, exact=exact), + np.array(exp_nucleus[3], ndmin=level)) + + # note that n=170 is the last integer such that factorial(n) fits float64 + @pytest.mark.parametrize('n', range(30, 180, 10)) + def test_factorial_accuracy(self, n): + # Compare exact=True vs False, i.e. that the accuracy of the + # approximation is better than the specified tolerance. + + rtol = 6e-14 if sys.platform == 'win32' else 1e-15 + # need to cast exact result to float due to numpy/numpy#21220 + assert_allclose(float(special.factorial(n, exact=True)), + special.factorial(n, exact=False), rtol=rtol) + assert_allclose(special.factorial([n], exact=True).astype(float), + special.factorial([n], exact=False), rtol=rtol) + + @pytest.mark.parametrize('n', + list(range(0, 22)) + list(range(30, 180, 10))) + def test_factorial_int_reference(self, n): + # Compare all with math.factorial + correct = math.factorial(n) + assert_array_equal(correct, special.factorial(n, True)) + assert_array_equal(correct, special.factorial([n], True)[0]) + + rtol = 6e-14 if sys.platform == 'win32' else 1e-15 + assert_allclose(float(correct), special.factorial(n, False), + rtol=rtol) + assert_allclose(float(correct), special.factorial([n], False)[0], + rtol=rtol) + + def test_factorial_float_reference(self): + def _check(n, expected): + assert_allclose(special.factorial(n), expected) + assert_allclose(special.factorial([n])[0], expected) + # using floats with exact=True is deprecated for scalars... + with pytest.deprecated_call(match="Non-integer values.*"): + assert_allclose(special.factorial(n, exact=True), expected) + # ... 
and already an error for arrays + with pytest.raises(ValueError, match="factorial with `exact=Tr.*"): + special.factorial([n], exact=True) + + # Reference values from mpmath for gamma(n+1) + _check(0.01, 0.994325851191506032181932988) + _check(1.11, 1.051609009483625091514147465) + _check(5.55, 314.9503192327208241614959052) + _check(11.1, 50983227.84411615655137170553) + _check(33.3, 2.493363339642036352229215273e+37) + _check(55.5, 9.479934358436729043289162027e+73) + _check(77.7, 3.060540559059579022358692625e+114) + _check(99.9, 5.885840419492871504575693337e+157) + # close to maximum for float64 + _check(170.6243, 1.79698185749571048960082e+308) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64, + np.complex128, object]) + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dim", range(0, 5)) + # test empty & non-empty arrays, with nans and mixed + @pytest.mark.parametrize("content", + [[], [1], [1.1], [np.nan], [np.nan, 1]], + ids=["[]", "[1]", "[1.1]", "[NaN]", "[NaN, 1]"]) + def test_factorial_array_corner_cases(self, content, dim, exact, dtype): + if dtype == np.int64 and any(np.isnan(x) for x in content): + pytest.skip("impossible combination") + # np.array(x, ndim=0) will not be 0-dim. 
unless x is too + content = content if (dim > 0 or len(content) != 1) else content[0] + n = np.array(content, ndmin=dim, dtype=dtype) + result = None + if not content: + result = special.factorial(n, exact=exact) + elif not (np.issubdtype(n.dtype, np.integer) + or np.issubdtype(n.dtype, np.floating)): + with pytest.raises(ValueError, match="Unsupported datatype*"): + special.factorial(n, exact=exact) + elif exact and not np.issubdtype(n.dtype, np.integer): + with pytest.raises(ValueError, match="factorial with `exact=.*"): + special.factorial(n, exact=exact) + else: + # no error + result = special.factorial(n, exact=exact) + + # assert_equal does not distinguish scalars and 0-dim arrays of the same value, + # see https://github.com/numpy/numpy/issues/24050 + def assert_really_equal(x, y): + assert type(x) == type(y), f"types not equal: {type(x)}, {type(y)}" + assert_equal(x, y) + + if result is not None: + # keep 0-dim.; otherwise n.ravel().ndim==1, even if n.ndim==0 + n_flat = n.ravel() if n.ndim else n + ref = special.factorial(n_flat, exact=exact) if n.size else [] + # expected result is empty if and only if n is empty, + # and has the same dtype & dimension as n + expected = np.array(ref, ndmin=dim, dtype=dtype) + assert_really_equal(result, expected) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None], + ids=["1", "1.1", "2+2j", "NaN", "None"]) + def test_factorial_scalar_corner_cases(self, n, exact): + if (n is None or n is np.nan or np.issubdtype(type(n), np.integer) + or np.issubdtype(type(n), np.floating)): + # no error + if (np.issubdtype(type(n), np.floating) and exact + and n is not np.nan): + with pytest.deprecated_call(match="Non-integer values.*"): + result = special.factorial(n, exact=exact) + else: + result = special.factorial(n, exact=exact) + exp = np.nan if n is np.nan or n is None else special.factorial(n) + assert_equal(result, exp) + else: + with pytest.raises(ValueError, 
match="Unsupported datatype*"): + special.factorial(n, exact=exact) + + # use odd increment to make sure both odd & even numbers are tested! + @pytest.mark.parametrize('n', range(30, 180, 11)) + def test_factorial2_accuracy(self, n): + # Compare exact=True vs False, i.e. that the accuracy of the + # approximation is better than the specified tolerance. + + rtol = 2e-14 if sys.platform == 'win32' else 1e-15 + # need to cast exact result to float due to numpy/numpy#21220 + assert_allclose(float(special.factorial2(n, exact=True)), + special.factorial2(n, exact=False), rtol=rtol) + assert_allclose(special.factorial2([n], exact=True).astype(float), + special.factorial2([n], exact=False), rtol=rtol) + + @pytest.mark.parametrize('n', + list(range(0, 22)) + list(range(30, 180, 11))) + def test_factorial2_int_reference(self, n): + # Compare all with correct value + + # Cannot use np.product due to overflow + correct = functools.reduce(operator.mul, list(range(n, 0, -2)), 1) + + assert_array_equal(correct, special.factorial2(n, True)) + assert_array_equal(correct, special.factorial2([n], True)[0]) + + assert_allclose(float(correct), special.factorial2(n, False)) + assert_allclose(float(correct), special.factorial2([n], False)[0]) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64, + np.complex128, object]) + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dim", range(0, 5)) + # test empty & non-empty arrays, with nans and mixed + @pytest.mark.parametrize("content", [[], [1], [np.nan], [np.nan, 1]], + ids=["[]", "[1]", "[NaN]", "[NaN, 1]"]) + def test_factorial2_array_corner_cases(self, content, dim, exact, dtype): + if dtype == np.int64 and any(np.isnan(x) for x in content): + pytest.skip("impossible combination") + # np.array(x, ndim=0) will not be 0-dim. 
unless x is too + content = content if (dim > 0 or len(content) != 1) else content[0] + n = np.array(content, ndmin=dim, dtype=dtype) + if np.issubdtype(n.dtype, np.integer) or (not content): + # no error + result = special.factorial2(n, exact=exact) + # expected result is identical to n for exact=True resp. empty + # arrays (assert_allclose chokes on object), otherwise up to tol + func = assert_equal if exact or (not content) else assert_allclose + func(result, n) + else: + with pytest.raises(ValueError, match="factorial2 does not*"): + special.factorial2(n, 3) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None], + ids=["1", "1.1", "2+2j", "NaN", "None"]) + def test_factorial2_scalar_corner_cases(self, n, exact): + if n is None or n is np.nan or np.issubdtype(type(n), np.integer): + # no error + result = special.factorial2(n, exact=exact) + exp = np.nan if n is np.nan or n is None else special.factorial(n) + assert_equal(result, exp) + else: + with pytest.raises(ValueError, match="factorial2 does not*"): + special.factorial2(n, exact=exact) + + @pytest.mark.parametrize("k", range(1, 5)) + # note that n=170 is the last integer such that factorial(n) fits float64; + # use odd increment to make sure both odd & even numbers are tested + @pytest.mark.parametrize('n', range(170, 20, -29)) + def test_factorialk_accuracy(self, n, k): + # Compare exact=True vs False, i.e. that the accuracy of the + # approximation is better than the specified tolerance. 
+ + # need to cast exact result to float due to numpy/numpy#21220 + assert_allclose(float(special.factorialk(n, k=k, exact=True)), + special.factorialk(n, k=k, exact=False)) + assert_allclose(special.factorialk([n], k=k, exact=True).astype(float), + special.factorialk([n], k=k, exact=False)) + + @pytest.mark.parametrize('k', list(range(1, 5)) + [10, 20]) + @pytest.mark.parametrize('n', + list(range(0, 22)) + list(range(22, 100, 11))) + def test_factorialk_int_reference(self, n, k): + # Compare all with correct value + + # Would be nice to use np.product here, but that's + # broken on windows, see numpy/numpy#21219 + correct = functools.reduce(operator.mul, list(range(n, 0, -k)), 1) + + assert_array_equal(correct, special.factorialk(n, k, True)) + assert_array_equal(correct, special.factorialk([n], k, True)[0]) + + assert_allclose(float(correct), special.factorialk(n, k, False)) + assert_allclose(float(correct), special.factorialk([n], k, False)[0]) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64, + np.complex128, object]) + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dim", range(0, 5)) + # test empty & non-empty arrays, with nans and mixed + @pytest.mark.parametrize("content", [[], [1], [np.nan], [np.nan, 1]], + ids=["[]", "[1]", "[NaN]", "[NaN, 1]"]) + def test_factorialk_array_corner_cases(self, content, dim, exact, dtype): + if dtype == np.int64 and any(np.isnan(x) for x in content): + pytest.skip("impossible combination") + # np.array(x, ndim=0) will not be 0-dim. 
unless x is too + content = content if (dim > 0 or len(content) != 1) else content[0] + n = np.array(content, ndmin=dim, dtype=dtype if exact else np.float64) + if np.issubdtype(n.dtype, np.integer) or (not content): + # no error; expected result is identical to n + assert_equal(special.factorialk(n, 3, exact=exact), n) + else: + with pytest.raises(ValueError, match="factorialk does not*"): + special.factorialk(n, 3, exact=exact) + + @pytest.mark.parametrize("exact", [True, False, None]) + @pytest.mark.parametrize("k", range(1, 5)) + @pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None], + ids=["1", "1.1", "2+2j", "NaN", "None"]) + def test_factorialk_scalar_corner_cases(self, n, k, exact): + if n is None or n is np.nan or np.issubdtype(type(n), np.integer): + if exact is None: + with pytest.deprecated_call(match="factorialk will default.*"): + result = special.factorialk(n, k=k, exact=exact) + else: + # no error + result = special.factorialk(n, k=k, exact=exact) + + nan_cond = n is np.nan or n is None + # factorialk(1, k) == 1 for all k + expected = np.nan if nan_cond else 1 + assert_equal(result, expected) + else: + with pytest.raises(ValueError, match="factorialk does not*"): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "factorialk will default") + special.factorialk(n, k=k, exact=exact) + + @pytest.mark.parametrize("k", [0, 1.1, np.nan, "1"]) + def test_factorialk_raises_k(self, k): + with pytest.raises(ValueError, match="k must be a positive integer*"): + special.factorialk(1, k) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("k", range(1, 12)) + def test_factorialk_dtype(self, k, exact): + kw = {"k": k, "exact": exact} + if exact and k in _FACTORIALK_LIMITS_64BITS.keys(): + n = np.array([_FACTORIALK_LIMITS_32BITS[k]]) + assert_equal(special.factorialk(n, **kw).dtype, np_long) + assert_equal(special.factorialk(n + 1, **kw).dtype, np.int64) + # assert maximality of limits for given dtype + assert 
special.factorialk(n + 1, **kw) > np.iinfo(np.int32).max + + n = np.array([_FACTORIALK_LIMITS_64BITS[k]]) + assert_equal(special.factorialk(n, **kw).dtype, np.int64) + assert_equal(special.factorialk(n + 1, **kw).dtype, object) + assert special.factorialk(n + 1, **kw) > np.iinfo(np.int64).max + else: + n = np.array([_FACTORIALK_LIMITS_64BITS.get(k, 1)]) + # for exact=True and k >= 10, we always return object; + # for exact=False it's always float + dtype = object if exact else np.float64 + assert_equal(special.factorialk(n, **kw).dtype, dtype) + + def test_factorial_mixed_nan_inputs(self): + x = np.array([np.nan, 1, 2, 3, np.nan]) + expected = np.array([np.nan, 1, 2, 6, np.nan]) + assert_equal(special.factorial(x, exact=False), expected) + with pytest.raises(ValueError, match="factorial with `exact=True.*"): + special.factorial(x, exact=True) + + +class TestFresnel: + @pytest.mark.parametrize("z, s, c", [ + # some positive value + (.5, 0.064732432859999287, 0.49234422587144644), + (.5 + .0j, 0.064732432859999287, 0.49234422587144644), + # negative half annulus + # https://github.com/scipy/scipy/issues/12309 + # Reference values can be reproduced with + # https://www.wolframalpha.com/input/?i=FresnelS%5B-2.0+%2B+0.1i%5D + # https://www.wolframalpha.com/input/?i=FresnelC%5B-2.0+%2B+0.1i%5D + ( + -2.0 + 0.1j, + -0.3109538687728942-0.0005870728836383176j, + -0.4879956866358554+0.10670801832903172j + ), + ( + -0.1 - 1.5j, + -0.03918309471866977+0.7197508454568574j, + 0.09605692502968956-0.43625191013617465j + ), + # a different algorithm kicks in for "large" values, i.e., |z| >= 4.5, + # make sure to test both float and complex values; a different + # algorithm is used + (6.0, 0.44696076, 0.49953147), + (6.0 + 0.0j, 0.44696076, 0.49953147), + (6.0j, -0.44696076j, 0.49953147j), + (-6.0 + 0.0j, -0.44696076, -0.49953147), + (-6.0j, 0.44696076j, -0.49953147j), + # inf + (np.inf, 0.5, 0.5), + (-np.inf, -0.5, -0.5), + ]) + def test_fresnel_values(self, z, s, c): + frs = 
array(special.fresnel(z)) + assert_array_almost_equal(frs, array([s, c]), 8) + + # values from pg 329 Table 7.11 of A & S + # slightly corrected in 4th decimal place + def test_fresnel_zeros(self): + szo, czo = special.fresnel_zeros(5) + assert_array_almost_equal(szo, + array([2.0093+0.2885j, + 2.8335+0.2443j, + 3.4675+0.2185j, + 4.0026+0.2009j, + 4.4742+0.1877j]),3) + assert_array_almost_equal(czo, + array([1.7437+0.3057j, + 2.6515+0.2529j, + 3.3204+0.2240j, + 3.8757+0.2047j, + 4.3611+0.1907j]),3) + vals1 = special.fresnel(szo)[0] + vals2 = special.fresnel(czo)[1] + assert_array_almost_equal(vals1,0,14) + assert_array_almost_equal(vals2,0,14) + + def test_fresnelc_zeros(self): + szo, czo = special.fresnel_zeros(6) + frc = special.fresnelc_zeros(6) + assert_array_almost_equal(frc,czo,12) + + def test_fresnels_zeros(self): + szo, czo = special.fresnel_zeros(5) + frs = special.fresnels_zeros(5) + assert_array_almost_equal(frs,szo,12) + + +class TestGamma: + def test_gamma(self): + gam = special.gamma(5) + assert_equal(gam,24.0) + + def test_gammaln(self): + gamln = special.gammaln(3) + lngam = log(special.gamma(3)) + assert_almost_equal(gamln,lngam,8) + + def test_gammainccinv(self): + gccinv = special.gammainccinv(.5,.5) + gcinv = special.gammaincinv(.5,.5) + assert_almost_equal(gccinv,gcinv,8) + + @with_special_errors + def test_gammaincinv(self): + y = special.gammaincinv(.4,.4) + x = special.gammainc(.4,y) + assert_almost_equal(x,0.4,1) + y = special.gammainc(10, 0.05) + x = special.gammaincinv(10, 2.5715803516000736e-20) + assert_almost_equal(0.05, x, decimal=10) + assert_almost_equal(y, 2.5715803516000736e-20, decimal=10) + x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18) + assert_almost_equal(11.0, x, decimal=10) + + @with_special_errors + def test_975(self): + # Regression test for ticket #975 -- switch point in algorithm + # check that things work OK at the point, immediately next floats + # around it, and a bit further away + pts = [0.25, 
+ np.nextafter(0.25, 0), 0.25 - 1e-12, + np.nextafter(0.25, 1), 0.25 + 1e-12] + for xp in pts: + y = special.gammaincinv(.4, xp) + x = special.gammainc(0.4, y) + assert_allclose(x, xp, rtol=1e-12) + + def test_rgamma(self): + rgam = special.rgamma(8) + rlgam = 1/special.gamma(8) + assert_almost_equal(rgam,rlgam,8) + + def test_infinity(self): + assert_(np.isinf(special.gamma(-1))) + assert_equal(special.rgamma(-1), 0) + + +class TestHankel: + + def test_negv1(self): + assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14) + + def test_hankel1(self): + hank1 = special.hankel1(1,.1) + hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j) + assert_almost_equal(hank1,hankrl,8) + + def test_negv1e(self): + assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14) + + def test_hankel1e(self): + hank1e = special.hankel1e(1,.1) + hankrle = special.hankel1(1,.1)*exp(-.1j) + assert_almost_equal(hank1e,hankrle,8) + + def test_negv2(self): + assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14) + + def test_hankel2(self): + hank2 = special.hankel2(1,.1) + hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j) + assert_almost_equal(hank2,hankrl2,8) + + def test_neg2e(self): + assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14) + + def test_hankl2e(self): + hank2e = special.hankel2e(1,.1) + hankrl2e = special.hankel2e(1,.1) + assert_almost_equal(hank2e,hankrl2e,8) + + +class TestHyper: + def test_h1vp(self): + h1 = special.h1vp(1,.1) + h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j) + assert_almost_equal(h1,h1real,8) + + def test_h2vp(self): + h2 = special.h2vp(1,.1) + h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j) + assert_almost_equal(h2,h2real,8) + + def test_hyp0f1(self): + # scalar input + assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12) + assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15) + + # float input, expected values match mpmath + x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 
1.5]) + expected = np.array([0.58493659229143, 0.70566805723127, 1.0, + 1.37789689539747, 1.60373685288480]) + assert_allclose(x, expected, rtol=1e-12) + + # complex input + x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j) + assert_allclose(x, expected.astype(complex), rtol=1e-12) + + # test broadcasting + x1 = [0.5, 1.5, 2.5] + x2 = [0, 1, 0.5] + x = special.hyp0f1(x1, x2) + expected = [1.0, 1.8134302039235093, 1.21482702689997] + assert_allclose(x, expected, rtol=1e-12) + x = special.hyp0f1(np.vstack([x1] * 2), x2) + assert_allclose(x, np.vstack([expected] * 2), rtol=1e-12) + assert_raises(ValueError, special.hyp0f1, + np.vstack([x1] * 3), [0, 1]) + + def test_hyp0f1_gh5764(self): + # Just checks the point that failed; there's a more systematic + # test in test_mpmath + res = special.hyp0f1(0.8, 0.5 + 0.5*1J) + # The expected value was generated using mpmath + assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665) + + def test_hyp1f1(self): + hyp1 = special.hyp1f1(.1,.1,.3) + assert_almost_equal(hyp1, 1.3498588075760032,7) + + # test contributed by Moritz Deger (2008-05-29) + # https://github.com/scipy/scipy/issues/1186 (Trac #659) + + # reference data obtained from mathematica [ a, b, x, m(a,b,x)]: + # produced with test_hyp1f1.nb + ref_data = array([ + [-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04], + [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00], + [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05], + [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08], + [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24], + [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21], + [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13], + [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13], + [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02], + [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10], 
+ [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01], + [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21], + [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20], + [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07], + [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03], + [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02], + [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11], + [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03], + [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17], + [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01], + [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00], + [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00], + [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23], + [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01], + [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04], + [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08], + [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01], + [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07], + [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03], + [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09], + [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06], + [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00], + [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01], + [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02], + [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02], + [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02], + [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00], + [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09], + [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 
9.90859484e+01], + [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00], + [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02], + [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05], + [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05], + [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02], + [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02], + [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13], + [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05], + [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12], + [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01], + [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16], + [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37], + [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06], + [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02], + [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12], + [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27], + [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04], + [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06], + [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07], + [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03], + [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07], + [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27], + [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12], + [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32], + [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04], + [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01], + [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02], + [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19], + [1.99890876e+01, 
2.20677419e+01, -2.51301778e+01, 1.23840297e-09], + [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31], + [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01], + [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02], + [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08], + [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09], + [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33], + [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01], + [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29], + [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01], + [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29], + [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02], + [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00], + [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08], + [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01], + [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01], + [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01], + [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13], + [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11], + [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02], + [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02], + [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01], + [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31], + [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04], + [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25], + [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01], + [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00], + [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02], + [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 
2.89306132e+05], + [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02], + [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01], + [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01], + [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00] + ]) + + for a,b,c,expected in ref_data: + result = special.hyp1f1(a,b,c) + assert_(abs(expected - result)/expected < 1e-4) + + def test_hyp1f1_gh2957(self): + hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933) + hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934) + assert_almost_equal(hyp1, hyp2, 12) + + def test_hyp1f1_gh2282(self): + hyp = special.hyp1f1(0.5, 1.5, -1000) + assert_almost_equal(hyp, 0.028024956081989643, 12) + + def test_hyp2f1(self): + # a collection of special cases taken from AMS 55 + values = [ + [0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))], + [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)], + [1, 1, 2, 0.2, -1/0.2*log(1-0.2)], + [3, 3.5, 1.5, 0.2**2, 0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))], + [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)], + [3, 4, 8, 1, + special.gamma(8) * special.gamma(8-4-3) + / special.gamma(8-3) / special.gamma(8-4)], + [3, 2, 3-2+1, -1, + 1./2**3*sqrt(pi) * special.gamma(1+3-2) + / special.gamma(1+0.5*3-2) / special.gamma(0.5+0.5*3)], + [5, 2, 5-2+1, -1, + 1./2**5*sqrt(pi) * special.gamma(1+5-2) + / special.gamma(1+0.5*5-2) / special.gamma(0.5+0.5*5)], + [4, 0.5+4, 1.5-2*4, -1./3, + (8./9)**(-2*4)*special.gamma(4./3) * special.gamma(1.5-2*4) + / special.gamma(3./2) / special.gamma(4./3-2*4)], + # and some others + # ticket #424 + [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484], + # negative integer a or b, with c-a-b integer and x > 0.9 + [-2,3,1,0.95,0.715], + [2,-3,1,0.95,-0.007], + [-6,3,1,0.95,0.0000810625], + [2,-5,1,0.95,-0.000029375], + # huge negative integers + (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24), + (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18), + ] + for i, (a, b, c, x, v) in 
enumerate(values): + cv = special.hyp2f1(a, b, c, x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_hyperu(self): + val1 = special.hyperu(1,0.1,100) + assert_almost_equal(val1,0.0098153,7) + a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2] + a,b = asarray(a), asarray(b) + z = 0.5 + hypu = special.hyperu(a,b,z) + hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) / + (special.gamma(1+a-b)*special.gamma(b)) - + z**(1-b)*special.hyp1f1(1+a-b,2-b,z) + / (special.gamma(a)*special.gamma(2-b))) + assert_array_almost_equal(hypu,hprl,12) + + def test_hyperu_gh2287(self): + assert_almost_equal(special.hyperu(1, 1.5, 20.2), + 0.048360918656699191, 12) + + +class TestBessel: + def test_itj0y0(self): + it0 = array(special.itj0y0(.2)) + assert_array_almost_equal( + it0, + array([0.19933433254006822, -0.34570883800412566]), + 8, + ) + + def test_it2j0y0(self): + it2 = array(special.it2j0y0(.2)) + assert_array_almost_equal( + it2, + array([0.0049937546274601858, -0.43423067011231614]), + 8, + ) + + def test_negv_iv(self): + assert_equal(special.iv(3,2), special.iv(-3,2)) + + def test_j0(self): + oz = special.j0(.1) + ozr = special.jn(0,.1) + assert_almost_equal(oz,ozr,8) + + def test_j1(self): + o1 = special.j1(.1) + o1r = special.jn(1,.1) + assert_almost_equal(o1,o1r,8) + + def test_jn(self): + jnnr = special.jn(1,.2) + assert_almost_equal(jnnr,0.099500832639235995,8) + + def test_negv_jv(self): + assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14) + + def test_jv(self): + values = [[0, 0.1, 0.99750156206604002], + [2./3, 1e-8, 0.3239028506761532e-5], + [2./3, 1e-10, 0.1503423854873779e-6], + [3.1, 1e-10, 0.1711956265409013e-32], + [2./3, 4.0, -0.2325440850267039], + ] + for i, (v, x, y) in enumerate(values): + yc = special.jv(v, x) + assert_almost_equal(yc, y, 8, err_msg='test #%d' % i) + + def test_negv_jve(self): + assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14) + + def test_jve(self): + jvexp = special.jve(1,.2) + 
assert_almost_equal(jvexp,0.099500832639235995,8) + jvexp1 = special.jve(1,.2+1j) + z = .2+1j + jvexpr = special.jv(1,z)*exp(-abs(z.imag)) + assert_almost_equal(jvexp1,jvexpr,8) + + def test_jn_zeros(self): + jn0 = special.jn_zeros(0,5) + jn1 = special.jn_zeros(1,5) + assert_array_almost_equal(jn0,array([2.4048255577, + 5.5200781103, + 8.6537279129, + 11.7915344391, + 14.9309177086]),4) + assert_array_almost_equal(jn1,array([3.83171, + 7.01559, + 10.17347, + 13.32369, + 16.47063]),4) + + jn102 = special.jn_zeros(102,5) + assert_allclose(jn102, array([110.89174935992040343, + 117.83464175788308398, + 123.70194191713507279, + 129.02417238949092824, + 134.00114761868422559]), rtol=1e-13) + + jn301 = special.jn_zeros(301,5) + assert_allclose(jn301, array([313.59097866698830153, + 323.21549776096288280, + 331.22338738656748796, + 338.39676338872084500, + 345.03284233056064157]), rtol=1e-13) + + def test_jn_zeros_slow(self): + jn0 = special.jn_zeros(0, 300) + assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13) + assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13) + assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13) + + jn10 = special.jn_zeros(10, 300) + assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13) + assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13) + assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13) + + jn3010 = special.jn_zeros(3010,5) + assert_allclose(jn3010, array([3036.86590780927, + 3057.06598526482, + 3073.66360690272, + 3088.37736494778, + 3101.86438139042]), rtol=1e-8) + + def test_jnjnp_zeros(self): + jn = special.jn + + def jnp(n, x): + return (jn(n-1,x) - jn(n+1,x))/2 + for nt in range(1, 30): + z, n, m, t = special.jnjnp_zeros(nt) + for zz, nn, tt in zip(z, n, t): + if tt == 0: + assert_allclose(jn(nn, zz), 0, atol=1e-6) + elif tt == 1: + assert_allclose(jnp(nn, zz), 0, atol=1e-6) + else: + raise AssertionError("Invalid t return for nt=%d" % nt) + + def test_jnp_zeros(self): + 
jnp = special.jnp_zeros(1,5) + assert_array_almost_equal(jnp, array([1.84118, + 5.33144, + 8.53632, + 11.70600, + 14.86359]),4) + jnp = special.jnp_zeros(443,5) + assert_allclose(special.jvp(443, jnp), 0, atol=1e-15) + + def test_jnyn_zeros(self): + jnz = special.jnyn_zeros(1,5) + assert_array_almost_equal(jnz,(array([3.83171, + 7.01559, + 10.17347, + 13.32369, + 16.47063]), + array([1.84118, + 5.33144, + 8.53632, + 11.70600, + 14.86359]), + array([2.19714, + 5.42968, + 8.59601, + 11.74915, + 14.89744]), + array([3.68302, + 6.94150, + 10.12340, + 13.28576, + 16.44006])),5) + + def test_jvp(self): + jvprim = special.jvp(2,2) + jv0 = (special.jv(1,2)-special.jv(3,2))/2 + assert_almost_equal(jvprim,jv0,10) + + def test_k0(self): + ozk = special.k0(.1) + ozkr = special.kv(0,.1) + assert_almost_equal(ozk,ozkr,8) + + def test_k0e(self): + ozke = special.k0e(.1) + ozker = special.kve(0,.1) + assert_almost_equal(ozke,ozker,8) + + def test_k1(self): + o1k = special.k1(.1) + o1kr = special.kv(1,.1) + assert_almost_equal(o1k,o1kr,8) + + def test_k1e(self): + o1ke = special.k1e(.1) + o1ker = special.kve(1,.1) + assert_almost_equal(o1ke,o1ker,8) + + def test_jacobi(self): + a = 5*np.random.random() - 1 + b = 5*np.random.random() - 1 + P0 = special.jacobi(0,a,b) + P1 = special.jacobi(1,a,b) + P2 = special.jacobi(2,a,b) + P3 = special.jacobi(3,a,b) + + assert_array_almost_equal(P0.c,[1],13) + assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13) + cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)] + p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]] + assert_array_almost_equal(P2.c,array(p2c)/8.0,13) + cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3), + 12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)] + p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]] + assert_array_almost_equal(P3.c,array(p3c)/48.0,13) + + def test_kn(self): + kn1 = special.kn(0,.2) + assert_almost_equal(kn1,1.7527038555281462,8) + + def test_negv_kv(self): + 
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2)) + + def test_kv0(self): + kv0 = special.kv(0,.2) + assert_almost_equal(kv0, 1.7527038555281462, 10) + + def test_kv1(self): + kv1 = special.kv(1,0.2) + assert_almost_equal(kv1, 4.775972543220472, 10) + + def test_kv2(self): + kv2 = special.kv(2,0.2) + assert_almost_equal(kv2, 49.51242928773287, 10) + + def test_kn_largeorder(self): + assert_allclose(special.kn(32, 1), 1.7516596664574289e+43) + + def test_kv_largearg(self): + assert_equal(special.kv(0, 1e19), 0) + + def test_negv_kve(self): + assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2)) + + def test_kve(self): + kve1 = special.kve(0,.2) + kv1 = special.kv(0,.2)*exp(.2) + assert_almost_equal(kve1,kv1,8) + z = .2+1j + kve2 = special.kve(0,z) + kv2 = special.kv(0,z)*exp(z) + assert_almost_equal(kve2,kv2,8) + + def test_kvp_v0n1(self): + z = 2.2 + assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10) + + def test_kvp_n1(self): + v = 3. + z = 2.2 + xc = -special.kv(v+1,z) + v/z*special.kv(v,z) + x = special.kvp(v,z, n=1) + assert_almost_equal(xc, x, 10) # this function (kvp) is broken + + def test_kvp_n2(self): + v = 3. 
+ z = 2.2 + xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z + x = special.kvp(v, z, n=2) + assert_almost_equal(xc, x, 10) + + def test_y0(self): + oz = special.y0(.1) + ozr = special.yn(0,.1) + assert_almost_equal(oz,ozr,8) + + def test_y1(self): + o1 = special.y1(.1) + o1r = special.yn(1,.1) + assert_almost_equal(o1,o1r,8) + + def test_y0_zeros(self): + yo,ypo = special.y0_zeros(2) + zo,zpo = special.y0_zeros(2,complex=1) + all = r_[yo,zo] + allval = r_[ypo,zpo] + assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11) + assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11) + + def test_y1_zeros(self): + y1 = special.y1_zeros(1) + assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5) + + def test_y1p_zeros(self): + y1p = special.y1p_zeros(1,complex=1) + assert_array_almost_equal( + y1p, + (array([0.5768+0.904j]), array([-0.7635+0.5892j])), + 3, + ) + + def test_yn_zeros(self): + an = special.yn_zeros(4,2) + assert_array_almost_equal(an,array([5.64515, 9.36162]),5) + an = special.yn_zeros(443,5) + assert_allclose(an, [450.13573091578090314, + 463.05692376675001542, + 472.80651546418663566, + 481.27353184725625838, + 488.98055964441374646], + rtol=1e-15,) + + def test_ynp_zeros(self): + ao = special.ynp_zeros(0,2) + assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6) + ao = special.ynp_zeros(43,5) + assert_allclose(special.yvp(43, ao), 0, atol=1e-15) + ao = special.ynp_zeros(443,5) + assert_allclose(special.yvp(443, ao), 0, atol=1e-9) + + def test_ynp_zeros_large_order(self): + ao = special.ynp_zeros(443,5) + assert_allclose(special.yvp(443, ao), 0, atol=1e-14) + + def test_yn(self): + yn2n = special.yn(1,.2) + assert_almost_equal(yn2n,-3.3238249881118471,8) + + def test_negv_yv(self): + assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14) + + def test_yv(self): + yv2 = special.yv(1,.2) + assert_almost_equal(yv2,-3.3238249881118471,8) + + def test_negv_yve(self): + assert_almost_equal(special.yve(-3,2), 
-special.yve(3,2), 14) + + def test_yve(self): + yve2 = special.yve(1,.2) + assert_almost_equal(yve2,-3.3238249881118471,8) + yve2r = special.yv(1,.2+1j)*exp(-1) + yve22 = special.yve(1,.2+1j) + assert_almost_equal(yve22,yve2r,8) + + def test_yvp(self): + yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0 + yvp1 = special.yvp(2,.2) + assert_array_almost_equal(yvp1,yvpr,10) + + def _cephes_vs_amos_points(self): + """Yield points at which to compare Cephes implementation to AMOS""" + # check several points, including large-amplitude ones + v = [-120, -100.3, -20., -10., -1., -.5, 0., 1., 12.49, 120., 301] + z = [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, 700.6, 1300, + 10003] + yield from itertools.product(v, z) + + # check half-integers; these are problematic points at least + # for cephes/iv + yield from itertools.product(0.5 + arange(-60, 60), [3.5]) + + def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None): + for v, z in self._cephes_vs_amos_points(): + if skip is not None and skip(v, z): + continue + c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z) + if np.isinf(c1): + assert_(np.abs(c2) >= 1e300, (v, z)) + elif np.isnan(c1): + assert_(c2.imag != 0, (v, z)) + else: + assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol) + if v == int(v): + assert_allclose(c3, c2, err_msg=(v, z), + rtol=rtol, atol=atol) + + @pytest.mark.xfail(platform.machine() == 'ppc64le', + reason="fails on ppc64le") + def test_jv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305) + + @pytest.mark.xfail(platform.machine() == 'ppc64le', + reason="fails on ppc64le") + def test_yv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305) + + def test_yv_cephes_vs_amos_only_small_orders(self): + def skipper(v, z): + return abs(v) > 50 + self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, + skip=skipper) + + def test_iv_cephes_vs_amos(self): + with 
np.errstate(all='ignore'): + self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305) + + @pytest.mark.slow + def test_iv_cephes_vs_amos_mass_test(self): + N = 1000000 + np.random.seed(1) + v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N) + x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N) + + imsk = (np.random.randint(8, size=N) == 0) + v[imsk] = v[imsk].astype(np.int64) + + with np.errstate(all='ignore'): + c1 = special.iv(v, x) + c2 = special.iv(v, x+0j) + + # deal with differences in the inf and zero cutoffs + c1[abs(c1) > 1e300] = np.inf + c2[abs(c2) > 1e300] = np.inf + c1[abs(c1) < 1e-300] = 0 + c2[abs(c2) < 1e-300] = 0 + + dc = abs(c1/c2 - 1) + dc[np.isnan(dc)] = 0 + + k = np.argmax(dc) + + # Most error apparently comes from AMOS and not our implementation; + # there are some problems near integer orders there + assert_( + dc[k] < 2e-7, + (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)) + ) + + def test_kv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305) + self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305) + + def test_ticket_623(self): + assert_allclose(special.jv(3, 4), 0.43017147387562193) + assert_allclose(special.jv(301, 1300), 0.0183487151115275) + assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048) + + def test_ticket_853(self): + """Negative-order Bessels""" + # cephes + assert_allclose(special.jv(-1, 1), -0.4400505857449335) + assert_allclose(special.jv(-2, 1), 0.1149034849319005) + assert_allclose(special.yv(-1, 1), 0.7812128213002887) + assert_allclose(special.yv(-2, 1), -1.650682606816255) + assert_allclose(special.iv(-1, 1), 0.5651591039924851) + assert_allclose(special.iv(-2, 1), 0.1357476697670383) + assert_allclose(special.kv(-1, 1), 0.6019072301972347) + assert_allclose(special.kv(-2, 1), 1.624838898635178) + assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952) + 
assert_allclose(special.yv(-0.5, 1), 0.6713967071418031) + assert_allclose(special.iv(-0.5, 1), 1.231200214592967) + assert_allclose(special.kv(-0.5, 1), 0.4610685044478945) + # amos + assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335) + assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005) + assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887) + assert_allclose(special.yv(-2, 1+0j), -1.650682606816255) + + assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851) + assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383) + assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347) + assert_allclose(special.kv(-2, 1+0j), 1.624838898635178) + + assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952) + assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j) + assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031) + assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j) + + assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967) + assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j) + assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945) + assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j) + + assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3)) + assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3)) + assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3)) + assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j)) + + assert_allclose( + special.hankel1(-0.5, 1+1j), + special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j) + ) + assert_allclose( + special.hankel2(-0.5, 1+1j), + special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j) + ) + + def test_ticket_854(self): + """Real-valued Bessel domains""" + assert_(isnan(special.jv(0.5, -1))) + assert_(isnan(special.iv(0.5, -1))) + assert_(isnan(special.yv(0.5, -1))) + assert_(isnan(special.yv(1, -1))) + 
assert_(isnan(special.kv(0.5, -1))) + assert_(isnan(special.kv(1, -1))) + assert_(isnan(special.jve(0.5, -1))) + assert_(isnan(special.ive(0.5, -1))) + assert_(isnan(special.yve(0.5, -1))) + assert_(isnan(special.yve(1, -1))) + assert_(isnan(special.kve(0.5, -1))) + assert_(isnan(special.kve(1, -1))) + assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1)) + assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1)) + + def test_gh_7909(self): + assert_(special.kv(1.5, 0) == np.inf) + assert_(special.kve(1.5, 0) == np.inf) + + def test_ticket_503(self): + """Real-valued Bessel I overflow""" + assert_allclose(special.iv(1, 700), 1.528500390233901e302) + assert_allclose(special.iv(1000, 1120), 1.301564549405821e301) + + def test_iv_hyperg_poles(self): + assert_allclose(special.iv(-0.5, 1), 1.231200214592967) + + def iv_series(self, v, z, n=200): + k = arange(0, n).astype(double) + r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1) + r[isnan(r)] = inf + r = exp(r) + err = abs(r).max() * finfo(double).eps * n + abs(r[-1])*10 + return r.sum(), err + + def test_i0_series(self): + for z in [1., 10., 200.5]: + value, err = self.iv_series(0, z) + assert_allclose(special.i0(z), value, atol=err, err_msg=z) + + def test_i1_series(self): + for z in [1., 10., 200.5]: + value, err = self.iv_series(1, z) + assert_allclose(special.i1(z), value, atol=err, err_msg=z) + + def test_iv_series(self): + for v in [-20., -10., -1., 0., 1., 12.49, 120.]: + for z in [1., 10., 200.5, -1+2j]: + value, err = self.iv_series(v, z) + assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z)) + + def test_i0(self): + values = [[0.0, 1.0], + [1e-10, 1.0], + [0.1, 0.9071009258], + [0.5, 0.6450352706], + [1.0, 0.4657596077], + [2.5, 0.2700464416], + [5.0, 0.1835408126], + [20.0, 0.0897803119], + ] + for i, (x, v) in enumerate(values): + cv = special.i0(x) * exp(-x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_i0e(self): + oize = 
special.i0e(.1) + oizer = special.ive(0,.1) + assert_almost_equal(oize,oizer,8) + + def test_i1(self): + values = [[0.0, 0.0], + [1e-10, 0.4999999999500000e-10], + [0.1, 0.0452984468], + [0.5, 0.1564208032], + [1.0, 0.2079104154], + [5.0, 0.1639722669], + [20.0, 0.0875062222], + ] + for i, (x, v) in enumerate(values): + cv = special.i1(x) * exp(-x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_i1e(self): + oi1e = special.i1e(.1) + oi1er = special.ive(1,.1) + assert_almost_equal(oi1e,oi1er,8) + + def test_iti0k0(self): + iti0 = array(special.iti0k0(5)) + assert_array_almost_equal( + iti0, + array([31.848667776169801, 1.5673873907283657]), + 5, + ) + + def test_it2i0k0(self): + it2k = special.it2i0k0(.1) + assert_array_almost_equal( + it2k, + array([0.0012503906973464409, 3.3309450354686687]), + 6, + ) + + def test_iv(self): + iv1 = special.iv(0,.1)*exp(-.1) + assert_almost_equal(iv1,0.90710092578230106,10) + + def test_negv_ive(self): + assert_equal(special.ive(3,2), special.ive(-3,2)) + + def test_ive(self): + ive1 = special.ive(0,.1) + iv1 = special.iv(0,.1)*exp(-.1) + assert_almost_equal(ive1,iv1,10) + + def test_ivp0(self): + assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10) + + def test_ivp(self): + y = (special.iv(0,2) + special.iv(2,2))/2 + x = special.ivp(1,2) + assert_almost_equal(x,y,10) + + +class TestLaguerre: + def test_laguerre(self): + lag0 = special.laguerre(0) + lag1 = special.laguerre(1) + lag2 = special.laguerre(2) + lag3 = special.laguerre(3) + lag4 = special.laguerre(4) + lag5 = special.laguerre(5) + assert_array_almost_equal(lag0.c,[1],13) + assert_array_almost_equal(lag1.c,[-1,1],13) + assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13) + assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13) + assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13) + assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13) + + def test_genlaguerre(self): + k = 5*np.random.random() - 0.9 
+ lag0 = special.genlaguerre(0,k) + lag1 = special.genlaguerre(1,k) + lag2 = special.genlaguerre(2,k) + lag3 = special.genlaguerre(3,k) + assert_equal(lag0.c, [1]) + assert_equal(lag1.c, [-1, k + 1]) + assert_almost_equal( + lag2.c, + array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0 + ) + assert_almost_equal( + lag3.c, + array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0 + ) + + +# Base polynomials come from Abrahmowitz and Stegan +class TestLegendre: + def test_legendre(self): + leg0 = special.legendre(0) + leg1 = special.legendre(1) + leg2 = special.legendre(2) + leg3 = special.legendre(3) + leg4 = special.legendre(4) + leg5 = special.legendre(5) + assert_equal(leg0.c, [1]) + assert_equal(leg1.c, [1,0]) + assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13) + assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0) + assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0) + assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0) + + @pytest.mark.parametrize('n', [1, 2, 3, 4, 5]) + @pytest.mark.parametrize('zr', [0.5241717, 12.80232, -9.699001, + 0.5122437, 0.1714377]) + @pytest.mark.parametrize('zi', [9.766818, 0.2999083, 8.24726, -22.84843, + -0.8792666]) + def test_lpn_against_clpmn(self, n, zr, zi): + reslpn = special.lpn(n, zr + zi*1j) + resclpmn = special.clpmn(0, n, zr+zi*1j) + assert_allclose(reslpn[0], resclpmn[0][0]) + assert_allclose(reslpn[1], resclpmn[1][0]) + + +class TestLambda: + def test_lmbda(self): + lam = special.lmbda(1,.1) + lamr = ( + array([special.jn(0,.1), 2*special.jn(1,.1)/.1]), + array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]) + ) + assert_array_almost_equal(lam,lamr,8) + + +class TestLog1p: + def test_log1p(self): + l1p = (special.log1p(10), special.log1p(11), special.log1p(12)) + l1prl = (log(11), log(12), log(13)) + assert_array_almost_equal(l1p,l1prl,8) + + def test_log1pmore(self): + l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2)) + l1pmrl = (log(2),log(2.1),log(2.2)) + 
assert_array_almost_equal(l1pm,l1pmrl,8) + + +class TestLegendreFunctions: + def test_clpmn(self): + z = 0.5+0.3j + clp = special.clpmn(2, 2, z, 3) + assert_array_almost_equal(clp, + (array([[1.0000, z, 0.5*(3*z*z-1)], + [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)], + [0.0000, 0.0000, 3*(z*z-1)]]), + array([[0.0000, 1.0000, 3*z], + [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)], + [0.0000, 0.0000, 6*z]])), + 7) + + def test_clpmn_close_to_real_2(self): + eps = 1e-10 + m = 1 + n = 3 + x = 0.5 + clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n] + clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n] + assert_array_almost_equal(array([clp_plus, clp_minus]), + array([special.lpmv(m, n, x), + special.lpmv(m, n, x)]), + 7) + + def test_clpmn_close_to_real_3(self): + eps = 1e-10 + m = 1 + n = 3 + x = 0.5 + clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n] + clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n] + assert_array_almost_equal(array([clp_plus, clp_minus]), + array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi), + special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]), + 7) + + def test_clpmn_across_unit_circle(self): + eps = 1e-7 + m = 1 + n = 1 + x = 1j + for type in [2, 3]: + assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n], + special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6) + + def test_inf(self): + for z in (1, -1): + for n in range(4): + for m in range(1, n): + lp = special.clpmn(m, n, z) + assert_(np.isinf(lp[1][1,1:]).all()) + lp = special.lpmn(m, n, z) + assert_(np.isinf(lp[1][1,1:]).all()) + + def test_deriv_clpmn(self): + # data inside and outside of the unit circle + zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j, + 1+1j, -1+1j, -1-1j, 1-1j] + m = 2 + n = 3 + for type in [2, 3]: + for z in zvals: + for h in [1e-3, 1e-3j]: + approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0] + - special.clpmn(m, n, z-0.5*h, type)[0])/h + assert_allclose(special.clpmn(m, n, z, type)[1], + approx_derivative, + rtol=1e-4) + + def test_lpmn(self): + lp = 
special.lpmn(0,2,.5) + assert_array_almost_equal(lp,(array([[1.00000, + 0.50000, + -0.12500]]), + array([[0.00000, + 1.00000, + 1.50000]])),4) + + def test_lpn(self): + lpnf = special.lpn(2,.5) + assert_array_almost_equal(lpnf,(array([1.00000, + 0.50000, + -0.12500]), + array([0.00000, + 1.00000, + 1.50000])),4) + + def test_lpmv(self): + lp = special.lpmv(0,2,.5) + assert_almost_equal(lp,-0.125,7) + lp = special.lpmv(0,40,.001) + assert_almost_equal(lp,0.1252678976534484,7) + + # XXX: this is outside the domain of the current implementation, + # so ensure it returns a NaN rather than a wrong answer. + with np.errstate(all='ignore'): + lp = special.lpmv(-1,-1,.001) + assert_(lp != 0 or np.isnan(lp)) + + def test_lqmn(self): + lqmnf = special.lqmn(0,2,.5) + lqf = special.lqn(2,.5) + assert_array_almost_equal(lqmnf[0][0],lqf[0],4) + assert_array_almost_equal(lqmnf[1][0],lqf[1],4) + + def test_lqmn_gt1(self): + """algorithm for real arguments changes at 1.0001 + test against analytical result for m=2, n=1 + """ + x0 = 1.0001 + delta = 0.00002 + for x in (x0-delta, x0+delta): + lq = special.lqmn(2, 1, x)[0][-1, -1] + expected = 2/(x*x-1) + assert_almost_equal(lq, expected) + + def test_lqmn_shape(self): + a, b = special.lqmn(4, 4, 1.1) + assert_equal(a.shape, (5, 5)) + assert_equal(b.shape, (5, 5)) + + a, b = special.lqmn(4, 0, 1.1) + assert_equal(a.shape, (5, 1)) + assert_equal(b.shape, (5, 1)) + + def test_lqn(self): + lqf = special.lqn(2,.5) + assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]), + array([1.3333, 1.216, -0.8427])),4) + + +class TestMathieu: + + def test_mathieu_a(self): + pass + + def test_mathieu_even_coef(self): + special.mathieu_even_coef(2,5) + # Q not defined broken and cannot figure out proper reporting order + + def test_mathieu_odd_coef(self): + # same problem as above + pass + + +class TestFresnelIntegral: + + def test_modfresnelp(self): + pass + + def test_modfresnelm(self): + pass + + +class TestOblCvSeq: + def 
test_obl_cv_seq(self): + obl = special.obl_cv_seq(0,3,1) + assert_array_almost_equal(obl,array([-0.348602, + 1.393206, + 5.486800, + 11.492120]),5) + + +class TestParabolicCylinder: + def test_pbdn_seq(self): + pb = special.pbdn_seq(1,.1) + assert_array_almost_equal(pb,(array([0.9975, + 0.0998]), + array([-0.0499, + 0.9925])),4) + + def test_pbdv(self): + special.pbdv(1,.2) + 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0] + + def test_pbdv_seq(self): + pbn = special.pbdn_seq(1,.1) + pbv = special.pbdv_seq(1,.1) + assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4) + + def test_pbdv_points(self): + # simple case + eta = np.linspace(-10, 10, 5) + z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta) + assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14) + + # some points + assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12) + assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12) + + def test_pbdv_gradient(self): + x = np.linspace(-4, 4, 8)[:,None] + eta = np.linspace(-10, 10, 5)[None,:] + + p = special.pbdv(eta, x) + eps = 1e-7 + 1e-7*abs(x) + dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2. + assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6) + + def test_pbvv_gradient(self): + x = np.linspace(-4, 4, 8)[:,None] + eta = np.linspace(-10, 10, 5)[None,:] + + p = special.pbvv(eta, x) + eps = 1e-7 + 1e-7*abs(x) + dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2. + assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6) + + def test_pbvv_seq(self): + res1, res2 = special.pbvv_seq(2, 3) + assert_allclose(res1, np.array([2.976319645712036, + 1.358840996329579, + 0.5501016716383508])) + assert_allclose(res2, np.array([3.105638472238475, + 0.9380581512176672, + 0.533688488872053])) + + +class TestPolygamma: + # from Table 6.2 (pg. 
271) of A&S + def test_polygamma(self): + poly2 = special.polygamma(2,1) + poly3 = special.polygamma(3,1) + assert_almost_equal(poly2,-2.4041138063,10) + assert_almost_equal(poly3,6.4939394023,10) + + # Test polygamma(0, x) == psi(x) + x = [2, 3, 1.1e14] + assert_almost_equal(special.polygamma(0, x), special.psi(x)) + + # Test broadcasting + n = [0, 1, 2] + x = [0.5, 1.5, 2.5] + expected = [-1.9635100260214238, 0.93480220054467933, + -0.23620405164172739] + assert_almost_equal(special.polygamma(n, x), expected) + expected = np.vstack([expected]*2) + assert_almost_equal(special.polygamma(n, np.vstack([x]*2)), + expected) + assert_almost_equal(special.polygamma(np.vstack([n]*2), x), + expected) + + +class TestProCvSeq: + def test_pro_cv_seq(self): + prol = special.pro_cv_seq(0,3,1) + assert_array_almost_equal(prol,array([0.319000, + 2.593084, + 6.533471, + 12.514462]),5) + + +class TestPsi: + def test_psi(self): + ps = special.psi(1) + assert_almost_equal(ps,-0.57721566490153287,8) + + +class TestRadian: + def test_radian(self): + rad = special.radian(90,0,0) + assert_almost_equal(rad,pi/2.0,5) + + def test_radianmore(self): + rad1 = special.radian(90,1,60) + assert_almost_equal(rad1,pi/2+0.0005816135199345904,5) + + +class TestRiccati: + def test_riccati_jn(self): + N, x = 2, 0.2 + S = np.empty((N, N)) + for n in range(N): + j = special.spherical_jn(n, x) + jp = special.spherical_jn(n, x, derivative=True) + S[0,n] = x*j + S[1,n] = x*jp + j + assert_array_almost_equal(S, special.riccati_jn(n, x), 8) + + def test_riccati_yn(self): + N, x = 2, 0.2 + C = np.empty((N, N)) + for n in range(N): + y = special.spherical_yn(n, x) + yp = special.spherical_yn(n, x, derivative=True) + C[0,n] = x*y + C[1,n] = x*yp + y + assert_array_almost_equal(C, special.riccati_yn(n, x), 8) + + +class TestRound: + def test_round(self): + rnd = list(map(int, (special.round(10.1), + special.round(10.4), + special.round(10.5), + special.round(10.6)))) + + # Note: According to the documentation, 
scipy.special.round is + # supposed to round to the nearest even number if the fractional + # part is exactly 0.5. On some platforms, this does not appear + # to work and thus this test may fail. However, this unit test is + # correctly written. + rndrl = (10,10,10,11) + assert_array_equal(rnd,rndrl) + + +def test_sph_harm(): + # Tests derived from tables in + # https://en.wikipedia.org/wiki/Table_of_spherical_harmonics + sh = special.sph_harm + pi = np.pi + exp = np.exp + sqrt = np.sqrt + sin = np.sin + cos = np.cos + assert_array_almost_equal(sh(0,0,0,0), + 0.5/sqrt(pi)) + assert_array_almost_equal(sh(-2,2,0.,pi/4), + 0.25*sqrt(15./(2.*pi)) * + (sin(pi/4))**2.) + assert_array_almost_equal(sh(-2,2,0.,pi/2), + 0.25*sqrt(15./(2.*pi))) + assert_array_almost_equal(sh(2,2,pi,pi/2), + 0.25*sqrt(15/(2.*pi)) * + exp(0+2.*pi*1j)*sin(pi/2.)**2.) + assert_array_almost_equal(sh(2,4,pi/4.,pi/3.), + (3./8.)*sqrt(5./(2.*pi)) * + exp(0+2.*pi/4.*1j) * + sin(pi/3.)**2. * + (7.*cos(pi/3.)**2.-1)) + assert_array_almost_equal(sh(4,4,pi/8.,pi/6.), + (3./16.)*sqrt(35./(2.*pi)) * + exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.) 
+ + +def test_sph_harm_ufunc_loop_selection(): + # see https://github.com/scipy/scipy/issues/4895 + dt = np.dtype(np.complex128) + assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt) + assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt) + assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt) + assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt) + assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt) + assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt) + + +class TestStruve: + def _series(self, v, z, n=100): + """Compute Struve function & error estimate from its power series.""" + k = arange(0, n) + r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5) + err = abs(r).max() * finfo(double).eps * n + return r.sum(), err + + def test_vs_series(self): + """Check Struve function versus its power series""" + for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]: + for z in [1, 10, 19, 21, 30]: + value, err = self._series(v, z) + assert_allclose(special.struve(v, z), value, rtol=0, atol=err), (v, z) + + def test_some_values(self): + assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7) + assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8) + assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12) + assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11) + assert_equal(special.struve(-12, -41), -special.struve(-12, 41)) + assert_equal(special.struve(+12, -41), -special.struve(+12, 41)) + assert_equal(special.struve(-11, -41), +special.struve(-11, 41)) + assert_equal(special.struve(+11, -41), +special.struve(+11, 41)) + + assert_(isnan(special.struve(-7.1, -1))) + assert_(isnan(special.struve(-10.1, -1))) + + def test_regression_679(self): + """Regression test for #679""" + assert_allclose(special.struve(-1.0, 20 - 1e-8), + special.struve(-1.0, 20 + 1e-8)) + assert_allclose(special.struve(-2.0, 20 - 1e-8), + special.struve(-2.0, 20 + 1e-8)) + 
assert_allclose(special.struve(-4.3, 20 - 1e-8), + special.struve(-4.3, 20 + 1e-8)) + + +def test_chi2_smalldf(): + assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110) + + +def test_ch2_inf(): + assert_equal(special.chdtr(0.7,np.inf), 1.0) + + +def test_chi2c_smalldf(): + assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110) + + +def test_chi2_inv_smalldf(): + assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3) + + +def test_agm_simple(): + rtol = 1e-13 + + # Gauss's constant + assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186, + rtol=rtol) + + # These values were computed using Wolfram Alpha, with the + # function ArithmeticGeometricMean[a, b]. + agm13 = 1.863616783244897 + agm15 = 2.604008190530940 + agm35 = 3.936235503649555 + assert_allclose(special.agm([[1], [3]], [1, 3, 5]), + [[1, agm13, agm15], + [agm13, 3, agm35]], rtol=rtol) + + # Computed by the iteration formula using mpmath, + # with mpmath.mp.prec = 1000: + agm12 = 1.4567910310469068 + assert_allclose(special.agm(1, 2), agm12, rtol=rtol) + assert_allclose(special.agm(2, 1), agm12, rtol=rtol) + assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol) + assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol) + assert_allclose(special.agm(13, 123456789.5), 11111458.498599306, + rtol=rtol) + assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol) + assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol) + assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178, + rtol=rtol) + assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177, + rtol=rtol) + assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152, + rtol=rtol) + fi = np.finfo(1.0) + assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305, + rtol=rtol) + assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308, + rtol=rtol) + assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308, 
+ rtol=rtol) + + # zero, nan and inf cases. + assert_equal(special.agm(0, 0), 0) + assert_equal(special.agm(99, 0), 0) + + assert_equal(special.agm(-1, 10), np.nan) + assert_equal(special.agm(0, np.inf), np.nan) + assert_equal(special.agm(np.inf, 0), np.nan) + assert_equal(special.agm(0, -np.inf), np.nan) + assert_equal(special.agm(-np.inf, 0), np.nan) + assert_equal(special.agm(np.inf, -np.inf), np.nan) + assert_equal(special.agm(-np.inf, np.inf), np.nan) + assert_equal(special.agm(1, np.nan), np.nan) + assert_equal(special.agm(np.nan, -1), np.nan) + + assert_equal(special.agm(1, np.inf), np.inf) + assert_equal(special.agm(np.inf, 1), np.inf) + assert_equal(special.agm(-1, -np.inf), -np.inf) + assert_equal(special.agm(-np.inf, -1), -np.inf) + + +def test_legacy(): + # Legacy behavior: truncating arguments to integers + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "floating point number truncated to an integer") + assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3)) + assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3)) + assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3)) + assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3)) + assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3)) + assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3)) + assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3)) + assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3)) + assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3)) + + +@with_special_errors +def test_error_raising(): + assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j) + + +def test_xlogy(): + def xfunc(x, y): + with np.errstate(invalid='ignore'): + if x == 0 and not np.isnan(y): + return x + else: + return x*np.log(y) + + z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float) + z2 = np.r_[z1, [(0, 1j), (1, 1j)]] + + w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) + 
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13) + w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1]) + assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13) + + +def test_xlog1py(): + def xfunc(x, y): + with np.errstate(invalid='ignore'): + if x == 0 and not np.isnan(y): + return x + else: + return x * np.log1p(y) + + z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0), + (1, 1e-30)], dtype=float) + w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) + assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13) + + +def test_entr(): + def xfunc(x): + if x < 0: + return -np.inf + else: + return -special.xlogy(x, x) + values = (0, 0.5, 1.0, np.inf) + signs = [-1, 1] + arr = [] + for sgn, v in itertools.product(signs, values): + arr.append(sgn * v) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z) + assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13) + + +def test_kl_div(): + def xfunc(x, y): + if x < 0 or y < 0 or (y == 0 and x != 0): + # extension of natural domain to preserve convexity + return np.inf + elif np.isposinf(x) or np.isposinf(y): + # limits within the natural domain + return np.inf + elif x == 0: + return y + else: + return special.xlogy(x, x/y) - x + y + values = (0, 0.5, 1.0) + signs = [-1, 1] + arr = [] + for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): + arr.append((sgna*va, sgnb*vb)) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13) + + +def test_rel_entr(): + def xfunc(x, y): + if x > 0 and y > 0: + return special.xlogy(x, x/y) + elif x == 0 and y >= 0: + return 0 + else: + return np.inf + values = (0, 0.5, 1.0) + signs = [-1, 1] + arr = [] + for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): + arr.append((sgna*va, sgnb*vb)) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) 
+ assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13) + + +def test_huber(): + assert_equal(special.huber(-1, 1.5), np.inf) + assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5)) + assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2)) + + def xfunc(delta, r): + if delta < 0: + return np.inf + elif np.abs(r) < delta: + return 0.5 * np.square(r) + else: + return delta * (np.abs(r) - 0.5 * delta) + + z = np.random.randn(10, 2) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13) + + +def test_pseudo_huber(): + def xfunc(delta, r): + if delta < 0: + return np.inf + elif (not delta) or (not r): + return 0 + else: + return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1) + + z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]]) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13) + + +def test_pseudo_huber_small_r(): + delta = 1.0 + r = 1e-18 + y = special.pseudo_huber(delta, r) + # expected computed with mpmath: + # import mpmath + # mpmath.mp.dps = 200 + # r = mpmath.mpf(1e-18) + # expected = float(mpmath.sqrt(1 + r**2) - 1) + expected = 5.0000000000000005e-37 + assert_allclose(y, expected, rtol=1e-13) + + +def test_runtime_warning(): + with pytest.warns(RuntimeWarning, + match=r'Too many predicted coefficients'): + mathieu_odd_coef(1000, 1000) + with pytest.warns(RuntimeWarning, + match=r'Too many predicted coefficients'): + mathieu_even_coef(1000, 1000) + + +class TestStirling2: + table = [ + [1], + [0, 1], + [0, 1, 1], + [0, 1, 3, 1], + [0, 1, 7, 6, 1], + [0, 1, 15, 25, 10, 1], + [0, 1, 31, 90, 65, 15, 1], + [0, 1, 63, 301, 350, 140, 21, 1], + [0, 1, 127, 966, 1701, 1050, 266, 28, 1], + [0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1], + [0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1], + ] + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, 
{}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_table_cases(self, is_exact, comp, kwargs): + for n in range(1, len(self.table)): + k_values = list(range(n+1)) + row = self.table[n] + comp(row, stirling2([n], k_values, exact=is_exact), **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_valid_single_integer(self, is_exact, comp, kwargs): + comp(stirling2(0, 0, exact=is_exact), self.table[0][0], **kwargs) + comp(stirling2(4, 2, exact=is_exact), self.table[4][2], **kwargs) + # a single 2-tuple of integers as arguments must return an int and not + # an array whereas arrays of single values should return array + comp(stirling2(5, 3, exact=is_exact), 25, **kwargs) + comp(stirling2([5], [3], exact=is_exact), [25], **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_negative_integer(self, is_exact, comp, kwargs): + # negative integers for n or k arguments return 0 + comp(stirling2(-1, -1, exact=is_exact), 0, **kwargs) + comp(stirling2(-1, 2, exact=is_exact), 0, **kwargs) + comp(stirling2(2, -1, exact=is_exact), 0, **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_array_inputs(self, is_exact, comp, kwargs): + ans = [self.table[10][3], self.table[10][4]] + comp(stirling2(asarray([10, 10]), + asarray([3, 4]), + exact=is_exact), + ans) + comp(stirling2([10, 10], + asarray([3, 4]), + exact=is_exact), + ans) + comp(stirling2(asarray([10, 10]), + [3, 4], + exact=is_exact), + ans) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-13}) + ]) + def test_mixed_values(self, is_exact, comp, kwargs): + # negative values-of either n or k-should return 0 for the entry + ans = [0, 1, 3, 25, 1050, 
5880, 9330] + n = [-1, 0, 3, 5, 8, 10, 10] + k = [-2, 0, 2, 3, 5, 7, 3] + comp(stirling2(n, k, exact=is_exact), ans, **kwargs) + + def test_correct_parity(self): + """Test parity follows well known identity. + + en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind#Parity + """ + n, K = 100, np.arange(101) + assert_equal( + stirling2(n, K, exact=True) % 2, + [math.comb(n - (k // 2) - 1, n - k) % 2 for k in K], + ) + + def test_big_numbers(self): + # via mpmath (bigger than 32bit) + ans = asarray([48063331393110, 48004081105038305]) + n = [25, 30] + k = [17, 4] + assert array_equal(stirling2(n, k, exact=True), ans) + # bigger than 64 bit + ans = asarray([2801934359500572414253157841233849412, + 14245032222277144547280648984426251]) + n = [42, 43] + k = [17, 23] + assert array_equal(stirling2(n, k, exact=True), ans) + + @pytest.mark.parametrize("N", [4.5, 3., 4+1j, "12", np.nan]) + @pytest.mark.parametrize("K", [3.5, 3, "2", None]) + @pytest.mark.parametrize("is_exact", [True, False]) + def test_unsupported_input_types(self, N, K, is_exact): + # object, float, string, complex are not supported and raise TypeError + with pytest.raises(TypeError): + stirling2(N, K, exact=is_exact) + + @pytest.mark.parametrize("is_exact", [True, False]) + def test_numpy_array_int_object_dtype(self, is_exact): + # python integers with arbitrary precision are *not* allowed as + # object type in numpy arrays are inconsistent from api perspective + ans = asarray(self.table[4][1:]) + n = asarray([4, 4, 4, 4], dtype=object) + k = asarray([1, 2, 3, 4], dtype=object) + with pytest.raises(TypeError): + array_equal(stirling2(n, k, exact=is_exact), ans) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-13}) + ]) + def test_numpy_array_unsigned_int_dtype(self, is_exact, comp, kwargs): + # numpy unsigned integers are allowed as dtype in numpy arrays + ans = asarray(self.table[4][1:]) + n = asarray([4, 4, 4, 4], 
dtype=np_ulong) + k = asarray([1, 2, 3, 4], dtype=np_ulong) + comp(stirling2(n, k, exact=False), ans, **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-13}) + ]) + def test_broadcasting_arrays_correctly(self, is_exact, comp, kwargs): + # broadcasting is handled by stirling2 + # test leading 1s are replicated + ans = asarray([[1, 15, 25, 10], [1, 7, 6, 1]]) # shape (2,4) + n = asarray([[5, 5, 5, 5], [4, 4, 4, 4]]) # shape (2,4) + k = asarray([1, 2, 3, 4]) # shape (4,) + comp(stirling2(n, k, exact=is_exact), ans, **kwargs) + # test that dims both mismatch broadcast correctly (5,1) & (6,) + n = asarray([[4], [4], [4], [4], [4]]) + k = asarray([0, 1, 2, 3, 4, 5]) + ans = asarray([[0, 1, 7, 6, 1, 0] for _ in range(5)]) + comp(stirling2(n, k, exact=False), ans, **kwargs) + + def test_temme_rel_max_error(self): + # python integers with arbitrary precision are *not* allowed as + # object type in numpy arrays are inconsistent from api perspective + x = list(range(51, 101, 5)) + for n in x: + k_entries = list(range(1, n+1)) + denom = stirling2([n], k_entries, exact=True) + num = denom - stirling2([n], k_entries, exact=False) + assert np.max(np.abs(num / denom)) < 2e-5 diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_bdtr.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_bdtr.py new file mode 100644 index 0000000000000000000000000000000000000000..57694becc49b2028f17eac819b80a225ac010795 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_bdtr.py @@ -0,0 +1,112 @@ +import numpy as np +import scipy.special as sc +import pytest +from numpy.testing import assert_allclose, assert_array_equal, suppress_warnings + + +class TestBdtr: + def test(self): + val = sc.bdtr(0, 1, 0.5) + assert_allclose(val, 0.5) + + def test_sum_is_one(self): + val = sc.bdtr([0, 1, 2], 2, 0.5) + assert_array_equal(val, [0.25, 0.75, 1.0]) 
+ + def test_rounding(self): + double_val = sc.bdtr([0.1, 1.1, 2.1], 2, 0.5) + int_val = sc.bdtr([0, 1, 2], 2, 0.5) + assert_array_equal(double_val, int_val) + + @pytest.mark.parametrize('k, n, p', [ + (np.inf, 2, 0.5), + (1.0, np.inf, 0.5), + (1.0, 2, np.inf) + ]) + def test_inf(self, k, n, p): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + val = sc.bdtr(k, n, p) + assert np.isnan(val) + + def test_domain(self): + val = sc.bdtr(-1.1, 1, 0.5) + assert np.isnan(val) + + +class TestBdtrc: + def test_value(self): + val = sc.bdtrc(0, 1, 0.5) + assert_allclose(val, 0.5) + + def test_sum_is_one(self): + val = sc.bdtrc([0, 1, 2], 2, 0.5) + assert_array_equal(val, [0.75, 0.25, 0.0]) + + def test_rounding(self): + double_val = sc.bdtrc([0.1, 1.1, 2.1], 2, 0.5) + int_val = sc.bdtrc([0, 1, 2], 2, 0.5) + assert_array_equal(double_val, int_val) + + @pytest.mark.parametrize('k, n, p', [ + (np.inf, 2, 0.5), + (1.0, np.inf, 0.5), + (1.0, 2, np.inf) + ]) + def test_inf(self, k, n, p): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + val = sc.bdtrc(k, n, p) + assert np.isnan(val) + + def test_domain(self): + val = sc.bdtrc(-1.1, 1, 0.5) + val2 = sc.bdtrc(2.1, 1, 0.5) + assert np.isnan(val2) + assert_allclose(val, 1.0) + + def test_bdtr_bdtrc_sum_to_one(self): + bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5) + bdtrc_vals = sc.bdtrc([0, 1, 2], 2, 0.5) + vals = bdtr_vals + bdtrc_vals + assert_allclose(vals, [1.0, 1.0, 1.0]) + + +class TestBdtri: + def test_value(self): + val = sc.bdtri(0, 1, 0.5) + assert_allclose(val, 0.5) + + def test_sum_is_one(self): + val = sc.bdtri([0, 1], 2, 0.5) + actual = np.asarray([1 - 1/np.sqrt(2), 1/np.sqrt(2)]) + assert_allclose(val, actual) + + def test_rounding(self): + double_val = sc.bdtri([0.1, 1.1], 2, 0.5) + int_val = sc.bdtri([0, 1], 2, 0.5) + assert_allclose(double_val, int_val) + + @pytest.mark.parametrize('k, n, p', [ + (np.inf, 2, 0.5), + (1.0, np.inf, 0.5), + (1.0, 2, np.inf) + ]) + def test_inf(self, k, n, 
p): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + val = sc.bdtri(k, n, p) + assert np.isnan(val) + + @pytest.mark.parametrize('k, n, p', [ + (-1.1, 1, 0.5), + (2.1, 1, 0.5) + ]) + def test_domain(self, k, n, p): + val = sc.bdtri(k, n, p) + assert np.isnan(val) + + def test_bdtr_bdtri_roundtrip(self): + bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5) + roundtrip_vals = sc.bdtri([0, 1, 2], 2, bdtr_vals) + assert_allclose(roundtrip_vals, [0.5, 0.5, np.nan]) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_boxcox.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_boxcox.py new file mode 100644 index 0000000000000000000000000000000000000000..f6a0d4305c191d8f157ef152567dc511463285bd --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_boxcox.py @@ -0,0 +1,106 @@ +import numpy as np +from numpy.testing import assert_equal, assert_almost_equal, assert_allclose +from scipy.special import boxcox, boxcox1p, inv_boxcox, inv_boxcox1p + + +# There are more tests of boxcox and boxcox1p in test_mpmath.py. 
+ +def test_boxcox_basic(): + x = np.array([0.5, 1, 2, 4]) + + # lambda = 0 => y = log(x) + y = boxcox(x, 0) + assert_almost_equal(y, np.log(x)) + + # lambda = 1 => y = x - 1 + y = boxcox(x, 1) + assert_almost_equal(y, x - 1) + + # lambda = 2 => y = 0.5*(x**2 - 1) + y = boxcox(x, 2) + assert_almost_equal(y, 0.5*(x**2 - 1)) + + # x = 0 and lambda > 0 => y = -1 / lambda + lam = np.array([0.5, 1, 2]) + y = boxcox(0, lam) + assert_almost_equal(y, -1.0 / lam) + +def test_boxcox_underflow(): + x = 1 + 1e-15 + lmbda = 1e-306 + y = boxcox(x, lmbda) + assert_allclose(y, np.log(x), rtol=1e-14) + + +def test_boxcox_nonfinite(): + # x < 0 => y = nan + x = np.array([-1, -1, -0.5]) + y = boxcox(x, [0.5, 2.0, -1.5]) + assert_equal(y, np.array([np.nan, np.nan, np.nan])) + + # x = 0 and lambda <= 0 => y = -inf + x = 0 + y = boxcox(x, [-2.5, 0]) + assert_equal(y, np.array([-np.inf, -np.inf])) + + +def test_boxcox1p_basic(): + x = np.array([-0.25, -1e-20, 0, 1e-20, 0.25, 1, 3]) + + # lambda = 0 => y = log(1+x) + y = boxcox1p(x, 0) + assert_almost_equal(y, np.log1p(x)) + + # lambda = 1 => y = x + y = boxcox1p(x, 1) + assert_almost_equal(y, x) + + # lambda = 2 => y = 0.5*((1+x)**2 - 1) = 0.5*x*(2 + x) + y = boxcox1p(x, 2) + assert_almost_equal(y, 0.5*x*(2 + x)) + + # x = -1 and lambda > 0 => y = -1 / lambda + lam = np.array([0.5, 1, 2]) + y = boxcox1p(-1, lam) + assert_almost_equal(y, -1.0 / lam) + + +def test_boxcox1p_underflow(): + x = np.array([1e-15, 1e-306]) + lmbda = np.array([1e-306, 1e-18]) + y = boxcox1p(x, lmbda) + assert_allclose(y, np.log1p(x), rtol=1e-14) + + +def test_boxcox1p_nonfinite(): + # x < -1 => y = nan + x = np.array([-2, -2, -1.5]) + y = boxcox1p(x, [0.5, 2.0, -1.5]) + assert_equal(y, np.array([np.nan, np.nan, np.nan])) + + # x = -1 and lambda <= 0 => y = -inf + x = -1 + y = boxcox1p(x, [-2.5, 0]) + assert_equal(y, np.array([-np.inf, -np.inf])) + + +def test_inv_boxcox(): + x = np.array([0., 1., 2.]) + lam = np.array([0., 1., 2.]) + y = boxcox(x, lam) + x2 = 
inv_boxcox(y, lam) + assert_almost_equal(x, x2) + + x = np.array([0., 1., 2.]) + lam = np.array([0., 1., 2.]) + y = boxcox1p(x, lam) + x2 = inv_boxcox1p(y, lam) + assert_almost_equal(x, x2) + + +def test_inv_boxcox1p_underflow(): + x = 1e-15 + lam = 1e-306 + y = inv_boxcox1p(x, lam) + assert_allclose(y, x, rtol=1e-14) + diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py new file mode 100644 index 0000000000000000000000000000000000000000..ca3e82299824b1b349ef46f0266230fc3ad2fa7e --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py @@ -0,0 +1,527 @@ +""" +Test cdflib functions versus mpmath, if available. + +The following functions still need tests: + +- ncfdtr +- ncfdtri +- ncfdtridfn +- ncfdtridfd +- ncfdtrinc +- nbdtrik +- nbdtrin +- pdtrik +- nctdtr +- nctdtrit +- nctdtridf +- nctdtrinc + +""" +import itertools + +import numpy as np +from numpy.testing import assert_equal, assert_allclose +import pytest + +import scipy.special as sp +from scipy.special._testutils import ( + MissingModule, check_version, FuncData) +from scipy.special._mptestutils import ( + Arg, IntArg, get_args, mpf2float, assert_mpmath_equal) + +try: + import mpmath +except ImportError: + mpmath = MissingModule('mpmath') + + +class ProbArg: + """Generate a set of probabilities on [0, 1].""" + + def __init__(self): + # Include the endpoints for compatibility with Arg et. al. 
+ self.a = 0 + self.b = 1 + + def values(self, n): + """Return an array containing approximately n numbers.""" + m = max(1, n//3) + v1 = np.logspace(-30, np.log10(0.3), m) + v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:] + v3 = 1 - np.logspace(np.log10(0.3), -15, m) + v = np.r_[v1, v2, v3] + return np.unique(v) + + +class EndpointFilter: + def __init__(self, a, b, rtol, atol): + self.a = a + self.b = b + self.rtol = rtol + self.atol = atol + + def __call__(self, x): + mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol + mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol + return np.where(mask1 | mask2, False, True) + + +class _CDFData: + def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True, + dps=20, n=5000, rtol=None, atol=None, + endpt_rtol=None, endpt_atol=None): + self.spfunc = spfunc + self.mpfunc = mpfunc + self.index = index + self.argspec = argspec + self.spfunc_first = spfunc_first + self.dps = dps + self.n = n + self.rtol = rtol + self.atol = atol + + if not isinstance(argspec, list): + self.endpt_rtol = None + self.endpt_atol = None + elif endpt_rtol is not None or endpt_atol is not None: + if isinstance(endpt_rtol, list): + self.endpt_rtol = endpt_rtol + else: + self.endpt_rtol = [endpt_rtol]*len(self.argspec) + if isinstance(endpt_atol, list): + self.endpt_atol = endpt_atol + else: + self.endpt_atol = [endpt_atol]*len(self.argspec) + else: + self.endpt_rtol = None + self.endpt_atol = None + + def idmap(self, *args): + if self.spfunc_first: + res = self.spfunc(*args) + if np.isnan(res): + return np.nan + args = list(args) + args[self.index] = res + with mpmath.workdps(self.dps): + res = self.mpfunc(*tuple(args)) + # Imaginary parts are spurious + res = mpf2float(res.real) + else: + with mpmath.workdps(self.dps): + res = self.mpfunc(*args) + res = mpf2float(res.real) + args = list(args) + args[self.index] = res + res = self.spfunc(*tuple(args)) + return res + + def get_param_filter(self): + if 
self.endpt_rtol is None and self.endpt_atol is None: + return None + + filters = [] + for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec): + if rtol is None and atol is None: + filters.append(None) + continue + elif rtol is None: + rtol = 0.0 + elif atol is None: + atol = 0.0 + + filters.append(EndpointFilter(spec.a, spec.b, rtol, atol)) + return filters + + def check(self): + # Generate values for the arguments + args = get_args(self.argspec, self.n) + param_filter = self.get_param_filter() + param_columns = tuple(range(args.shape[1])) + result_columns = args.shape[1] + args = np.hstack((args, args[:, self.index].reshape(args.shape[0], 1))) + FuncData(self.idmap, args, + param_columns=param_columns, result_columns=result_columns, + rtol=self.rtol, atol=self.atol, vectorized=False, + param_filter=param_filter).check() + + +def _assert_inverts(*a, **kw): + d = _CDFData(*a, **kw) + d.check() + + +def _binomial_cdf(k, n, p): + k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p) + if k <= 0: + return mpmath.mpf(0) + elif k >= n: + return mpmath.mpf(1) + + onemp = mpmath.fsub(1, p, exact=True) + return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True) + + +def _f_cdf(dfn, dfd, x): + if x < 0: + return mpmath.mpf(0) + dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x) + ub = dfn*x/(dfn*x + dfd) + res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True) + return res + + +def _student_t_cdf(df, t, dps=None): + if dps is None: + dps = mpmath.mp.dps + with mpmath.workdps(dps): + df, t = mpmath.mpf(df), mpmath.mpf(t) + fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df) + fac *= t*mpmath.gamma(0.5*(df + 1)) + fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df) + return 0.5 + fac + + +def _noncentral_chi_pdf(t, df, nc): + res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t)) + res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2 + return res + + +def _noncentral_chi_cdf(x, df, nc, dps=None): + if dps is None: + dps = 
mpmath.mp.dps + x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc) + with mpmath.workdps(dps): + res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x]) + return res + + +def _tukey_lmbda_quantile(p, lmbda): + # For lmbda != 0 + return (p**lmbda - (1 - p)**lmbda)/lmbda + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +class TestCDFlib: + + @pytest.mark.xfail(run=False) + def test_bdtrik(self): + _assert_inverts( + sp.bdtrik, + _binomial_cdf, + 0, [ProbArg(), IntArg(1, 1000), ProbArg()], + rtol=1e-4) + + def test_bdtrin(self): + _assert_inverts( + sp.bdtrin, + _binomial_cdf, + 1, [IntArg(1, 1000), ProbArg(), ProbArg()], + rtol=1e-4, endpt_atol=[None, None, 1e-6]) + + def test_btdtria(self): + _assert_inverts( + sp.btdtria, + lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True), + 0, [ProbArg(), Arg(0, 1e2, inclusive_a=False), + Arg(0, 1, inclusive_a=False, inclusive_b=False)], + rtol=1e-6) + + def test_btdtrib(self): + # Use small values of a or mpmath doesn't converge + _assert_inverts( + sp.btdtrib, + lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True), + 1, + [Arg(0, 1e2, inclusive_a=False), ProbArg(), + Arg(0, 1, inclusive_a=False, inclusive_b=False)], + rtol=1e-7, + endpt_atol=[None, 1e-18, 1e-15]) + + @pytest.mark.xfail(run=False) + def test_fdtridfd(self): + _assert_inverts( + sp.fdtridfd, + _f_cdf, + 1, + [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)], + rtol=1e-7) + + def test_gdtria(self): + _assert_inverts( + sp.gdtria, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 0, + [ProbArg(), Arg(0, 1e3, inclusive_a=False), + Arg(0, 1e4, inclusive_a=False)], + rtol=1e-7, + endpt_atol=[None, 1e-7, 1e-10]) + + def test_gdtrib(self): + # Use small values of a and x or mpmath doesn't converge + _assert_inverts( + sp.gdtrib, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 1, + [Arg(0, 1e2, inclusive_a=False), ProbArg(), + Arg(0, 1e3, inclusive_a=False)], + rtol=1e-5) + + def 
test_gdtrix(self): + _assert_inverts( + sp.gdtrix, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 2, + [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False), + ProbArg()], + rtol=1e-7, + endpt_atol=[None, 1e-7, 1e-10]) + + # Overall nrdtrimn and nrdtrisd are not performing well with infeasible/edge + # combinations of sigma and x, hence restricted the domains to still use the + # testing machinery, also see gh-20069 + + # nrdtrimn signature: p, sd, x + # nrdtrisd signature: mn, p, x + def test_nrdtrimn(self): + _assert_inverts( + sp.nrdtrimn, + lambda x, y, z: mpmath.ncdf(z, x, y), + 0, + [ProbArg(), # CDF value p + Arg(0.1, np.inf, inclusive_a=False, inclusive_b=False), # sigma + Arg(-1e10, 1e10)], # x + rtol=1e-5) + + def test_nrdtrisd(self): + _assert_inverts( + sp.nrdtrisd, + lambda x, y, z: mpmath.ncdf(z, x, y), + 1, + [Arg(-np.inf, 10, inclusive_a=False, inclusive_b=False), # mn + ProbArg(), # CDF value p + Arg(10, 1e100)], # x + rtol=1e-5) + + def test_stdtr(self): + # Ideally the left endpoint for Arg() should be 0. 
+ assert_mpmath_equal( + sp.stdtr, + _student_t_cdf, + [IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7) + + @pytest.mark.xfail(run=False) + def test_stdtridf(self): + _assert_inverts( + sp.stdtridf, + _student_t_cdf, + 0, [ProbArg(), Arg()], rtol=1e-7) + + def test_stdtrit(self): + _assert_inverts( + sp.stdtrit, + _student_t_cdf, + 1, [IntArg(1, 100), ProbArg()], rtol=1e-7, + endpt_atol=[None, 1e-10]) + + def test_chdtriv(self): + _assert_inverts( + sp.chdtriv, + lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True), + 0, [ProbArg(), IntArg(1, 100)], rtol=1e-4) + + @pytest.mark.xfail(run=False) + def test_chndtridf(self): + # Use a larger atol since mpmath is doing numerical integration + _assert_inverts( + sp.chndtridf, + _noncentral_chi_cdf, + 1, [Arg(0, 100, inclusive_a=False), ProbArg(), + Arg(0, 100, inclusive_a=False)], + n=1000, rtol=1e-4, atol=1e-15) + + @pytest.mark.xfail(run=False) + def test_chndtrinc(self): + # Use a larger atol since mpmath is doing numerical integration + _assert_inverts( + sp.chndtrinc, + _noncentral_chi_cdf, + 2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()], + n=1000, rtol=1e-4, atol=1e-15) + + def test_chndtrix(self): + # Use a larger atol since mpmath is doing numerical integration + _assert_inverts( + sp.chndtrix, + _noncentral_chi_cdf, + 0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)], + n=1000, rtol=1e-4, atol=1e-15, + endpt_atol=[1e-6, None, None]) + + def test_tklmbda_zero_shape(self): + # When lmbda = 0 the CDF has a simple closed form + one = mpmath.mpf(1) + assert_mpmath_equal( + lambda x: sp.tklmbda(x, 0), + lambda x: one/(mpmath.exp(-x) + one), + [Arg()], rtol=1e-7) + + def test_tklmbda_neg_shape(self): + _assert_inverts( + sp.tklmbda, + _tukey_lmbda_quantile, + 0, [ProbArg(), Arg(-25, 0, inclusive_b=False)], + spfunc_first=False, rtol=1e-5, + endpt_atol=[1e-9, 1e-5]) + + @pytest.mark.xfail(run=False) + def test_tklmbda_pos_shape(self): + _assert_inverts( + sp.tklmbda, + 
_tukey_lmbda_quantile, + 0, [ProbArg(), Arg(0, 100, inclusive_a=False)], + spfunc_first=False, rtol=1e-5) + + # The values of lmdba are chosen so that 1/lmbda is exact. + @pytest.mark.parametrize('lmbda', [0.5, 1.0, 8.0]) + def test_tklmbda_lmbda1(self, lmbda): + bound = 1/lmbda + assert_equal(sp.tklmbda([-bound, bound], lmbda), [0.0, 1.0]) + + +funcs = [ + ("btdtria", 3), + ("btdtrib", 3), + ("bdtrik", 3), + ("bdtrin", 3), + ("chdtriv", 2), + ("chndtr", 3), + ("chndtrix", 3), + ("chndtridf", 3), + ("chndtrinc", 3), + ("fdtridfd", 3), + ("ncfdtr", 4), + ("ncfdtri", 4), + ("ncfdtridfn", 4), + ("ncfdtridfd", 4), + ("ncfdtrinc", 4), + ("gdtrix", 3), + ("gdtrib", 3), + ("gdtria", 3), + ("nbdtrik", 3), + ("nbdtrin", 3), + ("nrdtrimn", 3), + ("nrdtrisd", 3), + ("pdtrik", 2), + ("stdtr", 2), + ("stdtrit", 2), + ("stdtridf", 2), + ("nctdtr", 3), + ("nctdtrit", 3), + ("nctdtridf", 3), + ("nctdtrinc", 3), + ("tklmbda", 2), +] + + +@pytest.mark.parametrize('func,numargs', funcs, ids=[x[0] for x in funcs]) +def test_nonfinite(func, numargs): + + rng = np.random.default_rng(1701299355559735) + func = getattr(sp, func) + args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in rng.random(numargs)] + + for args in itertools.product(*args_choices): + res = func(*args) + + if any(np.isnan(x) for x in args): + # Nan inputs should result to nan output + assert_equal(res, np.nan) + else: + # All other inputs should return something (but not + # raise exceptions or cause hangs) + pass + + +def test_chndtrix_gh2158(): + # test that gh-2158 is resolved; previously this blew up + res = sp.chndtrix(0.999999, 2, np.arange(20.)+1e-6) + + # Generated in R + # options(digits=16) + # ncp <- seq(0, 19) + 1e-6 + # print(qchisq(0.999999, df = 2, ncp = ncp)) + res_exp = [27.63103493142305, 35.25728589950540, 39.97396073236288, + 43.88033702110538, 47.35206403482798, 50.54112500166103, + 53.52720257322766, 56.35830042867810, 59.06600769498512, + 61.67243118946381, 64.19376191277179, 
66.64228141346548, + 69.02756927200180, 71.35726934749408, 73.63759723904816, + 75.87368842650227, 78.06984431185720, 80.22971052389806, + 82.35640899964173, 84.45263768373256] + assert_allclose(res, res_exp) + +@pytest.mark.xfail_on_32bit("32bit fails due to algorithm threshold") +def test_nctdtr_gh19896(): + # test that gh-19896 is resolved. + # Compared to SciPy 1.11 results from Fortran code. + dfarr = [0.98, 9.8, 98, 980] + pnoncarr = [-3.8, 0.38, 3.8, 38] + tarr = [0.0015, 0.15, 1.5, 15] + resarr = [0.9999276519560749, 0.9999276519560749, 0.9999908831755221, + 0.9999990265452424, 0.3524153312279712, 0.39749697267251416, + 0.7168629634895805, 0.9656246449259646, 7.234804392512006e-05, + 7.234804392512006e-05, 0.03538804607509127, 0.795482701508521, + 0.0, 0.0, 0.0, + 0.011927908523093889, 0.9999276519560749, 0.9999276519560749, + 0.9999997441133123, 1.0, 0.3525155979118013, + 0.4076312014048369, 0.8476794017035086, 0.9999999297116268, + 7.234804392512006e-05, 7.234804392512006e-05, 0.013477443099785824, + 0.9998501512331494, 0.0, 0.0, + 0.0, 6.561112613212572e-07, 0.9999276519560749, + 0.9999276519560749, 0.9999999313496014, 1.0, + 0.3525281784865706, 0.40890253001898014, 0.8664672830017024, + 1.0, 7.234804392512006e-05, 7.234804392512006e-05, + 0.010990889489704836, 1.0, 0.0, + 0.0, 0.0, 0.0, + 0.9999276519560749, 0.9999276519560749, 0.9999999418789304, + 1.0, 0.35252945487817355, 0.40903153246690993, + 0.8684247068528264, 1.0, 7.234804392512006e-05, + 7.234804392512006e-05, 0.01075068918582911, 1.0, + 0.0, 0.0, 0.0, 0.0] + actarr = [] + for df, p, t in itertools.product(dfarr, pnoncarr, tarr): + actarr += [sp.nctdtr(df, p, t)] + # The rtol is kept high on purpose to make it pass on 32bit systems + assert_allclose(actarr, resarr, rtol=1e-6, atol=0.0) + + +def test_nctdtrinc_gh19896(): + # test that gh-19896 is resolved. + # Compared to SciPy 1.11 results from Fortran code. 
+ dfarr = [0.001, 0.98, 9.8, 98, 980, 10000, 98, 9.8, 0.98, 0.001] + parr = [0.001, 0.1, 0.3, 0.8, 0.999, 0.001, 0.1, 0.3, 0.8, 0.999] + tarr = [0.0015, 0.15, 1.5, 15, 300, 0.0015, 0.15, 1.5, 15, 300] + desired = [3.090232306168629, 1.406141304556198, 2.014225177124157, + 13.727067118283456, 278.9765683871208, 3.090232306168629, + 1.4312427877936222, 2.014225177124157, 3.712743137978295, + -3.086951096691082] + actual = sp.nctdtrinc(dfarr, parr, tarr) + assert_allclose(actual, desired, rtol=5e-12, atol=0.0) + + +def test_stdtr_stdtrit_neg_inf(): + # -inf was treated as +inf and values from the normal were returned + assert np.all(np.isnan(sp.stdtr(-np.inf, [-np.inf, -1.0, 0.0, 1.0, np.inf]))) + assert np.all(np.isnan(sp.stdtrit(-np.inf, [0.0, 0.25, 0.5, 0.75, 1.0]))) + + +def test_bdtrik_nbdtrik_inf(): + y = np.array( + [np.nan,-np.inf,-10.0, -1.0, 0.0, .00001, .5, 0.9999, 1.0, 10.0, np.inf]) + y = y[:,None] + p = np.atleast_2d( + [np.nan, -np.inf, -10.0, -1.0, 0.0, .00001, .5, 1.0, np.inf]) + assert np.all(np.isnan(sp.bdtrik(y, np.inf, p))) + assert np.all(np.isnan(sp.nbdtrik(y, np.inf, p))) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py new file mode 100644 index 0000000000000000000000000000000000000000..8b1ad41243f0865c205963d938ab61a346ee8e88 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py @@ -0,0 +1,49 @@ +# gh-14777 regression tests +# Test stdtr and stdtrit with infinite df and large values of df + +import numpy as np +from numpy.testing import assert_allclose, assert_equal +from scipy.special import stdtr, stdtrit, ndtr, ndtri + + +def test_stdtr_vs_R_large_df(): + df = [1e10, 1e12, 1e120, np.inf] + t = 1. 
+ res = stdtr(df, t) + # R Code: + # options(digits=20) + # pt(1., c(1e10, 1e12, 1e120, Inf)) + res_R = [0.84134474605644460343, + 0.84134474606842180044, + 0.84134474606854281475, + 0.84134474606854292578] + assert_allclose(res, res_R, rtol=2e-15) + # last value should also agree with ndtr + assert_equal(res[3], ndtr(1.)) + + +def test_stdtrit_vs_R_large_df(): + df = [1e10, 1e12, 1e120, np.inf] + p = 0.1 + res = stdtrit(df, p) + # R Code: + # options(digits=20) + # qt(0.1, c(1e10, 1e12, 1e120, Inf)) + res_R = [-1.2815515656292593150, + -1.2815515655454472466, + -1.2815515655446008125, + -1.2815515655446008125] + assert_allclose(res, res_R, rtol=1e-14, atol=1e-15) + # last value should also agree with ndtri + assert_equal(res[3], ndtri(0.1)) + + +def test_stdtr_stdtri_invalid(): + # a mix of large and inf df with t/p equal to nan + df = [1e10, 1e12, 1e120, np.inf] + x = np.nan + res1 = stdtr(df, x) + res2 = stdtrit(df, x) + res_ex = 4*[np.nan] + assert_equal(res1, res_ex) + assert_equal(res2, res_ex) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cython_special.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cython_special.py new file mode 100644 index 0000000000000000000000000000000000000000..058b5a76c54833f127ccc2da3af10c058c00d38f --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_cython_special.py @@ -0,0 +1,363 @@ +from __future__ import annotations +from typing import Callable + +import pytest +from itertools import product +from numpy.testing import assert_allclose, suppress_warnings +from scipy import special +from scipy.special import cython_special + + +bint_points = [True, False] +int_points = [-10, -1, 1, 10] +real_points = [-10.0, -1.0, 1.0, 10.0] +complex_points = [complex(*tup) for tup in product(real_points, repeat=2)] + + +CYTHON_SIGNATURE_MAP = { + 'b': 'bint', + 'f': 'float', + 'd': 'double', + 'g': 'long double', + 'F': 'float complex', + 'D': 'double 
complex', + 'G': 'long double complex', + 'i': 'int', + 'l': 'long' +} + + +TEST_POINTS = { + 'b': bint_points, + 'f': real_points, + 'd': real_points, + 'g': real_points, + 'F': complex_points, + 'D': complex_points, + 'G': complex_points, + 'i': int_points, + 'l': int_points, +} + + +PARAMS: list[tuple[Callable, Callable, tuple[str, ...], str | None]] = [ + (special.agm, cython_special.agm, ('dd',), None), + (special.airy, cython_special._airy_pywrap, ('d', 'D'), None), + (special.airye, cython_special._airye_pywrap, ('d', 'D'), None), + (special.bdtr, cython_special.bdtr, ('dld', 'ddd'), None), + (special.bdtrc, cython_special.bdtrc, ('dld', 'ddd'), None), + (special.bdtri, cython_special.bdtri, ('dld', 'ddd'), None), + (special.bdtrik, cython_special.bdtrik, ('ddd',), None), + (special.bdtrin, cython_special.bdtrin, ('ddd',), None), + (special.bei, cython_special.bei, ('d',), None), + (special.beip, cython_special.beip, ('d',), None), + (special.ber, cython_special.ber, ('d',), None), + (special.berp, cython_special.berp, ('d',), None), + (special.besselpoly, cython_special.besselpoly, ('ddd',), None), + (special.beta, cython_special.beta, ('dd',), None), + (special.betainc, cython_special.betainc, ('ddd',), None), + (special.betaincc, cython_special.betaincc, ('ddd',), None), + (special.betaincinv, cython_special.betaincinv, ('ddd',), None), + (special.betainccinv, cython_special.betainccinv, ('ddd',), None), + (special.betaln, cython_special.betaln, ('dd',), None), + (special.binom, cython_special.binom, ('dd',), None), + (special.boxcox, cython_special.boxcox, ('dd',), None), + (special.boxcox1p, cython_special.boxcox1p, ('dd',), None), + (special.btdtr, cython_special.btdtr, ('ddd',), None), + (special.btdtri, cython_special.btdtri, ('ddd',), None), + (special.btdtria, cython_special.btdtria, ('ddd',), None), + (special.btdtrib, cython_special.btdtrib, ('ddd',), None), + (special.cbrt, cython_special.cbrt, ('d',), None), + (special.chdtr, 
cython_special.chdtr, ('dd',), None), + (special.chdtrc, cython_special.chdtrc, ('dd',), None), + (special.chdtri, cython_special.chdtri, ('dd',), None), + (special.chdtriv, cython_special.chdtriv, ('dd',), None), + (special.chndtr, cython_special.chndtr, ('ddd',), None), + (special.chndtridf, cython_special.chndtridf, ('ddd',), None), + (special.chndtrinc, cython_special.chndtrinc, ('ddd',), None), + (special.chndtrix, cython_special.chndtrix, ('ddd',), None), + (special.cosdg, cython_special.cosdg, ('d',), None), + (special.cosm1, cython_special.cosm1, ('d',), None), + (special.cotdg, cython_special.cotdg, ('d',), None), + (special.dawsn, cython_special.dawsn, ('d', 'D'), None), + (special.ellipe, cython_special.ellipe, ('d',), None), + (special.ellipeinc, cython_special.ellipeinc, ('dd',), None), + (special.ellipj, cython_special._ellipj_pywrap, ('dd',), None), + (special.ellipkinc, cython_special.ellipkinc, ('dd',), None), + (special.ellipkm1, cython_special.ellipkm1, ('d',), None), + (special.ellipk, cython_special.ellipk, ('d',), None), + (special.elliprc, cython_special.elliprc, ('dd', 'DD'), None), + (special.elliprd, cython_special.elliprd, ('ddd', 'DDD'), None), + (special.elliprf, cython_special.elliprf, ('ddd', 'DDD'), None), + (special.elliprg, cython_special.elliprg, ('ddd', 'DDD'), None), + (special.elliprj, cython_special.elliprj, ('dddd', 'DDDD'), None), + (special.entr, cython_special.entr, ('d',), None), + (special.erf, cython_special.erf, ('d', 'D'), None), + (special.erfc, cython_special.erfc, ('d', 'D'), None), + (special.erfcx, cython_special.erfcx, ('d', 'D'), None), + (special.erfi, cython_special.erfi, ('d', 'D'), None), + (special.erfinv, cython_special.erfinv, ('d',), None), + (special.erfcinv, cython_special.erfcinv, ('d',), None), + (special.eval_chebyc, cython_special.eval_chebyc, ('dd', 'dD', 'ld'), None), + (special.eval_chebys, cython_special.eval_chebys, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + 
(special.eval_chebyt, cython_special.eval_chebyt, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_chebyu, cython_special.eval_chebyu, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_gegenbauer, cython_special.eval_gegenbauer, ('ddd', 'ddD', 'ldd'), + 'd and l differ for negative int'), + (special.eval_genlaguerre, cython_special.eval_genlaguerre, ('ddd', 'ddD', 'ldd'), + 'd and l differ for negative int'), + (special.eval_hermite, cython_special.eval_hermite, ('ld',), None), + (special.eval_hermitenorm, cython_special.eval_hermitenorm, ('ld',), None), + (special.eval_jacobi, cython_special.eval_jacobi, ('dddd', 'dddD', 'lddd'), + 'd and l differ for negative int'), + (special.eval_laguerre, cython_special.eval_laguerre, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_legendre, cython_special.eval_legendre, ('dd', 'dD', 'ld'), None), + (special.eval_sh_chebyt, cython_special.eval_sh_chebyt, ('dd', 'dD', 'ld'), None), + (special.eval_sh_chebyu, cython_special.eval_sh_chebyu, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_sh_jacobi, cython_special.eval_sh_jacobi, ('dddd', 'dddD', 'lddd'), + 'd and l differ for negative int'), + (special.eval_sh_legendre, cython_special.eval_sh_legendre, ('dd', 'dD', 'ld'), + None), + (special.exp1, cython_special.exp1, ('d', 'D'), None), + (special.exp10, cython_special.exp10, ('d',), None), + (special.exp2, cython_special.exp2, ('d',), None), + (special.expi, cython_special.expi, ('d', 'D'), None), + (special.expit, cython_special.expit, ('f', 'd', 'g'), None), + (special.expm1, cython_special.expm1, ('d', 'D'), None), + (special.expn, cython_special.expn, ('ld', 'dd'), None), + (special.exprel, cython_special.exprel, ('d',), None), + (special.fdtr, cython_special.fdtr, ('ddd',), None), + (special.fdtrc, cython_special.fdtrc, ('ddd',), None), + (special.fdtri, cython_special.fdtri, ('ddd',), None), + (special.fdtridfd, 
cython_special.fdtridfd, ('ddd',), None), + (special.fresnel, cython_special._fresnel_pywrap, ('d', 'D'), None), + (special.gamma, cython_special.gamma, ('d', 'D'), None), + (special.gammainc, cython_special.gammainc, ('dd',), None), + (special.gammaincc, cython_special.gammaincc, ('dd',), None), + (special.gammainccinv, cython_special.gammainccinv, ('dd',), None), + (special.gammaincinv, cython_special.gammaincinv, ('dd',), None), + (special.gammaln, cython_special.gammaln, ('d',), None), + (special.gammasgn, cython_special.gammasgn, ('d',), None), + (special.gdtr, cython_special.gdtr, ('ddd',), None), + (special.gdtrc, cython_special.gdtrc, ('ddd',), None), + (special.gdtria, cython_special.gdtria, ('ddd',), None), + (special.gdtrib, cython_special.gdtrib, ('ddd',), None), + (special.gdtrix, cython_special.gdtrix, ('ddd',), None), + (special.hankel1, cython_special.hankel1, ('dD',), None), + (special.hankel1e, cython_special.hankel1e, ('dD',), None), + (special.hankel2, cython_special.hankel2, ('dD',), None), + (special.hankel2e, cython_special.hankel2e, ('dD',), None), + (special.huber, cython_special.huber, ('dd',), None), + (special.hyp0f1, cython_special.hyp0f1, ('dd', 'dD'), None), + (special.hyp1f1, cython_special.hyp1f1, ('ddd', 'ddD'), None), + (special.hyp2f1, cython_special.hyp2f1, ('dddd', 'dddD'), None), + (special.hyperu, cython_special.hyperu, ('ddd',), None), + (special.i0, cython_special.i0, ('d',), None), + (special.i0e, cython_special.i0e, ('d',), None), + (special.i1, cython_special.i1, ('d',), None), + (special.i1e, cython_special.i1e, ('d',), None), + (special.inv_boxcox, cython_special.inv_boxcox, ('dd',), None), + (special.inv_boxcox1p, cython_special.inv_boxcox1p, ('dd',), None), + (special.it2i0k0, cython_special._it2i0k0_pywrap, ('d',), None), + (special.it2j0y0, cython_special._it2j0y0_pywrap, ('d',), None), + (special.it2struve0, cython_special.it2struve0, ('d',), None), + (special.itairy, cython_special._itairy_pywrap, ('d',), None), 
+ (special.iti0k0, cython_special._iti0k0_pywrap, ('d',), None), + (special.itj0y0, cython_special._itj0y0_pywrap, ('d',), None), + (special.itmodstruve0, cython_special.itmodstruve0, ('d',), None), + (special.itstruve0, cython_special.itstruve0, ('d',), None), + (special.iv, cython_special.iv, ('dd', 'dD'), None), + (special.ive, cython_special.ive, ('dd', 'dD'), None), + (special.j0, cython_special.j0, ('d',), None), + (special.j1, cython_special.j1, ('d',), None), + (special.jv, cython_special.jv, ('dd', 'dD'), None), + (special.jve, cython_special.jve, ('dd', 'dD'), None), + (special.k0, cython_special.k0, ('d',), None), + (special.k0e, cython_special.k0e, ('d',), None), + (special.k1, cython_special.k1, ('d',), None), + (special.k1e, cython_special.k1e, ('d',), None), + (special.kei, cython_special.kei, ('d',), None), + (special.keip, cython_special.keip, ('d',), None), + (special.kelvin, cython_special._kelvin_pywrap, ('d',), None), + (special.ker, cython_special.ker, ('d',), None), + (special.kerp, cython_special.kerp, ('d',), None), + (special.kl_div, cython_special.kl_div, ('dd',), None), + (special.kn, cython_special.kn, ('ld', 'dd'), None), + (special.kolmogi, cython_special.kolmogi, ('d',), None), + (special.kolmogorov, cython_special.kolmogorov, ('d',), None), + (special.kv, cython_special.kv, ('dd', 'dD'), None), + (special.kve, cython_special.kve, ('dd', 'dD'), None), + (special.log1p, cython_special.log1p, ('d', 'D'), None), + (special.log_expit, cython_special.log_expit, ('f', 'd', 'g'), None), + (special.log_ndtr, cython_special.log_ndtr, ('d', 'D'), None), + (special.ndtri_exp, cython_special.ndtri_exp, ('d',), None), + (special.loggamma, cython_special.loggamma, ('D',), None), + (special.logit, cython_special.logit, ('f', 'd', 'g'), None), + (special.lpmv, cython_special.lpmv, ('ddd',), None), + (special.mathieu_a, cython_special.mathieu_a, ('dd',), None), + (special.mathieu_b, cython_special.mathieu_b, ('dd',), None), + (special.mathieu_cem, 
cython_special._mathieu_cem_pywrap, ('ddd',), None), + (special.mathieu_modcem1, cython_special._mathieu_modcem1_pywrap, ('ddd',), None), + (special.mathieu_modcem2, cython_special._mathieu_modcem2_pywrap, ('ddd',), None), + (special.mathieu_modsem1, cython_special._mathieu_modsem1_pywrap, ('ddd',), None), + (special.mathieu_modsem2, cython_special._mathieu_modsem2_pywrap, ('ddd',), None), + (special.mathieu_sem, cython_special._mathieu_sem_pywrap, ('ddd',), None), + (special.modfresnelm, cython_special._modfresnelm_pywrap, ('d',), None), + (special.modfresnelp, cython_special._modfresnelp_pywrap, ('d',), None), + (special.modstruve, cython_special.modstruve, ('dd',), None), + (special.nbdtr, cython_special.nbdtr, ('lld', 'ddd'), None), + (special.nbdtrc, cython_special.nbdtrc, ('lld', 'ddd'), None), + (special.nbdtri, cython_special.nbdtri, ('lld', 'ddd'), None), + (special.nbdtrik, cython_special.nbdtrik, ('ddd',), None), + (special.nbdtrin, cython_special.nbdtrin, ('ddd',), None), + (special.ncfdtr, cython_special.ncfdtr, ('dddd',), None), + (special.ncfdtri, cython_special.ncfdtri, ('dddd',), None), + (special.ncfdtridfd, cython_special.ncfdtridfd, ('dddd',), None), + (special.ncfdtridfn, cython_special.ncfdtridfn, ('dddd',), None), + (special.ncfdtrinc, cython_special.ncfdtrinc, ('dddd',), None), + (special.nctdtr, cython_special.nctdtr, ('ddd',), None), + (special.nctdtridf, cython_special.nctdtridf, ('ddd',), None), + (special.nctdtrinc, cython_special.nctdtrinc, ('ddd',), None), + (special.nctdtrit, cython_special.nctdtrit, ('ddd',), None), + (special.ndtr, cython_special.ndtr, ('d', 'D'), None), + (special.ndtri, cython_special.ndtri, ('d',), None), + (special.nrdtrimn, cython_special.nrdtrimn, ('ddd',), None), + (special.nrdtrisd, cython_special.nrdtrisd, ('ddd',), None), + (special.obl_ang1, cython_special._obl_ang1_pywrap, ('dddd',), None), + (special.obl_ang1_cv, cython_special._obl_ang1_cv_pywrap, ('ddddd',), None), + (special.obl_cv, 
cython_special.obl_cv, ('ddd',), None), + (special.obl_rad1, cython_special._obl_rad1_pywrap, ('dddd',), "see gh-6211"), + (special.obl_rad1_cv, cython_special._obl_rad1_cv_pywrap, ('ddddd',), + "see gh-6211"), + (special.obl_rad2, cython_special._obl_rad2_pywrap, ('dddd',), "see gh-6211"), + (special.obl_rad2_cv, cython_special._obl_rad2_cv_pywrap, ('ddddd',), + "see gh-6211"), + (special.pbdv, cython_special._pbdv_pywrap, ('dd',), None), + (special.pbvv, cython_special._pbvv_pywrap, ('dd',), None), + (special.pbwa, cython_special._pbwa_pywrap, ('dd',), None), + (special.pdtr, cython_special.pdtr, ('dd', 'dd'), None), + (special.pdtrc, cython_special.pdtrc, ('dd', 'dd'), None), + (special.pdtri, cython_special.pdtri, ('ld', 'dd'), None), + (special.pdtrik, cython_special.pdtrik, ('dd',), None), + (special.poch, cython_special.poch, ('dd',), None), + (special.powm1, cython_special.powm1, ('dd',), None), + (special.pro_ang1, cython_special._pro_ang1_pywrap, ('dddd',), None), + (special.pro_ang1_cv, cython_special._pro_ang1_cv_pywrap, ('ddddd',), None), + (special.pro_cv, cython_special.pro_cv, ('ddd',), None), + (special.pro_rad1, cython_special._pro_rad1_pywrap, ('dddd',), "see gh-6211"), + (special.pro_rad1_cv, cython_special._pro_rad1_cv_pywrap, ('ddddd',), + "see gh-6211"), + (special.pro_rad2, cython_special._pro_rad2_pywrap, ('dddd',), "see gh-6211"), + (special.pro_rad2_cv, cython_special._pro_rad2_cv_pywrap, ('ddddd',), + "see gh-6211"), + (special.pseudo_huber, cython_special.pseudo_huber, ('dd',), None), + (special.psi, cython_special.psi, ('d', 'D'), None), + (special.radian, cython_special.radian, ('ddd',), None), + (special.rel_entr, cython_special.rel_entr, ('dd',), None), + (special.rgamma, cython_special.rgamma, ('d', 'D'), None), + (special.round, cython_special.round, ('d',), None), + (special.spherical_jn, cython_special.spherical_jn, ('ld', 'ldb', 'lD', 'lDb'), + None), + (special.spherical_yn, cython_special.spherical_yn, ('ld', 'ldb', 'lD', 
'lDb'), + None), + (special.spherical_in, cython_special.spherical_in, ('ld', 'ldb', 'lD', 'lDb'), + None), + (special.spherical_kn, cython_special.spherical_kn, ('ld', 'ldb', 'lD', 'lDb'), + None), + (special.shichi, cython_special._shichi_pywrap, ('d', 'D'), None), + (special.sici, cython_special._sici_pywrap, ('d', 'D'), None), + (special.sindg, cython_special.sindg, ('d',), None), + (special.smirnov, cython_special.smirnov, ('ld', 'dd'), None), + (special.smirnovi, cython_special.smirnovi, ('ld', 'dd'), None), + (special.spence, cython_special.spence, ('d', 'D'), None), + (special.sph_harm, cython_special.sph_harm, ('lldd', 'dddd'), None), + (special.stdtr, cython_special.stdtr, ('dd',), None), + (special.stdtridf, cython_special.stdtridf, ('dd',), None), + (special.stdtrit, cython_special.stdtrit, ('dd',), None), + (special.struve, cython_special.struve, ('dd',), None), + (special.tandg, cython_special.tandg, ('d',), None), + (special.tklmbda, cython_special.tklmbda, ('dd',), None), + (special.voigt_profile, cython_special.voigt_profile, ('ddd',), None), + (special.wofz, cython_special.wofz, ('D',), None), + (special.wright_bessel, cython_special.wright_bessel, ('ddd',), None), + (special.wrightomega, cython_special.wrightomega, ('D',), None), + (special.xlog1py, cython_special.xlog1py, ('dd', 'DD'), None), + (special.xlogy, cython_special.xlogy, ('dd', 'DD'), None), + (special.y0, cython_special.y0, ('d',), None), + (special.y1, cython_special.y1, ('d',), None), + (special.yn, cython_special.yn, ('ld', 'dd'), None), + (special.yv, cython_special.yv, ('dd', 'dD'), None), + (special.yve, cython_special.yve, ('dd', 'dD'), None), + (special.zetac, cython_special.zetac, ('d',), None), + (special.owens_t, cython_special.owens_t, ('dd',), None) +] + + +IDS = [x[0].__name__ for x in PARAMS] + + +def _generate_test_points(typecodes): + axes = tuple(TEST_POINTS[x] for x in typecodes) + pts = list(product(*axes)) + return pts + + +def test_cython_api_completeness(): + # 
Check that everything is tested + for name in dir(cython_special): + func = getattr(cython_special, name) + if callable(func) and not name.startswith('_'): + for _, cyfun, _, _ in PARAMS: + if cyfun is func: + break + else: + raise RuntimeError(f"{name} missing from tests!") + + +@pytest.mark.parametrize("param", PARAMS, ids=IDS) +def test_cython_api(param): + pyfunc, cyfunc, specializations, knownfailure = param + if knownfailure: + pytest.xfail(reason=knownfailure) + + # Check which parameters are expected to be fused types + max_params = max(len(spec) for spec in specializations) + values = [set() for _ in range(max_params)] + for typecodes in specializations: + for j, v in enumerate(typecodes): + values[j].add(v) + seen = set() + is_fused_code = [False] * len(values) + for j, v in enumerate(values): + vv = tuple(sorted(v)) + if vv in seen: + continue + is_fused_code[j] = (len(v) > 1) + seen.add(vv) + + # Check results + for typecodes in specializations: + # Pick the correct specialized function + signature = [CYTHON_SIGNATURE_MAP[code] + for j, code in enumerate(typecodes) + if is_fused_code[j]] + + if signature: + cy_spec_func = cyfunc[tuple(signature)] + else: + signature = None + cy_spec_func = cyfunc + + # Test it + pts = _generate_test_points(typecodes) + for pt in pts: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + pyval = pyfunc(*pt) + cyval = cy_spec_func(*pt) + assert_allclose(cyval, pyval, err_msg=f"{pt} {typecodes} {signature}") diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_data.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..40e3eed18ca6816c1c541bb61ecab660447b38f9 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_data.py @@ -0,0 +1,725 @@ +import importlib.resources + +import numpy as np +from numpy.testing import suppress_warnings +import pytest + +from 
scipy.special import ( + lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite, + eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta, + jn, jv, jvp, yn, yv, yvp, iv, ivp, kn, kv, kvp, + gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma, + beta, betainc, betaincinv, poch, + ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc, + elliprc, elliprd, elliprf, elliprg, elliprj, + erf, erfc, erfinv, erfcinv, exp1, expi, expn, + bdtrik, btdtr, btdtri, btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib, + nbdtrik, pdtrik, owens_t, + mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1, + mathieu_modsem1, mathieu_modcem2, mathieu_modsem2, + ellip_harm, ellip_harm_2, spherical_jn, spherical_yn, wright_bessel +) +from scipy.integrate import IntegrationWarning + +from scipy.special._testutils import FuncData + + +# The npz files are generated, and hence may live in the build dir. We can only +# access them through `importlib.resources`, not an explicit path from `__file__` +_datadir = importlib.resources.files('scipy.special.tests.data') + +_boost_npz = _datadir.joinpath('boost.npz') +with importlib.resources.as_file(_boost_npz) as f: + DATASETS_BOOST = np.load(f) + +_gsl_npz = _datadir.joinpath('gsl.npz') +with importlib.resources.as_file(_gsl_npz) as f: + DATASETS_GSL = np.load(f) + +_local_npz = _datadir.joinpath('local.npz') +with importlib.resources.as_file(_local_npz) as f: + DATASETS_LOCAL = np.load(f) + + +def data(func, dataname, *a, **kw): + kw.setdefault('dataname', dataname) + return FuncData(func, DATASETS_BOOST[dataname], *a, **kw) + + +def data_gsl(func, dataname, *a, **kw): + kw.setdefault('dataname', dataname) + return FuncData(func, DATASETS_GSL[dataname], *a, **kw) + + +def data_local(func, dataname, *a, **kw): + kw.setdefault('dataname', dataname) + return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw) + + +def ellipk_(k): + return ellipk(k*k) + + +def ellipkinc_(f, k): + return ellipkinc(f, k*k) + 
+ +def ellipe_(k): + return ellipe(k*k) + + +def ellipeinc_(f, k): + return ellipeinc(f, k*k) + + +def zeta_(x): + return zeta(x, 1.) + + +def assoc_legendre_p_boost_(nu, mu, x): + # the boost test data is for integer orders only + return lpmv(mu, nu.astype(int), x) + +def legendre_p_via_assoc_(nu, x): + return lpmv(0, nu, x) + +def lpn_(n, x): + return lpn(n.astype('l'), x)[0][-1] + +def lqn_(n, x): + return lqn(n.astype('l'), x)[0][-1] + +def legendre_p_via_lpmn(n, x): + return lpmn(0, n, x)[0][0,-1] + +def legendre_q_via_lqmn(n, x): + return lqmn(0, n, x)[0][0,-1] + +def mathieu_ce_rad(m, q, x): + return mathieu_cem(m, q, x*180/np.pi)[0] + + +def mathieu_se_rad(m, q, x): + return mathieu_sem(m, q, x*180/np.pi)[0] + + +def mathieu_mc1_scaled(m, q, x): + # GSL follows a different normalization. + # We follow Abramowitz & Stegun, they apparently something else. + return mathieu_modcem1(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_ms1_scaled(m, q, x): + return mathieu_modsem1(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_mc2_scaled(m, q, x): + return mathieu_modcem2(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_ms2_scaled(m, q, x): + return mathieu_modsem2(m, q, x)[0] * np.sqrt(np.pi/2) + +def eval_legendre_ld(n, x): + return eval_legendre(n.astype('l'), x) + +def eval_legendre_dd(n, x): + return eval_legendre(n.astype('d'), x) + +def eval_hermite_ld(n, x): + return eval_hermite(n.astype('l'), x) + +def eval_laguerre_ld(n, x): + return eval_laguerre(n.astype('l'), x) + +def eval_laguerre_dd(n, x): + return eval_laguerre(n.astype('d'), x) + +def eval_genlaguerre_ldd(n, a, x): + return eval_genlaguerre(n.astype('l'), a, x) + +def eval_genlaguerre_ddd(n, a, x): + return eval_genlaguerre(n.astype('d'), a, x) + +def bdtrik_comp(y, n, p): + return bdtrik(1-y, n, p) + +def btdtri_comp(a, b, p): + return btdtri(a, b, 1-p) + +def btdtria_comp(p, b, x): + return btdtria(1-p, b, x) + +def btdtrib_comp(a, p, x): + return btdtrib(a, 1-p, x) + +def gdtr_(p, x): + return 
gdtr(1.0, p, x) + +def gdtrc_(p, x): + return gdtrc(1.0, p, x) + +def gdtrix_(b, p): + return gdtrix(1.0, b, p) + +def gdtrix_comp(b, p): + return gdtrix(1.0, b, 1-p) + +def gdtrib_(p, x): + return gdtrib(1.0, p, x) + +def gdtrib_comp(p, x): + return gdtrib(1.0, 1-p, x) + +def nbdtrik_comp(y, n, p): + return nbdtrik(1-y, n, p) + +def pdtrik_comp(p, m): + return pdtrik(1-p, m) + +def poch_(z, m): + return 1.0 / poch(z, m) + +def poch_minus(z, m): + return 1.0 / poch(z, -m) + +def spherical_jn_(n, x): + return spherical_jn(n.astype('l'), x) + +def spherical_yn_(n, x): + return spherical_yn(n.astype('l'), x) + +def sph_harm_(m, n, theta, phi): + y = sph_harm(m, n, theta, phi) + return (y.real, y.imag) + +def cexpm1(x, y): + z = expm1(x + 1j*y) + return z.real, z.imag + +def clog1p(x, y): + z = log1p(x + 1j*y) + return z.real, z.imag + + +BOOST_TESTS = [ + data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', + (0,1,2), 3, rtol=1e-11), + + data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=1e-11), + data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=9.6e-14), + data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=5e-14, vectorized=False), + data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=9.6e-14, vectorized=False), + data(lpn_, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=5e-14, vectorized=False), + data(lpn_, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=3e-13, vectorized=False), + data(eval_legendre_ld, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=6e-14), + data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=2e-13), + data(eval_legendre_dd, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=2e-14), + data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=2e-13), + + data(lqn_, 'legendre_p_ipp-legendre_p', + (0,1), 3, rtol=2e-14, vectorized=False), + data(lqn_, 
'legendre_p_large_ipp-legendre_p_large', + (0,1), 3, rtol=2e-12, vectorized=False), + data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p', + (0,1), 3, rtol=2e-14, vectorized=False), + data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 3, rtol=2e-12, vectorized=False), + + data(beta, 'beta_exp_data_ipp-beta_exp_data', + (0,1), 2, rtol=1e-13), + data(beta, 'beta_exp_data_ipp-beta_exp_data', + (0,1), 2, rtol=1e-13), + data(beta, 'beta_med_data_ipp-beta_med_data', + (0,1), 2, rtol=5e-13), + + data(betainc, 'ibeta_small_data_ipp-ibeta_small_data', + (0,1,2), 5, rtol=6e-15), + data(betainc, 'ibeta_data_ipp-ibeta_data', + (0,1,2), 5, rtol=5e-13), + data(betainc, 'ibeta_int_data_ipp-ibeta_int_data', + (0,1,2), 5, rtol=2e-14), + data(betainc, 'ibeta_large_data_ipp-ibeta_large_data', + (0,1,2), 5, rtol=4e-10), + + data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data', + (0,1,2), 3, rtol=1e-5), + + data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data', + (0,1,2), 5, rtol=6e-15), + data(btdtr, 'ibeta_data_ipp-ibeta_data', + (0,1,2), 5, rtol=4e-13), + data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data', + (0,1,2), 5, rtol=2e-14), + data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data', + (0,1,2), 5, rtol=4e-10), + + data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data', + (0,1,2), 3, rtol=1e-5), + data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data', + (0,1,2), 4, rtol=8e-7), + + data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data', + (2,0,1), 3, rtol=5e-9), + data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', + (2,0,1), 4, rtol=5e-9), + + data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data', + (0,2,1), 5, rtol=5e-9), + data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', + (0,2,1), 6, rtol=5e-9), + + data(binom, 'binomial_data_ipp-binomial_data', + (0,1), 2, rtol=1e-13), + data(binom, 'binomial_large_data_ipp-binomial_large_data', + (0,1), 2, rtol=5e-13), + + data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data', + (2,0,1), 3, rtol=5e-9), + 
data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data', + (2,0,1), 4, rtol=5e-9), + + data(nbdtrik, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', + (2,0,1), 3, rtol=4e-9), + data(nbdtrik_comp, + 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', + (2,0,1), 4, rtol=4e-9), + + data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data', + (1,0), 2, rtol=3e-9), + data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data', + (1,0), 3, rtol=4e-9), + + data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0), + + data(digamma, 'digamma_data_ipp-digamma_data', 0, 1), + data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1), + data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13), + data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13), + data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15), + data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15), + data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15), + data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14), + + data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1), + data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14), + data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1), + data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14), + + data(erf, 'erf_data_ipp-erf_data', 0, 1), + data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13), + data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15), + data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1), + data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1), + data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14), + data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1), + data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13), + data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2), + + data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1), + data(erfcinv, 
'erfc_inv_data_ipp-erfc_inv_data', 0, 1), + data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data', 0, 1, + param_filter=(lambda s: s > 0)), + + data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13), + data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9), + data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13), + data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13), + data(expi, 'expinti_data_long_ipp-expinti_data_long', 0, 1), + + data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2), + data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14), + + data(gamma, 'test_gamma_data_ipp-near_0', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_1', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_2', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12), + data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14), + data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13), + data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10), + data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2), + + data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15), + data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13), + data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13), + data(gammainc, 
'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12), + + data(gdtr_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13), + data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13), + data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13), + data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9), + + data(gammaincc, 'igamma_small_data_ipp-igamma_small_data', + (0,1), 3, rtol=1e-13), + data(gammaincc, 'igamma_med_data_ipp-igamma_med_data', + (0,1), 3, rtol=2e-13), + data(gammaincc, 'igamma_int_data_ipp-igamma_int_data', + (0,1), 3, rtol=4e-14), + data(gammaincc, 'igamma_big_data_ipp-igamma_big_data', + (0,1), 3, rtol=1e-11), + + data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13), + data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13), + data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14), + data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11), + + data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9), + data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9), + + data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', + (0,1), 2, rtol=2e-13), + data(poch_, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', + (0,1), 2,), + data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', + (0,1), 2,), + data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', + (0,1), 3, rtol=2e-13), + data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', + (0,1), 3), + data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', + (0,1), 3), + + data(eval_hermite_ld, 'hermite_ipp-hermite', + (0,1), 2, rtol=2e-14), + + data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2', + (0,1), 2, rtol=7e-12), + data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2', + (0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'), + 
data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3', + (0,1,2), 3, rtol=2e-13), + data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3', + (0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'), + + data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1), + data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2), + + data(iv, 'bessel_i_data_ipp-bessel_i_data', + (0,1), 2, rtol=1e-12), + data(iv, 'bessel_i_data_ipp-bessel_i_data', + (0,1j), 2, rtol=2e-10, atol=1e-306), + data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', + (0,1), 2, rtol=1e-9), + data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', + (0,1j), 2, rtol=2e-10), + + data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', + (0,1), 2, rtol=1.2e-13), + data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', + (0,1j), 2, rtol=1.2e-13, atol=1e-300), + + data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), + data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), + data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11), + data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11), + + data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), + data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), + data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12), + data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12), + + data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', + (0,1), 2, rtol=1e-13), + data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', + (0,1j), 2, rtol=1e-13), + data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', + (0,1), 2, rtol=1e-11), + data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', + (0,1j), 2, rtol=2e-11), + + data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12), + + data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12), 
+ data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12), + data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12), + data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12), + + data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', + (0,1), 2, rtol=3e-14), + data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', + (0,1j), 2, rtol=3e-14), + data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1), 2, rtol=7e-14), + data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1j), 2, rtol=7e-14), + + data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12), + data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), + + data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), + data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12), + data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10), + data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10), + + data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', + (0, 1), 2, rtol=4e-9), + data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', + (0, 1j), 2, rtol=4e-9), + + data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, + param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, + param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, + param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, + param_filter=(lambda s: s > 1)), + + data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 2, rtol=1e-11), + data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 2, rtol=1e-14), + data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 2, rtol=1e-11), + + data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 3, rtol=1e-12), + data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 3, rtol=1e-14), + 
data(gammainccinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 3, rtol=1e-14), + + data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 2, rtol=3e-13, knownfailure='gdtrix unflow some points'), + data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 2, rtol=3e-15), + data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 2), + data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 2, knownfailure='gdtrix bad some points'), + data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 3, rtol=6e-15), + data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 3), + + data(chndtr, 'nccs_ipp-nccs', + (2,0,1), 3, rtol=3e-5), + data(chndtr, 'nccs_big_ipp-nccs_big', + (2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate some points'), + + data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic', + (1,0,3,2), (4,5), rtol=5e-11, + param_filter=(lambda p: np.ones(p.shape, '?'), + lambda p: np.ones(p.shape, '?'), + lambda p: np.logical_and(p < 2*np.pi, p >= 0), + lambda p: np.logical_and(p < np.pi, p >= 0))), + + data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data', + (0,1), 2, rtol=1e-13), + data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data', + (0,1), 2, rtol=8e-15), + + data(owens_t, 'owens_t_ipp-owens_t', + (0, 1), 2, rtol=5e-14), + data(owens_t, 'owens_t_large_data_ipp-owens_t_large_data', + (0, 1), 2, rtol=8e-12), + + # -- test data exists in boost but is not used in scipy -- + + # ibeta_derivative_data_ipp/ibeta_derivative_data.txt + # ibeta_derivative_int_data_ipp/ibeta_derivative_int_data.txt + # ibeta_derivative_large_data_ipp/ibeta_derivative_large_data.txt + # ibeta_derivative_small_data_ipp/ibeta_derivative_small_data.txt + + # bessel_y01_prime_data_ipp/bessel_y01_prime_data.txt + # bessel_yn_prime_data_ipp/bessel_yn_prime_data.txt + # sph_bessel_prime_data_ipp/sph_bessel_prime_data.txt + # sph_neumann_prime_data_ipp/sph_neumann_prime_data.txt + + # 
ellint_d2_data_ipp/ellint_d2_data.txt + # ellint_d_data_ipp/ellint_d_data.txt + # ellint_pi2_data_ipp/ellint_pi2_data.txt + # ellint_pi3_data_ipp/ellint_pi3_data.txt + # ellint_pi3_large_data_ipp/ellint_pi3_large_data.txt + data(elliprc, 'ellint_rc_data_ipp-ellint_rc_data', (0, 1), 2, + rtol=5e-16), + data(elliprd, 'ellint_rd_data_ipp-ellint_rd_data', (0, 1, 2), 3, + rtol=5e-16), + data(elliprd, 'ellint_rd_0xy_ipp-ellint_rd_0xy', (0, 1, 2), 3, + rtol=5e-16), + data(elliprd, 'ellint_rd_0yy_ipp-ellint_rd_0yy', (0, 1, 2), 3, + rtol=5e-16), + data(elliprd, 'ellint_rd_xxx_ipp-ellint_rd_xxx', (0, 1, 2), 3, + rtol=5e-16), + # Some of the following rtol for elliprd may be larger than 5e-16 to + # work around some hard cases in the Boost test where we get slightly + # larger error than the ideal bound when the x (==y) input is close to + # zero. + # Also the accuracy on 32-bit builds with g++ may suffer from excess + # loss of precision; see GCC bugzilla 323 + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323 + data(elliprd, 'ellint_rd_xxz_ipp-ellint_rd_xxz', (0, 1, 2), 3, + rtol=6.5e-16), + data(elliprd, 'ellint_rd_xyy_ipp-ellint_rd_xyy', (0, 1, 2), 3, + rtol=6e-16), + data(elliprf, 'ellint_rf_data_ipp-ellint_rf_data', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_xxx_ipp-ellint_rf_xxx', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_xyy_ipp-ellint_rf_xyy', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_xy0_ipp-ellint_rf_xy0', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_0yy_ipp-ellint_rf_0yy', (0, 1, 2), 3, + rtol=5e-16), + # The accuracy of R_G is primarily limited by R_D that is used + # internally. It is generally worse than R_D. Notice that we increased + # the rtol for R_G here. The cases with duplicate arguments are + # slightly less likely to be unbalanced (at least two arguments are + # already balanced) so the error bound is slightly better. Again, + # precision with g++ 32-bit is even worse. 
+ data(elliprg, 'ellint_rg_ipp-ellint_rg', (0, 1, 2), 3, + rtol=8.0e-16), + data(elliprg, 'ellint_rg_xxx_ipp-ellint_rg_xxx', (0, 1, 2), 3, + rtol=6e-16), + data(elliprg, 'ellint_rg_xyy_ipp-ellint_rg_xyy', (0, 1, 2), 3, + rtol=7.5e-16), + data(elliprg, 'ellint_rg_xy0_ipp-ellint_rg_xy0', (0, 1, 2), 3, + rtol=5e-16), + data(elliprg, 'ellint_rg_00x_ipp-ellint_rg_00x', (0, 1, 2), 3, + rtol=5e-16), + data(elliprj, 'ellint_rj_data_ipp-ellint_rj_data', (0, 1, 2, 3), 4, + rtol=5e-16, atol=1e-25, + param_filter=(lambda s: s <= 5e-26,)), + # ellint_rc_data_ipp/ellint_rc_data.txt + # ellint_rd_0xy_ipp/ellint_rd_0xy.txt + # ellint_rd_0yy_ipp/ellint_rd_0yy.txt + # ellint_rd_data_ipp/ellint_rd_data.txt + # ellint_rd_xxx_ipp/ellint_rd_xxx.txt + # ellint_rd_xxz_ipp/ellint_rd_xxz.txt + # ellint_rd_xyy_ipp/ellint_rd_xyy.txt + # ellint_rf_0yy_ipp/ellint_rf_0yy.txt + # ellint_rf_data_ipp/ellint_rf_data.txt + # ellint_rf_xxx_ipp/ellint_rf_xxx.txt + # ellint_rf_xy0_ipp/ellint_rf_xy0.txt + # ellint_rf_xyy_ipp/ellint_rf_xyy.txt + # ellint_rg_00x_ipp/ellint_rg_00x.txt + # ellint_rg_ipp/ellint_rg.txt + # ellint_rg_xxx_ipp/ellint_rg_xxx.txt + # ellint_rg_xy0_ipp/ellint_rg_xy0.txt + # ellint_rg_xyy_ipp/ellint_rg_xyy.txt + # ellint_rj_data_ipp/ellint_rj_data.txt + # ellint_rj_e2_ipp/ellint_rj_e2.txt + # ellint_rj_e3_ipp/ellint_rj_e3.txt + # ellint_rj_e4_ipp/ellint_rj_e4.txt + # ellint_rj_zp_ipp/ellint_rj_zp.txt + + # jacobi_elliptic_ipp/jacobi_elliptic.txt + # jacobi_elliptic_small_ipp/jacobi_elliptic_small.txt + # jacobi_large_phi_ipp/jacobi_large_phi.txt + # jacobi_near_1_ipp/jacobi_near_1.txt + # jacobi_zeta_big_phi_ipp/jacobi_zeta_big_phi.txt + # jacobi_zeta_data_ipp/jacobi_zeta_data.txt + + # heuman_lambda_data_ipp/heuman_lambda_data.txt + + # hypergeometric_0F2_ipp/hypergeometric_0F2.txt + # hypergeometric_1F1_big_ipp/hypergeometric_1F1_big.txt + # hypergeometric_1F1_ipp/hypergeometric_1F1.txt + # hypergeometric_1F1_small_random_ipp/hypergeometric_1F1_small_random.txt + # 
hypergeometric_1F2_ipp/hypergeometric_1F2.txt + # hypergeometric_1f1_large_regularized_ipp/hypergeometric_1f1_large_regularized.txt # noqa: E501 + # hypergeometric_1f1_log_large_unsolved_ipp/hypergeometric_1f1_log_large_unsolved.txt # noqa: E501 + # hypergeometric_2F0_half_ipp/hypergeometric_2F0_half.txt + # hypergeometric_2F0_integer_a2_ipp/hypergeometric_2F0_integer_a2.txt + # hypergeometric_2F0_ipp/hypergeometric_2F0.txt + # hypergeometric_2F0_large_z_ipp/hypergeometric_2F0_large_z.txt + # hypergeometric_2F1_ipp/hypergeometric_2F1.txt + # hypergeometric_2F2_ipp/hypergeometric_2F2.txt + + # ncbeta_big_ipp/ncbeta_big.txt + # nct_small_delta_ipp/nct_small_delta.txt + # nct_asym_ipp/nct_asym.txt + # ncbeta_ipp/ncbeta.txt + + # powm1_data_ipp/powm1_big_data.txt + # powm1_sqrtp1m1_test_hpp/sqrtp1m1_data.txt + + # sinc_data_ipp/sinc_data.txt + + # test_gamma_data_ipp/gammap1m1_data.txt + # tgamma_ratio_data_ipp/tgamma_ratio_data.txt + + # trig_data_ipp/trig_data.txt + # trig_data2_ipp/trig_data2.txt +] + + +@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr) +def test_boost(test): + # Filter deprecation warnings of any deprecated functions. + if test.func in [btdtr, btdtri, btdtri_comp]: + with pytest.deprecated_call(): + _test_factory(test) + else: + _test_factory(test) + + +GSL_TESTS = [ + data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13), + data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13), + + # Also the GSL output has limited accuracy... 
+ data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13), + + data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms', + (0, 1, 2), 3, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms', + (0, 1, 2), 4, rtol=1e-7, atol=1e-13), + + data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms', + (0, 1, 2), 5, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms', + (0, 1, 2), 6, rtol=1e-7, atol=1e-13), +] + + +@pytest.mark.parametrize('test', GSL_TESTS, ids=repr) +def test_gsl(test): + _test_factory(test) + + +LOCAL_TESTS = [ + data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2), + data_local(ellipkm1, 'ellipkm1', 0, 1), + data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2), + data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14), + data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14), + data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12), + data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11), + data_local(ellip_harm_2, 'ellip',(0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13), + data_local(ellip_harm, 'ellip',(0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13), + data_local(wright_bessel, 'wright_bessel', (0, 1, 2), 3, rtol=1e-11), +] + + +@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr) +def test_local(test): + _test_factory(test) + + +def _test_factory(test, dtype=np.float64): + """Boost test""" + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected") + with np.errstate(all='ignore'): + test.check(dtype=dtype) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_dd.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_dd.py new file mode 100644 index 0000000000000000000000000000000000000000..45c8c88a5e9b297871fa0812c099fbee213c007d --- /dev/null +++ 
b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_dd.py @@ -0,0 +1,46 @@ +# Tests for a few of the "double-double" C functions defined in cephes/dd_*. + +import pytest +from numpy.testing import assert_allclose +from scipy.special._test_internal import _dd_exp, _dd_log, _dd_expm1 + + +# Each tuple in test_data contains: +# (dd_func, xhi, xlo, expected_yhi, expected_ylo) +# The expected values were computed with mpmath, e.g. +# +# import mpmath +# mpmath.mp.dps = 100 +# xhi = 10.0 +# xlo = 0.0 +# x = mpmath.mpf(xhi) + mpmath.mpf(xlo) +# y = mpmath.log(x) +# expected_yhi = float(y) +# expected_ylo = float(y - expected_yhi) +# +test_data = [ + (_dd_exp, -0.3333333333333333, -1.850371707708594e-17, + 0.7165313105737893, -2.0286948382455594e-17), + (_dd_exp, 0.0, 0.0, 1.0, 0.0), + (_dd_exp, 10.0, 0.0, 22026.465794806718, -1.3780134700517372e-12), + (_dd_log, 0.03125, 0.0, -3.4657359027997265, -4.930038229799327e-18), + (_dd_log, 10.0, 0.0, 2.302585092994046, -2.1707562233822494e-16), + (_dd_expm1, -1.25, 0.0, -0.7134952031398099, -4.7031321153650186e-17), + (_dd_expm1, -0.484375, 0.0, -0.3839178722093218, 7.609376052156984e-18), + (_dd_expm1, -0.25, 0.0, -0.22119921692859512, -1.0231869534531498e-17), + (_dd_expm1, -0.0625, 0.0, -0.06058693718652421, -7.077887227488846e-19), + (_dd_expm1, 0.0, 0.0, 0.0, 0.0), + (_dd_expm1, 0.0625, 3.5e-18, 0.06449445891785943, 1.4323095758164254e-18), + (_dd_expm1, 0.25, 0.0, 0.2840254166877415, -2.133257464457841e-17), + (_dd_expm1, 0.498046875, 0.0, 0.645504254608231, -9.198435524984236e-18), + (_dd_expm1, 1.25, 0.0, 2.4903429574618414, -4.604261945372796e-17) +] + + +@pytest.mark.parametrize('dd_func, xhi, xlo, expected_yhi, expected_ylo', + test_data) +def test_dd(dd_func, xhi, xlo, expected_yhi, expected_ylo): + yhi, ylo = dd_func(xhi, xlo) + assert yhi == expected_yhi, (f"high double ({yhi}) does not equal the " + f"expected value {expected_yhi}") + assert_allclose(ylo, expected_ylo, rtol=5e-15) diff --git 
a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f27dc7b71c1ae928b4bdd8bd987df9ca420bab --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py @@ -0,0 +1,45 @@ +import numpy as np +from numpy import pi, log, sqrt +from numpy.testing import assert_, assert_equal + +from scipy.special._testutils import FuncData +import scipy.special as sc + +# Euler-Mascheroni constant +euler = 0.57721566490153286 + + +def test_consistency(): + # Make sure the implementation of digamma for real arguments + # agrees with the implementation of digamma for complex arguments. + + # It's all poles after -1e16 + x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)] + dataset = np.vstack((x + 0j, sc.digamma(x))).T + FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check() + + +def test_special_values(): + # Test special values from Gauss's digamma theorem. 
See + # + # https://en.wikipedia.org/wiki/Digamma_function + + dataset = [ + (1, -euler), + (0.5, -2*log(2) - euler), + (1/3, -pi/(2*sqrt(3)) - 3*log(3)/2 - euler), + (1/4, -pi/2 - 3*log(2) - euler), + (1/6, -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler), + (1/8, + -pi/2 - 4*log(2) - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler) + ] + + dataset = np.asarray(dataset) + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check() + + +def test_nonfinite(): + pts = [0.0, -0.0, np.inf] + std = [-np.inf, np.inf, np.inf] + assert_equal(sc.digamma(pts), std) + assert_(all(np.isnan(sc.digamma([-np.inf, -1])))) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ellip_harm.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ellip_harm.py new file mode 100644 index 0000000000000000000000000000000000000000..a97c2468633062fbb4d858c9b22d60fff9bc9e24 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ellip_harm.py @@ -0,0 +1,278 @@ +# +# Tests for the Ellipsoidal Harmonic Function, +# Distributed under the same license as SciPy itself. 
+# + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose, + assert_, suppress_warnings) +from scipy.special._testutils import assert_func_equal +from scipy.special import ellip_harm, ellip_harm_2, ellip_normal +from scipy.integrate import IntegrationWarning +from numpy import sqrt, pi + + +def test_ellip_potential(): + def change_coefficient(lambda1, mu, nu, h2, k2): + x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2)) + y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2))) + z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2))) + return x, y, z + + def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2): + return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu) + * ellip_harm(h2, k2, n, p, nu)) + + def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2): + return (ellip_harm_2(h2, k2, n, p, lambda1) + * ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu)) + + def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2): + tol = 1e-8 + sum1 = 0 + for n in range(20): + xsum = 0 + for p in range(1, 2*n+2): + xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2) + * solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) / + (ellip_normal(h2, k2, n, p)*(2*n + 1))) + if abs(xsum) < 0.1*tol*abs(sum1): + break + sum1 += xsum + return sum1, xsum + + def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2): + x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2) + x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2) + res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2) + return 1/res + + pts = [ + (120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25), + (120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20), + ] + + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + sup.filter(IntegrationWarning, "The maximum number of subdivisions") + + for p in pts: + err_msg = repr(p) + exact = potential(*p) + result, last_term = summation(*p) + 
assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg) + assert_(abs(result - exact) < 10*abs(last_term), err_msg) + + +def test_ellip_norm(): + + def G01(h2, k2): + return 4*pi + + def G11(h2, k2): + return 4*pi*h2*k2/3 + + def G12(h2, k2): + return 4*pi*h2*(k2 - h2)/3 + + def G13(h2, k2): + return 4*pi*k2*(k2 - h2)/3 + + def G22(h2, k2): + res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 + + sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2))) + return 16*pi/405*res + + def G21(h2, k2): + res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 + + sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2))) + return 16*pi/405*res + + def G23(h2, k2): + return 4*pi*h2**2*k2*(k2 - h2)/15 + + def G24(h2, k2): + return 4*pi*h2*k2**2*(k2 - h2)/15 + + def G25(h2, k2): + return 4*pi*h2*k2*(k2 - h2)**2/15 + + def G32(h2, k2): + res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2 + + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) + + 11*h2*k2*(h2 + k2))) + return 16*pi/13125*k2*h2*res + + def G31(h2, k2): + res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2 + + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) - + 11*h2*k2*(h2 + k2))) + return 16*pi/13125*h2*k2*res + + def G34(h2, k2): + res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 + + 13*h2*k2**2)) + return 16*pi/13125*h2*(k2 - h2)*res + + def G33(h2, k2): + res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 - + 13*h2*k2**2)) + return 16*pi/13125*h2*(k2 - h2)*res + + def G36(h2, k2): + res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 + + 9*h2*k2**2)) + return 16*pi/13125*k2*(k2 - h2)*res + + def G35(h2, k2): + res = (16*h2**4 + 6*k2**4 - 
28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 - + 9*h2*k2**2)) + return 16*pi/13125*k2*(k2 - h2)*res + + def G37(h2, k2): + return 4*pi*h2**2*k2**2*(k2 - h2)**2/105 + + known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13, + (2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24, + (2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33, + (3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37} + + def _ellip_norm(n, p, h2, k2): + func = known_funcs[n, p] + return func(h2, k2) + _ellip_norm = np.vectorize(_ellip_norm) + + def ellip_normal_known(h2, k2, n, p): + return _ellip_norm(n, p, h2, k2) + + # generate both large and small h2 < k2 pairs + np.random.seed(1234) + h2 = np.random.pareto(0.5, size=1) + k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size)) + + points = [] + for n in range(4): + for p in range(1, 2*n+2): + points.append((h2, k2, np.full(h2.size, n), np.full(h2.size, p))) + points = np.array(points) + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12) + + +def test_ellip_harm_2(): + + def I1(h2, k2, s): + res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s)) + + ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) + + ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s))) + return res + + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8)))) + + # Values produced by code from arXiv:1204.0267 + assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382) + assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809) + assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743) + assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306) + assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 
0.00107976356454) + + +def test_ellip_harm(): + + def E01(h2, k2, s): + return 1 + + def E11(h2, k2, s): + return s + + def E12(h2, k2, s): + return sqrt(abs(s*s - h2)) + + def E13(h2, k2, s): + return sqrt(abs(s*s - k2)) + + def E21(h2, k2, s): + return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2))) + + def E22(h2, k2, s): + return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2))) + + def E23(h2, k2, s): + return s * sqrt(abs(s*s - h2)) + + def E24(h2, k2, s): + return s * sqrt(abs(s*s - k2)) + + def E25(h2, k2, s): + return sqrt(abs((s*s - h2)*(s*s - k2))) + + def E31(h2, k2, s): + return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) - + 15*h2*k2)) + + def E32(h2, k2, s): + return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) - + 15*h2*k2)) + + def E33(h2, k2, s): + return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 + + 2*k2)*(h2 + 2*k2) - 5*h2*k2)))) + + def E34(h2, k2, s): + return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 + + 2*k2)*(h2 + 2*k2) - 5*h2*k2)))) + + def E35(h2, k2, s): + return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2 + + k2)*(2*h2 + k2) - 5*h2*k2)))) + + def E36(h2, k2, s): + return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2 + + k2)*(2*h2 + k2) - 5*h2*k2)))) + + def E37(h2, k2, s): + return s * sqrt(abs((s*s - h2)*(s*s - k2))) + + assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1), + ellip_harm(5, 8, 1, 2, 2.5)) + + known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13, + (2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24, + (2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33, + (3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37} + + point_ref = [] + + def ellip_harm_known(h2, k2, n, p, s): + for i in range(h2.size): + func = known_funcs[(int(n[i]), int(p[i]))] + point_ref.append(func(h2[i], k2[i], s[i])) + return point_ref + + np.random.seed(1234) + h2 = np.random.pareto(0.5, size=30) + k2 = h2*(1 + np.random.pareto(0.5, 
size=h2.size)) + s = np.random.pareto(0.5, size=h2.size) + points = [] + for i in range(h2.size): + for n in range(4): + for p in range(1, 2*n+2): + points.append((h2[i], k2[i], n, p, s[i])) + points = np.array(points) + assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12) + + +def test_ellip_harm_invalid_p(): + # Regression test. This should return nan. + n = 4 + # Make p > 2*n + 1. + p = 2*n + 2 + result = ellip_harm(0.5, 2.0, n, p, 0.2) + assert np.isnan(result) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_erfinv.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_erfinv.py new file mode 100644 index 0000000000000000000000000000000000000000..98739b93fc6ad75a41a7b80107ee696453b12a09 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_erfinv.py @@ -0,0 +1,89 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import pytest + +import scipy.special as sc + + +class TestInverseErrorFunction: + def test_compliment(self): + # Test erfcinv(1 - x) == erfinv(x) + x = np.linspace(-1, 1, 101) + assert_allclose(sc.erfcinv(1 - x), sc.erfinv(x), rtol=0, atol=1e-15) + + def test_literal_values(self): + # The expected values were calculated with mpmath: + # + # import mpmath + # mpmath.mp.dps = 200 + # for y in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]: + # x = mpmath.erfinv(y) + # print(x) + # + y = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) + actual = sc.erfinv(y) + expected = [ + 0.0, + 0.08885599049425769, + 0.1791434546212917, + 0.2724627147267543, + 0.37080715859355795, + 0.4769362762044699, + 0.5951160814499948, + 0.7328690779592167, + 0.9061938024368233, + 1.1630871536766743, + ] + assert_allclose(actual, expected, rtol=0, atol=1e-15) + + @pytest.mark.parametrize( + 'f, x, y', + [ + (sc.erfinv, -1, -np.inf), + (sc.erfinv, 0, 0), + (sc.erfinv, 1, np.inf), + (sc.erfinv, -100, np.nan), + (sc.erfinv, 100, np.nan), + 
(sc.erfcinv, 0, np.inf), + (sc.erfcinv, 1, -0.0), + (sc.erfcinv, 2, -np.inf), + (sc.erfcinv, -100, np.nan), + (sc.erfcinv, 100, np.nan), + ], + ids=[ + 'erfinv at lower bound', + 'erfinv at midpoint', + 'erfinv at upper bound', + 'erfinv below lower bound', + 'erfinv above upper bound', + 'erfcinv at lower bound', + 'erfcinv at midpoint', + 'erfcinv at upper bound', + 'erfcinv below lower bound', + 'erfcinv above upper bound', + ] + ) + def test_domain_bounds(self, f, x, y): + assert_equal(f(x), y) + + def test_erfinv_asympt(self): + # regression test for gh-12758: erfinv(x) loses precision at small x + # expected values precomputed with mpmath: + # >>> mpmath.mp.dps = 100 + # >>> expected = [float(mpmath.erfinv(t)) for t in x] + x = np.array([1e-20, 1e-15, 1e-14, 1e-10, 1e-8, 0.9e-7, 1.1e-7, 1e-6]) + expected = np.array([8.86226925452758e-21, + 8.862269254527581e-16, + 8.86226925452758e-15, + 8.862269254527581e-11, + 8.86226925452758e-09, + 7.97604232907484e-08, + 9.74849617998037e-08, + 8.8622692545299e-07]) + assert_allclose(sc.erfinv(x), expected, + rtol=1e-15) + + # also test the roundtrip consistency + assert_allclose(sc.erf(sc.erfinv(x)), + x, + rtol=5e-15) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_exponential_integrals.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_exponential_integrals.py new file mode 100644 index 0000000000000000000000000000000000000000..8332a83267e2f75dded04e80443c150c832676c8 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_exponential_integrals.py @@ -0,0 +1,118 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose +import scipy.special as sc + + +class TestExp1: + + def test_branch_cut(self): + assert np.isnan(sc.exp1(-1)) + assert sc.exp1(complex(-1, 0)).imag == ( + -sc.exp1(complex(-1, -0.0)).imag + ) + + assert_allclose( + sc.exp1(complex(-1, 0)), + sc.exp1(-1 + 1e-20j), + atol=0, + rtol=1e-15 + ) + 
assert_allclose( + sc.exp1(complex(-1, -0.0)), + sc.exp1(-1 - 1e-20j), + atol=0, + rtol=1e-15 + ) + + def test_834(self): + # Regression test for #834 + a = sc.exp1(-complex(19.9999990)) + b = sc.exp1(-complex(19.9999991)) + assert_allclose(a.imag, b.imag, atol=0, rtol=1e-15) + + +class TestScaledExp1: + + @pytest.mark.parametrize('x, expected', [(0, 0), (np.inf, 1)]) + def test_limits(self, x, expected): + y = sc._ufuncs._scaled_exp1(x) + assert y == expected + + # The expected values were computed with mpmath, e.g.: + # + # from mpmath import mp + # mp.dps = 80 + # x = 1e-25 + # print(float(x*mp.exp(x)*np.expint(1, x))) + # + # prints 5.698741165994961e-24 + # + # The method used to compute _scaled_exp1 changes at x=1 + # and x=1250, so values at those inputs, and values just + # above and below them, are included in the test data. + @pytest.mark.parametrize('x, expected', + [(1e-25, 5.698741165994961e-24), + (0.1, 0.20146425447084518), + (0.9995, 0.5962509885831002), + (1.0, 0.5963473623231941), + (1.0005, 0.5964436833238044), + (2.5, 0.7588145912149602), + (10.0, 0.9156333393978808), + (100.0, 0.9901942286733019), + (500.0, 0.9980079523802055), + (1000.0, 0.9990019940238807), + (1249.5, 0.9992009578306811), + (1250.0, 0.9992012769377913), + (1250.25, 0.9992014363957858), + (2000.0, 0.9995004992514963), + (1e4, 0.9999000199940024), + (1e10, 0.9999999999), + (1e15, 0.999999999999999), + ]) + def test_scaled_exp1(self, x, expected): + y = sc._ufuncs._scaled_exp1(x) + assert_allclose(y, expected, rtol=2e-15) + + +class TestExpi: + + @pytest.mark.parametrize('result', [ + sc.expi(complex(-1, 0)), + sc.expi(complex(-1, -0.0)), + sc.expi(-1) + ]) + def test_branch_cut(self, result): + desired = -0.21938393439552027368 # Computed using Mpmath + assert_allclose(result, desired, atol=0, rtol=1e-14) + + def test_near_branch_cut(self): + lim_from_above = sc.expi(-1 + 1e-20j) + lim_from_below = sc.expi(-1 - 1e-20j) + assert_allclose( + lim_from_above.real, + 
lim_from_below.real, + atol=0, + rtol=1e-15 + ) + assert_allclose( + lim_from_above.imag, + -lim_from_below.imag, + atol=0, + rtol=1e-15 + ) + + def test_continuity_on_positive_real_axis(self): + assert_allclose( + sc.expi(complex(1, 0)), + sc.expi(complex(1, -0.0)), + atol=0, + rtol=1e-15 + ) + + +class TestExpn: + + def test_out_of_domain(self): + assert all(np.isnan([sc.expn(-1, 1.0), sc.expn(1, -1.0)])) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py new file mode 100644 index 0000000000000000000000000000000000000000..8868f66c47ce0d4bbb21c78435a6c89d44065252 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py @@ -0,0 +1,85 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose +import scipy.special as sc +from scipy.special._testutils import FuncData + + +class TestVoigtProfile: + + @pytest.mark.parametrize('x, sigma, gamma', [ + (np.nan, 1, 1), + (0, np.nan, 1), + (0, 1, np.nan), + (1, np.nan, 0), + (np.nan, 1, 0), + (1, 0, np.nan), + (np.nan, 0, 1), + (np.nan, 0, 0) + ]) + def test_nan(self, x, sigma, gamma): + assert np.isnan(sc.voigt_profile(x, sigma, gamma)) + + @pytest.mark.parametrize('x, desired', [ + (-np.inf, 0), + (np.inf, 0) + ]) + def test_inf(self, x, desired): + assert sc.voigt_profile(x, 1, 1) == desired + + def test_against_mathematica(self): + # Results obtained from Mathematica by computing + # + # PDF[VoigtDistribution[gamma, sigma], x] + # + points = np.array([ + [-7.89, 45.06, 6.66, 0.0077921073660388806401], + [-0.05, 7.98, 24.13, 0.012068223646769913478], + [-13.98, 16.83, 42.37, 0.0062442236362132357833], + [-12.66, 0.21, 6.32, 0.010052516161087379402], + [11.34, 4.25, 21.96, 0.0113698923627278917805], + [-11.56, 20.40, 30.53, 0.0076332760432097464987], + [-9.17, 25.61, 8.32, 0.011646345779083005429], + [16.59, 18.05, 2.50, 
0.013637768837526809181], + [9.11, 2.12, 39.33, 0.0076644040807277677585], + [-43.33, 0.30, 45.68, 0.0036680463875330150996] + ]) + FuncData( + sc.voigt_profile, + points, + (0, 1, 2), + 3, + atol=0, + rtol=1e-15 + ).check() + + def test_symmetry(self): + x = np.linspace(0, 10, 20) + assert_allclose( + sc.voigt_profile(x, 1, 1), + sc.voigt_profile(-x, 1, 1), + rtol=1e-15, + atol=0 + ) + + @pytest.mark.parametrize('x, sigma, gamma, desired', [ + (0, 0, 0, np.inf), + (1, 0, 0, 0) + ]) + def test_corner_cases(self, x, sigma, gamma, desired): + assert sc.voigt_profile(x, sigma, gamma) == desired + + @pytest.mark.parametrize('sigma1, gamma1, sigma2, gamma2', [ + (0, 1, 1e-16, 1), + (1, 0, 1, 1e-16), + (0, 0, 1e-16, 1e-16) + ]) + def test_continuity(self, sigma1, gamma1, sigma2, gamma2): + x = np.linspace(1, 10, 20) + assert_allclose( + sc.voigt_profile(x, sigma1, gamma1), + sc.voigt_profile(x, sigma2, gamma2), + rtol=1e-16, + atol=1e-16 + ) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_gamma.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_gamma.py new file mode 100644 index 0000000000000000000000000000000000000000..2e3fbd17dddeed73d311566a930f52899e3b9db6 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_gamma.py @@ -0,0 +1,12 @@ +import numpy as np +import scipy.special as sc + + +class TestRgamma: + + def test_gh_11315(self): + assert sc.rgamma(-35) == 0 + + def test_rgamma_zeros(self): + x = np.array([0, -10, -100, -1000, -10000]) + assert np.all(sc.rgamma(x) == 0) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py new file mode 100644 index 0000000000000000000000000000000000000000..aae34e5c23f2d293f362abd825f1dad454371ae0 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py @@ -0,0 +1,136 @@ +import pytest + 
+import numpy as np +from numpy.testing import assert_allclose, assert_array_equal + +import scipy.special as sc +from scipy.special._testutils import FuncData + + +INVALID_POINTS = [ + (1, -1), + (0, 0), + (-1, 1), + (np.nan, 1), + (1, np.nan) +] + + +class TestGammainc: + + @pytest.mark.parametrize('a, x', INVALID_POINTS) + def test_domain(self, a, x): + assert np.isnan(sc.gammainc(a, x)) + + def test_a_eq_0_x_gt_0(self): + assert sc.gammainc(0, 1) == 1 + + @pytest.mark.parametrize('a, x, desired', [ + (np.inf, 1, 0), + (np.inf, 0, 0), + (np.inf, np.inf, np.nan), + (1, np.inf, 1) + ]) + def test_infinite_arguments(self, a, x, desired): + result = sc.gammainc(a, x) + if np.isnan(desired): + assert np.isnan(result) + else: + assert result == desired + + def test_infinite_limits(self): + # Test that large arguments converge to the hard-coded limits + # at infinity. + assert_allclose( + sc.gammainc(1000, 100), + sc.gammainc(np.inf, 100), + atol=1e-200, # Use `atol` since the function converges to 0. + rtol=0 + ) + assert sc.gammainc(100, 1000) == sc.gammainc(100, np.inf) + + def test_x_zero(self): + a = np.arange(1, 10) + assert_array_equal(sc.gammainc(a, 0), 0) + + def test_limit_check(self): + result = sc.gammainc(1e-10, 1) + limit = sc.gammainc(0, 1) + assert np.isclose(result, limit) + + def gammainc_line(self, x): + # The line a = x where a simpler asymptotic expansion (analog + # of DLMF 8.12.15) is available. 
+ c = np.array([-1/3, -1/540, 25/6048, 101/155520, + -3184811/3695155200, -2745493/8151736420]) + res = 0 + xfac = 1 + for ck in c: + res -= ck*xfac + xfac /= x + res /= np.sqrt(2*np.pi*x) + res += 0.5 + return res + + def test_line(self): + x = np.logspace(np.log10(25), 300, 500) + a = x + dataset = np.vstack((a, x, self.gammainc_line(x))).T + FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check() + + def test_roundtrip(self): + a = np.logspace(-5, 10, 100) + x = np.logspace(-5, 10, 100) + + y = sc.gammaincinv(a, sc.gammainc(a, x)) + assert_allclose(x, y, rtol=1e-10) + + +class TestGammaincc: + + @pytest.mark.parametrize('a, x', INVALID_POINTS) + def test_domain(self, a, x): + assert np.isnan(sc.gammaincc(a, x)) + + def test_a_eq_0_x_gt_0(self): + assert sc.gammaincc(0, 1) == 0 + + @pytest.mark.parametrize('a, x, desired', [ + (np.inf, 1, 1), + (np.inf, 0, 1), + (np.inf, np.inf, np.nan), + (1, np.inf, 0) + ]) + def test_infinite_arguments(self, a, x, desired): + result = sc.gammaincc(a, x) + if np.isnan(desired): + assert np.isnan(result) + else: + assert result == desired + + def test_infinite_limits(self): + # Test that large arguments converge to the hard-coded limits + # at infinity. + assert sc.gammaincc(1000, 100) == sc.gammaincc(np.inf, 100) + assert_allclose( + sc.gammaincc(100, 1000), + sc.gammaincc(100, np.inf), + atol=1e-200, # Use `atol` since the function converges to 0. 
+ rtol=0 + ) + + def test_limit_check(self): + result = sc.gammaincc(1e-10,1) + limit = sc.gammaincc(0,1) + assert np.isclose(result, limit) + + def test_x_zero(self): + a = np.arange(1, 10) + assert_array_equal(sc.gammaincc(a, 0), 1) + + def test_roundtrip(self): + a = np.logspace(-5, 10, 100) + x = np.logspace(-5, 10, 100) + + y = sc.gammainccinv(a, sc.gammaincc(a, x)) + assert_allclose(x, y, rtol=1e-14) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_hypergeometric.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_hypergeometric.py new file mode 100644 index 0000000000000000000000000000000000000000..749a7e357417667d72d4295678d70830f6b93eb1 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_hypergeometric.py @@ -0,0 +1,140 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import scipy.special as sc + + +class TestHyperu: + + def test_negative_x(self): + a, b, x = np.meshgrid( + [-1, -0.5, 0, 0.5, 1], + [-1, -0.5, 0, 0.5, 1], + np.linspace(-100, -1, 10), + ) + assert np.all(np.isnan(sc.hyperu(a, b, x))) + + def test_special_cases(self): + assert sc.hyperu(0, 1, 1) == 1.0 + + @pytest.mark.parametrize('a', [0.5, 1, np.nan]) + @pytest.mark.parametrize('b', [1, 2, np.nan]) + @pytest.mark.parametrize('x', [0.25, 3, np.nan]) + def test_nan_inputs(self, a, b, x): + assert np.isnan(sc.hyperu(a, b, x)) == np.any(np.isnan([a, b, x])) + + +class TestHyp1f1: + + @pytest.mark.parametrize('a, b, x', [ + (np.nan, 1, 1), + (1, np.nan, 1), + (1, 1, np.nan) + ]) + def test_nan_inputs(self, a, b, x): + assert np.isnan(sc.hyp1f1(a, b, x)) + + def test_poles(self): + assert_equal(sc.hyp1f1(1, [0, -1, -2, -3, -4], 0.5), np.inf) + + @pytest.mark.parametrize('a, b, x, result', [ + (-1, 1, 0.5, 0.5), + (1, 1, 0.5, 1.6487212707001281468), + (2, 1, 0.5, 2.4730819060501922203), + (1, 2, 0.5, 1.2974425414002562937), + (-10, 1, 0.5, -0.38937441413785204475) + 
]) + def test_special_cases(self, a, b, x, result): + # Hit all the special case branches at the beginning of the + # function. Desired answers computed using Mpmath. + assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15) + + @pytest.mark.parametrize('a, b, x, result', [ + (1, 1, 0.44, 1.5527072185113360455), + (-1, 1, 0.44, 0.55999999999999999778), + (100, 100, 0.89, 2.4351296512898745592), + (-100, 100, 0.89, 0.40739062490768104667), + (1.5, 100, 59.99, 3.8073513625965598107), + (-1.5, 100, 59.99, 0.25099240047125826943) + ]) + def test_geometric_convergence(self, a, b, x, result): + # Test the region where we are relying on the ratio of + # + # (|a| + 1) * |x| / |b| + # + # being small. Desired answers computed using Mpmath + assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15) + + @pytest.mark.parametrize('a, b, x, result', [ + (-1, 1, 1.5, -0.5), + (-10, 1, 1.5, 0.41801777430943080357), + (-25, 1, 1.5, 0.25114491646037839809), + (-50, 1, 1.5, -0.25683643975194756115), + (-80, 1, 1.5, -0.24554329325751503601), + (-150, 1, 1.5, -0.173364795515420454496), + ]) + def test_a_negative_integer(self, a, b, x, result): + # Desired answers computed using Mpmath. + assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=2e-14) + + @pytest.mark.parametrize('a, b, x, expected', [ + (0.01, 150, -4, 0.99973683897677527773), # gh-3492 + (1, 5, 0.01, 1.0020033381011970966), # gh-3593 + (50, 100, 0.01, 1.0050126452421463411), # gh-3593 + (1, 0.3, -1e3, -7.011932249442947651455e-04), # gh-14149 + (1, 0.3, -1e4, -7.001190321418937164734e-05), # gh-14149 + (9, 8.5, -350, -5.224090831922378361082e-20), # gh-17120 + (9, 8.5, -355, -4.595407159813368193322e-20), # gh-17120 + (75, -123.5, 15, 3.425753920814889017493e+06), + ]) + def test_assorted_cases(self, a, b, x, expected): + # Expected values were computed with mpmath.hyp1f1(a, b, x). 
+ assert_allclose(sc.hyp1f1(a, b, x), expected, atol=0, rtol=1e-14) + + def test_a_neg_int_and_b_equal_x(self): + # This is a case where the Boost wrapper will call hypergeometric_pFq + # instead of hypergeometric_1F1. When we use a version of Boost in + # which https://github.com/boostorg/math/issues/833 is fixed, this + # test case can probably be moved into test_assorted_cases. + # The expected value was computed with mpmath.hyp1f1(a, b, x). + a = -10.0 + b = 2.5 + x = 2.5 + expected = 0.0365323664364104338721 + computed = sc.hyp1f1(a, b, x) + assert_allclose(computed, expected, atol=0, rtol=1e-13) + + @pytest.mark.parametrize('a, b, x, desired', [ + (-1, -2, 2, 2), + (-1, -4, 10, 3.5), + (-2, -2, 1, 2.5) + ]) + def test_gh_11099(self, a, b, x, desired): + # All desired results computed using Mpmath + assert sc.hyp1f1(a, b, x) == desired + + @pytest.mark.parametrize('a', [-3, -2]) + def test_x_zero_a_and_b_neg_ints_and_a_ge_b(self, a): + assert sc.hyp1f1(a, -3, 0) == 1 + + # The "legacy edge cases" mentioned in the comments in the following + # tests refers to the behavior of hyp1f1(a, b, x) when b is a nonpositive + # integer. In some subcases, the behavior of SciPy does not match that + # of Boost (1.81+), mpmath and Mathematica (via Wolfram Alpha online). + # If the handling of these edges cases is changed to agree with those + # libraries, these test will have to be updated. + + @pytest.mark.parametrize('b', [0, -1, -5]) + def test_legacy_case1(self, b): + # Test results of hyp1f1(0, n, x) for n <= 0. + # This is a legacy edge case. + # Boost (versions greater than 1.80), Mathematica (via Wolfram Alpha + # online) and mpmath all return 1 in this case, but SciPy's hyp1f1 + # returns inf. + assert_equal(sc.hyp1f1(0, b, [-1.5, 0, 1.5]), [np.inf, np.inf, np.inf]) + + def test_legacy_case2(self): + # This is a legacy edge case. + # In software such as boost (1.81+), mpmath and Mathematica, + # the value is 1. 
+ assert sc.hyp1f1(-4, -3, 0) == np.inf diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_kolmogorov.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_kolmogorov.py new file mode 100644 index 0000000000000000000000000000000000000000..bc427b0584ab87307c50ffb120fb8bc66a26df5a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_kolmogorov.py @@ -0,0 +1,495 @@ +import itertools +import sys +import pytest + +import numpy as np +from numpy.testing import assert_ +from scipy.special._testutils import FuncData + +from scipy.special import kolmogorov, kolmogi, smirnov, smirnovi +from scipy.special._ufuncs import (_kolmogc, _kolmogci, _kolmogp, + _smirnovc, _smirnovci, _smirnovp) + +_rtol = 1e-10 + +class TestSmirnov: + def test_nan(self): + assert_(np.isnan(smirnov(1, np.nan))) + + def test_basic(self): + dataset = [(1, 0.1, 0.9), + (1, 0.875, 0.125), + (2, 0.875, 0.125 * 0.125), + (3, 0.875, 0.125 * 0.125 * 0.125)] + + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_0(self): + dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_1(self): + dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, 
float]) + + def test_x_equals_0point5(self): + dataset = [(1, 0.5, 0.5), + (2, 0.5, 0.25), + (3, 0.5, 0.166666666667), + (4, 0.5, 0.09375), + (5, 0.5, 0.056), + (6, 0.5, 0.0327932098765), + (7, 0.5, 0.0191958707681), + (8, 0.5, 0.0112953186035), + (9, 0.5, 0.00661933257355), + (10, 0.5, 0.003888705)] + + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_1(self): + x = np.linspace(0, 1, 101, endpoint=True) + dataset = np.column_stack([[1]*len(x), x, 1-x]) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_2(self): + x = np.linspace(0.5, 1, 101, endpoint=True) + p = np.power(1-x, 2) + n = np.array([2] * len(x)) + dataset = np.column_stack([n, x, p]) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_3(self): + x = np.linspace(0.7, 1, 31, endpoint=True) + p = np.power(1-x, 3) + n = np.array([3] * len(x)) + dataset = np.column_stack([n, x, p]) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_large(self): + # test for large values of n + # Probabilities should go down as n goes up + x = 0.4 + pvals = np.array([smirnov(n, x) for n in range(400, 1100, 20)]) + dfs = np.diff(pvals) + assert_(np.all(dfs <= 0), msg='Not all diffs negative %s' % dfs) + + +class TestSmirnovi: + def 
test_nan(self): + assert_(np.isnan(smirnovi(1, np.nan))) + + def test_basic(self): + dataset = [(1, 0.4, 0.6), + (1, 0.6, 0.4), + (1, 0.99, 0.01), + (1, 0.01, 0.99), + (2, 0.125 * 0.125, 0.875), + (3, 0.125 * 0.125 * 0.125, 0.875), + (10, 1.0 / 16 ** 10, 1 - 1.0 / 16)] + + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_0(self): + dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_1(self): + dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_1(self): + pp = np.linspace(0, 1, 101, endpoint=True) + # dataset = np.array([(1, p, 1-p) for p in pp]) + dataset = np.column_stack([[1]*len(pp), pp, 1-pp]) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_2(self): + x = np.linspace(0.5, 1, 101, endpoint=True) + p = np.power(1-x, 2) + n = np.array([2] * len(x)) + dataset = np.column_stack([n, p, x]) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, 
dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_3(self): + x = np.linspace(0.7, 1, 31, endpoint=True) + p = np.power(1-x, 3) + n = np.array([3] * len(x)) + dataset = np.column_stack([n, p, x]) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_round_trip(self): + def _sm_smi(n, p): + return smirnov(n, smirnovi(n, p)) + + def _smc_smci(n, p): + return _smirnovc(n, _smirnovci(n, p)) + + dataset = [(1, 0.4, 0.4), + (1, 0.6, 0.6), + (2, 0.875, 0.875), + (3, 0.875, 0.875), + (3, 0.125, 0.125), + (10, 0.999, 0.999), + (10, 0.0001, 0.0001)] + + dataset = np.asarray(dataset) + FuncData( + _sm_smi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + FuncData( + _smc_smci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_0point5(self): + dataset = [(1, 0.5, 0.5), + (2, 0.5, 0.366025403784), + (2, 0.25, 0.5), + (3, 0.5, 0.297156508177), + (4, 0.5, 0.255520481121), + (5, 0.5, 0.234559536069), + (6, 0.5, 0.21715965898), + (7, 0.5, 0.202722580034), + (8, 0.5, 0.190621765256), + (9, 0.5, 0.180363501362), + (10, 0.5, 0.17157867006)] + + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + +class TestSmirnovp: + def test_nan(self): + assert_(np.isnan(_smirnovp(1, np.nan))) + + def test_basic(self): + # Check derivative at endpoints + n1_10 = np.arange(1, 10) + dataset0 = np.column_stack([n1_10, + np.full_like(n1_10, 0), + np.full_like(n1_10, -1)]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + n2_10 = np.arange(2, 10) + dataset1 = 
np.column_stack([n2_10, + np.full_like(n2_10, 1.0), + np.full_like(n2_10, 0)]) + FuncData( + _smirnovp, dataset1, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_oneminusoneovern(self): + # Check derivative at x=1-1/n + n = np.arange(1, 20) + x = 1.0/n + xm1 = 1-1.0/n + pp1 = -n * x**(n-1) + pp1 -= (1-np.sign(n-2)**2) * 0.5 # n=2, x=0.5, 1-1/n = 0.5, need to adjust + dataset1 = np.column_stack([n, xm1, pp1]) + FuncData( + _smirnovp, dataset1, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_oneovertwon(self): + # Check derivative at x=1/2n (Discontinuous at x=1/n, so check at x=1/2n) + n = np.arange(1, 20) + x = 1.0/2/n + pp = -(n*x+1) * (1+x)**(n-2) + dataset0 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_oneovern(self): + # Check derivative at x=1/n + # (Discontinuous at x=1/n, hard to tell if x==1/n, only use n=power of 2) + n = 2**np.arange(1, 10) + x = 1.0/n + pp = -(n*x+1) * (1+x)**(n-2) + 0.5 + dataset0 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + @pytest.mark.xfail(sys.maxsize <= 2**32, + reason="requires 64-bit platform") + def test_oneovernclose(self): + # Check derivative at x=1/n + # (Discontinuous at x=1/n, test on either side: x=1/n +/- 2epsilon) + n = np.arange(3, 20) + + x = 1.0/n - 2*np.finfo(float).eps + pp = -(n*x+1) * (1+x)**(n-2) + dataset0 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + x = 1.0/n + 2*np.finfo(float).eps + pp = -(n*x+1) * (1+x)**(n-2) + 1 + dataset1 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset1, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + +class TestKolmogorov: + def test_nan(self): + assert_(np.isnan(kolmogorov(np.nan))) + + def test_basic(self): + dataset = [(0, 1.0), + (0.5, 
0.96394524366487511), + (0.8275735551899077, 0.5000000000000000), + (1, 0.26999967167735456), + (2, 0.00067092525577969533)] + + dataset = np.asarray(dataset) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + + def test_linspace(self): + x = np.linspace(0, 2.0, 21) + dataset = [1.0000000000000000, 1.0000000000000000, 0.9999999999994950, + 0.9999906941986655, 0.9971923267772983, 0.9639452436648751, + 0.8642827790506042, 0.7112351950296890, 0.5441424115741981, + 0.3927307079406543, 0.2699996716773546, 0.1777181926064012, + 0.1122496666707249, 0.0680922218447664, 0.0396818795381144, + 0.0222179626165251, 0.0119520432391966, 0.0061774306344441, + 0.0030676213475797, 0.0014636048371873, 0.0006709252557797] + + dataset_c = [0.0000000000000000, 6.609305242245699e-53, 5.050407338670114e-13, + 9.305801334566668e-06, 0.0028076732227017, 0.0360547563351249, + 0.1357172209493958, 0.2887648049703110, 0.4558575884258019, + 0.6072692920593457, 0.7300003283226455, 0.8222818073935988, + 0.8877503333292751, 0.9319077781552336, 0.9603181204618857, + 0.9777820373834749, 0.9880479567608034, 0.9938225693655559, + 0.9969323786524203, 0.9985363951628127, 0.9993290747442203] + + dataset = np.column_stack([x, dataset]) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + dataset_c = np.column_stack([x, dataset_c]) + FuncData(_kolmogc, dataset_c, (0,), 1, rtol=_rtol).check() + + def test_linspacei(self): + p = np.linspace(0, 1.0, 21, endpoint=True) + dataset = [np.inf, 1.3580986393225507, 1.2238478702170823, + 1.1379465424937751, 1.0727491749396481, 1.0191847202536859, + 0.9730633753323726, 0.9320695842357622, 0.8947644549851197, + 0.8601710725555463, 0.8275735551899077, 0.7964065373291559, + 0.7661855555617682, 0.7364542888171910, 0.7067326523068980, + 0.6764476915028201, 0.6448126061663567, 0.6105590999244391, + 0.5711732651063401, 0.5196103791686224, 0.0000000000000000] + + dataset_c = [0.0000000000000000, 0.5196103791686225, 0.5711732651063401, + 
0.6105590999244391, 0.6448126061663567, 0.6764476915028201, + 0.7067326523068980, 0.7364542888171910, 0.7661855555617682, + 0.7964065373291559, 0.8275735551899077, 0.8601710725555463, + 0.8947644549851196, 0.9320695842357622, 0.9730633753323727, + 1.0191847202536859, 1.0727491749396481, 1.1379465424937754, + 1.2238478702170825, 1.3580986393225509, np.inf] + + dataset = np.column_stack([p[1:], dataset[1:]]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + dataset_c = np.column_stack([p[:-1], dataset_c[:-1]]) + FuncData(_kolmogci, dataset_c, (0,), 1, rtol=_rtol).check() + + def test_smallx(self): + epsilon = 0.1 ** np.arange(1, 14) + x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217, + 0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254, + 0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658, + 0.19487060742]) + + dataset = np.column_stack([x, 1-epsilon]) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + + def test_round_trip(self): + def _ki_k(_x): + return kolmogi(kolmogorov(_x)) + + def _kci_kc(_x): + return _kolmogci(_kolmogc(_x)) + + x = np.linspace(0.0, 2.0, 21, endpoint=True) + # Exclude 0.1, 0.2. 0.2 almost makes succeeds, but 0.1 has no chance. 
+ x02 = x[(x == 0) | (x > 0.21)] + dataset02 = np.column_stack([x02, x02]) + FuncData(_ki_k, dataset02, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([x, x]) + FuncData(_kci_kc, dataset, (0,), 1, rtol=_rtol).check() + + +class TestKolmogi: + def test_nan(self): + assert_(np.isnan(kolmogi(np.nan))) + + def test_basic(self): + dataset = [(1.0, 0), + (0.96394524366487511, 0.5), + (0.9, 0.571173265106), + (0.5000000000000000, 0.8275735551899077), + (0.26999967167735456, 1), + (0.00067092525577969533, 2)] + + dataset = np.asarray(dataset) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + def test_smallpcdf(self): + epsilon = 0.5 ** np.arange(1, 55, 3) + # kolmogi(1-p) == _kolmogci(p) if 1-(1-p) == p, but not necessarily otherwise + # Use epsilon s.t. 1-(1-epsilon)) == epsilon, + # so can use same x-array for both results + + x = np.array([0.8275735551899077, 0.5345255069097583, 0.4320114038786941, + 0.3736868442620478, 0.3345161714909591, 0.3057833329315859, + 0.2835052890528936, 0.2655578150208676, 0.2506869966107999, + 0.2380971058736669, 0.2272549289962079, 0.2177876361600040, + 0.2094254686862041, 0.2019676748836232, 0.1952612948137504, + 0.1891874239646641, 0.1836520225050326, 0.1785795904846466]) + + dataset = np.column_stack([1-epsilon, x]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([epsilon, x]) + FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check() + + def test_smallpsf(self): + epsilon = 0.5 ** np.arange(1, 55, 3) + # kolmogi(p) == _kolmogci(1-p) if 1-(1-p) == p, but not necessarily otherwise + # Use epsilon s.t. 
1-(1-epsilon)) == epsilon, + # so can use same x-array for both results + + x = np.array([0.8275735551899077, 1.3163786275161036, 1.6651092133663343, + 1.9525136345289607, 2.2027324540033235, 2.4272929437460848, + 2.6327688477341593, 2.8233300509220260, 3.0018183401530627, + 3.1702735084088891, 3.3302184446307912, 3.4828258153113318, + 3.6290214150152051, 3.7695513262825959, 3.9050272690877326, + 4.0359582187082550, 4.1627730557884890, 4.2858371743264527]) + + dataset = np.column_stack([epsilon, x]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([1-epsilon, x]) + FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check() + + def test_round_trip(self): + def _k_ki(_p): + return kolmogorov(kolmogi(_p)) + + p = np.linspace(0.1, 1.0, 10, endpoint=True) + dataset = np.column_stack([p, p]) + FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check() + + +class TestKolmogp: + def test_nan(self): + assert_(np.isnan(_kolmogp(np.nan))) + + def test_basic(self): + dataset = [(0.000000, -0.0), + (0.200000, -1.532420541338916e-10), + (0.400000, -0.1012254419260496), + (0.600000, -1.324123244249925), + (0.800000, -1.627024345636592), + (1.000000, -1.071948558356941), + (1.200000, -0.538512430720529), + (1.400000, -0.2222133182429472), + (1.600000, -0.07649302775520538), + (1.800000, -0.02208687346347873), + (2.000000, -0.005367402045629683)] + + dataset = np.asarray(dataset) + FuncData(_kolmogp, dataset, (0,), 1, rtol=_rtol).check() diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_lambertw.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_lambertw.py new file mode 100644 index 0000000000000000000000000000000000000000..c7fde685406661b821bb1dc490ca0da173eb4bd0 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_lambertw.py @@ -0,0 +1,109 @@ +# +# Tests for the lambertw function, +# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il +# 
Distributed under the same license as SciPy itself. +# +# [1] mpmath source code, Subversion revision 992 +# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992 + +import pytest +import numpy as np +from numpy.testing import assert_, assert_equal, assert_array_almost_equal +from scipy.special import lambertw +from numpy import nan, inf, pi, e, isnan, log, r_, array, complex128 + +from scipy.special._testutils import FuncData + + +def test_values(): + assert_(isnan(lambertw(nan))) + assert_equal(lambertw(inf,1).real, inf) + assert_equal(lambertw(inf,1).imag, 2*pi) + assert_equal(lambertw(-inf,1).real, inf) + assert_equal(lambertw(-inf,1).imag, 3*pi) + + assert_equal(lambertw(1.), lambertw(1., 0)) + + data = [ + (0,0, 0), + (0+0j,0, 0), + (inf,0, inf), + (0,-1, -inf), + (0,1, -inf), + (0,3, -inf), + (e,0, 1), + (1,0, 0.567143290409783873), + (-pi/2,0, 1j*pi/2), + (-log(2)/2,0, -log(2)), + (0.25,0, 0.203888354702240164), + (-0.25,0, -0.357402956181388903), + (-1./10000,0, -0.000100010001500266719), + (-0.25,-1, -2.15329236411034965), + (0.25,-1, -3.00899800997004620-4.07652978899159763j), + (-0.25,-1, -2.15329236411034965), + (0.25,1, -3.00899800997004620+4.07652978899159763j), + (-0.25,1, -3.48973228422959210+7.41405453009603664j), + (-4,0, 0.67881197132094523+1.91195078174339937j), + (-4,1, -0.66743107129800988+7.76827456802783084j), + (-4,-1, 0.67881197132094523-1.91195078174339937j), + (1000,0, 5.24960285240159623), + (1000,1, 4.91492239981054535+5.44652615979447070j), + (1000,-1, 4.91492239981054535-5.44652615979447070j), + (1000,5, 3.5010625305312892+29.9614548941181328j), + (3+4j,0, 1.281561806123775878+0.533095222020971071j), + (-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j), + (3+4j,1, -0.11691092896595324+5.61888039871282334j), + (3+4j,-1, 0.25856740686699742-3.85211668616143559j), + (-0.5,-1, -0.794023632344689368-0.770111750510379110j), + (-1./10000,1, -11.82350837248724344+6.80546081842002101j), + 
(-1./10000,-1, -11.6671145325663544), + (-1./10000,-2, -11.82350837248724344-6.80546081842002101j), + (-1./100000,4, -14.9186890769540539+26.1856750178782046j), + (-1./100000,5, -15.0931437726379218666+32.5525721210262290086j), + ((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j), + ((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j), + ((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j), + ((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j), + (-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j), + (-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j), + (-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j), + (-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j), + (pi,0, 1.073658194796149172092178407024821347547745350410314531), + + # Former bug in generated branch, + (-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j), + (-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j), + (-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j), + (-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j), + ] + data = array(data, dtype=complex128) + + def w(x, y): + return lambertw(x, y.real.astype(int)) + with np.errstate(all='ignore'): + FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check() + + +def test_ufunc(): + assert_array_almost_equal( + lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873]) + + +def test_lambertw_ufunc_loop_selection(): + # see https://github.com/scipy/scipy/issues/4895 + dt = np.dtype(np.complex128) + assert_equal(lambertw(0, 0, 0).dtype, dt) + assert_equal(lambertw([0], 0, 0).dtype, dt) + assert_equal(lambertw(0, [0], 0).dtype, dt) + assert_equal(lambertw(0, 0, [0]).dtype, dt) + assert_equal(lambertw([0], [0], [0]).dtype, dt) + + +@pytest.mark.parametrize('z', [1e-316, -2e-320j, -5e-318+1e-320j]) +def test_lambertw_subnormal_k0(z): + # Verify that subnormal inputs are handled correctly on + # the branch k=0 (regression test for gh-16291). 
+ w = lambertw(z) + # For values this small, we can be sure that numerically, + # lambertw(z) is z. + assert w == z diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py new file mode 100644 index 0000000000000000000000000000000000000000..2fcb5a20037de46df939895d38fbe5fe6b85c9aa --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py @@ -0,0 +1,70 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_ + +from scipy.special._testutils import FuncData +from scipy.special import gamma, gammaln, loggamma + + +def test_identities1(): + # test the identity exp(loggamma(z)) = gamma(z) + x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]) + y = x.copy() + x, y = np.meshgrid(x, y) + z = (x + 1J*y).flatten() + dataset = np.vstack((z, gamma(z))).T + + def f(z): + return np.exp(loggamma(z)) + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_identities2(): + # test the identity loggamma(z + 1) = log(z) + loggamma(z) + x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]) + y = x.copy() + x, y = np.meshgrid(x, y) + z = (x + 1J*y).flatten() + dataset = np.vstack((z, np.log(z) + loggamma(z))).T + + def f(z): + return loggamma(z + 1) + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_complex_dispatch_realpart(): + # Test that the real parts of loggamma and gammaln agree on the + # real axis. 
+ x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5 + + dataset = np.vstack((x, gammaln(x))).T + + def f(z): + z = np.array(z, dtype='complex128') + return loggamma(z).real + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_real_dispatch(): + x = np.logspace(-10, 10) + 0.5 + dataset = np.vstack((x, gammaln(x))).T + + FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + assert_(loggamma(0) == np.inf) + assert_(np.isnan(loggamma(-1))) + + +def test_gh_6536(): + z = loggamma(complex(-3.4, +0.0)) + zbar = loggamma(complex(-3.4, -0.0)) + assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0) + + +def test_branch_cut(): + # Make sure negative zero is treated correctly + x = -np.logspace(300, -30, 100) + z = np.asarray([complex(x0, 0.0) for x0 in x]) + zbar = np.asarray([complex(x0, -0.0) for x0 in x]) + assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9848062d3f6496dd19be34ceb9abe2858e8d48 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py @@ -0,0 +1,207 @@ +import numpy as np +from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose, + assert_array_almost_equal, assert_) + +from scipy.special import logsumexp, softmax + + +def test_logsumexp(): + # Test whether logsumexp() function correctly handles large inputs. 
+ a = np.arange(200) + desired = np.log(np.sum(np.exp(a))) + assert_almost_equal(logsumexp(a), desired) + + # Now test with large numbers + b = [1000, 1000] + desired = 1000.0 + np.log(2.0) + assert_almost_equal(logsumexp(b), desired) + + n = 1000 + b = np.full(n, 10000, dtype='float64') + desired = 10000.0 + np.log(n) + assert_almost_equal(logsumexp(b), desired) + + x = np.array([1e-40] * 1000000) + logx = np.log(x) + + X = np.vstack([x, x]) + logX = np.vstack([logx, logx]) + assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum()) + assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) + assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) + + # Handling special values properly + assert_equal(logsumexp(np.inf), np.inf) + assert_equal(logsumexp(-np.inf), -np.inf) + assert_equal(logsumexp(np.nan), np.nan) + assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf) + + # Handling an array with different magnitudes on the axes + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], axis=-1), + [1e10, -1e10]) + + # Test keeping dimensions + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], + axis=-1, + keepdims=True), + [[1e10], [-1e10]]) + + # Test multiple axes + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], + axis=(-1,-2)), + 1e10) + + +def test_logsumexp_b(): + a = np.arange(200) + b = np.arange(200, 0, -1) + desired = np.log(np.sum(b*np.exp(a))) + assert_almost_equal(logsumexp(a, b=b), desired) + + a = [1000, 1000] + b = [1.2, 1.2] + desired = 1000 + np.log(2 * 1.2) + assert_almost_equal(logsumexp(a, b=b), desired) + + x = np.array([1e-40] * 100000) + b = np.linspace(1, 1000, 100000) + logx = np.log(x) + + X = np.vstack((x, x)) + logX = np.vstack((logx, logx)) + B = np.vstack((b, b)) + assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum()) + assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)), + (B * X).sum(axis=0)) + 
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)), + (B * X).sum(axis=1)) + + +def test_logsumexp_sign(): + a = [1,1,1] + b = [1,-1,-1] + + r, s = logsumexp(a, b=b, return_sign=True) + assert_almost_equal(r,1) + assert_equal(s,-1) + + +def test_logsumexp_sign_zero(): + a = [1,1] + b = [1,-1] + + r, s = logsumexp(a, b=b, return_sign=True) + assert_(not np.isfinite(r)) + assert_(not np.isnan(r)) + assert_(r < 0) + assert_equal(s,0) + + +def test_logsumexp_sign_shape(): + a = np.ones((1,2,3,4)) + b = np.ones_like(a) + + r, s = logsumexp(a, axis=2, b=b, return_sign=True) + + assert_equal(r.shape, s.shape) + assert_equal(r.shape, (1,2,4)) + + r, s = logsumexp(a, axis=(1,3), b=b, return_sign=True) + + assert_equal(r.shape, s.shape) + assert_equal(r.shape, (1,3)) + + +def test_logsumexp_complex_sign(): + a = np.array([1 + 1j, 2 - 1j, -2 + 3j]) + + r, s = logsumexp(a, return_sign=True) + + expected_sumexp = np.exp(a).sum() + # This is the numpy>=2.0 convention for np.sign + expected_sign = expected_sumexp / abs(expected_sumexp) + + assert_allclose(s, expected_sign) + assert_allclose(s * np.exp(r), expected_sumexp) + + +def test_logsumexp_shape(): + a = np.ones((1, 2, 3, 4)) + b = np.ones_like(a) + + r = logsumexp(a, axis=2, b=b) + assert_equal(r.shape, (1, 2, 4)) + + r = logsumexp(a, axis=(1, 3), b=b) + assert_equal(r.shape, (1, 3)) + + +def test_logsumexp_b_zero(): + a = [1,10000] + b = [1,0] + + assert_almost_equal(logsumexp(a, b=b), 1) + + +def test_logsumexp_b_shape(): + a = np.zeros((4,1,2,1)) + b = np.ones((3,1,5)) + + logsumexp(a, b=b) + + +def test_softmax_fixtures(): + assert_allclose(softmax([1000, 0, 0, 0]), np.array([1, 0, 0, 0]), + rtol=1e-13) + assert_allclose(softmax([1, 1]), np.array([.5, .5]), rtol=1e-13) + assert_allclose(softmax([0, 1]), np.array([1, np.e])/(1 + np.e), + rtol=1e-13) + + # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then + # converted to float. 
+ x = np.arange(4) + expected = np.array([0.03205860328008499, + 0.08714431874203256, + 0.23688281808991013, + 0.6439142598879722]) + + assert_allclose(softmax(x), expected, rtol=1e-13) + + # Translation property. If all the values are changed by the same amount, + # the softmax result does not change. + assert_allclose(softmax(x + 100), expected, rtol=1e-13) + + # When axis=None, softmax operates on the entire array, and preserves + # the shape. + assert_allclose(softmax(x.reshape(2, 2)), expected.reshape(2, 2), + rtol=1e-13) + + +def test_softmax_multi_axes(): + assert_allclose(softmax([[1000, 0], [1000, 0]], axis=0), + np.array([[.5, .5], [.5, .5]]), rtol=1e-13) + assert_allclose(softmax([[1000, 0], [1000, 0]], axis=1), + np.array([[1, 0], [1, 0]]), rtol=1e-13) + + # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then + # converted to float. + x = np.array([[-25, 0, 25, 50], + [1, 325, 749, 750]]) + expected = np.array([[2.678636961770877e-33, + 1.9287498479371314e-22, + 1.3887943864771144e-11, + 0.999999999986112], + [0.0, + 1.9444526359919372e-185, + 0.2689414213699951, + 0.7310585786300048]]) + assert_allclose(softmax(x, axis=1), expected, rtol=1e-13) + assert_allclose(softmax(x.T, axis=0), expected.T, rtol=1e-13) + + # 3-d input, with a tuple for the axis. + x3d = x.reshape(2, 2, 2) + assert_allclose(softmax(x3d, axis=(1, 2)), expected.reshape(2, 2, 2), + rtol=1e-13) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_mpmath.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_mpmath.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6cf3b91692d9daa7c01317f484d160d3798c8c --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_mpmath.py @@ -0,0 +1,2272 @@ +""" +Test SciPy functions versus mpmath, if available. 
+ +""" +import numpy as np +from numpy.testing import assert_, assert_allclose +from numpy import pi +import pytest +import itertools + +from scipy._lib import _pep440 + +import scipy.special as sc +from scipy.special._testutils import ( + MissingModule, check_version, FuncData, + assert_func_equal) +from scipy.special._mptestutils import ( + Arg, FixedArg, ComplexArg, IntArg, assert_mpmath_equal, + nonfunctional_tooslow, trace_args, time_limited, exception_to_nan, + inf_to_nan) +from scipy.special._ufuncs import ( + _sinpi, _cospi, _lgam1p, _lanczos_sum_expg_scaled, _log1pmx, + _igam_fac) + +try: + import mpmath +except ImportError: + mpmath = MissingModule('mpmath') + + +# ------------------------------------------------------------------------------ +# expi +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.10') +def test_expi_complex(): + dataset = [] + for r in np.logspace(-99, 2, 10): + for p in np.linspace(0, 2*np.pi, 30): + z = r*np.exp(1j*p) + dataset.append((z, complex(mpmath.ei(z)))) + dataset = np.array(dataset, dtype=np.cdouble) + + FuncData(sc.expi, dataset, 0, 1).check() + + +# ------------------------------------------------------------------------------ +# expn +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +def test_expn_large_n(): + # Test the transition to the asymptotic regime of n. 
+ dataset = [] + for n in [50, 51]: + for x in np.logspace(0, 4, 200): + with mpmath.workdps(100): + dataset.append((n, x, float(mpmath.expint(n, x)))) + dataset = np.asarray(dataset) + + FuncData(sc.expn, dataset, (0, 1), 2, rtol=1e-13).check() + +# ------------------------------------------------------------------------------ +# hyp0f1 +# ------------------------------------------------------------------------------ + + +@check_version(mpmath, '0.19') +def test_hyp0f1_gh5764(): + # Do a small and somewhat systematic test that runs quickly + dataset = [] + axis = [-99.5, -9.5, -0.5, 0.5, 9.5, 99.5] + for v in axis: + for x in axis: + for y in axis: + z = x + 1j*y + # mpmath computes the answer correctly at dps ~ 17 but + # fails for 20 < dps < 120 (uses a different method); + # set the dps high enough that this isn't an issue + with mpmath.workdps(120): + res = complex(mpmath.hyp0f1(v, z)) + dataset.append((v, z, res)) + dataset = np.array(dataset) + + FuncData(lambda v, z: sc.hyp0f1(v.real, z), dataset, (0, 1), 2, + rtol=1e-13).check() + + +@check_version(mpmath, '0.19') +def test_hyp0f1_gh_1609(): + # this is a regression test for gh-1609 + vv = np.linspace(150, 180, 21) + af = sc.hyp0f1(vv, 0.5) + mf = np.array([mpmath.hyp0f1(v, 0.5) for v in vv]) + assert_allclose(af, mf.astype(float), rtol=1e-12) + + +# ------------------------------------------------------------------------------ +# hyperu +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '1.1.0') +def test_hyperu_around_0(): + dataset = [] + # DLMF 13.2.14-15 test points. + for n in np.arange(-5, 5): + for b in np.linspace(-5, 5, 20): + a = -n + dataset.append((a, b, 0, float(mpmath.hyperu(a, b, 0)))) + a = -n + b - 1 + dataset.append((a, b, 0, float(mpmath.hyperu(a, b, 0)))) + # DLMF 13.2.16-22 test points. 
+ for a in [-10.5, -1.5, -0.5, 0, 0.5, 1, 10]: + for b in [-1.0, -0.5, 0, 0.5, 1, 1.5, 2, 2.5]: + dataset.append((a, b, 0, float(mpmath.hyperu(a, b, 0)))) + dataset = np.array(dataset) + + FuncData(sc.hyperu, dataset, (0, 1, 2), 3, rtol=1e-15, atol=5e-13).check() + + +# ------------------------------------------------------------------------------ +# hyp2f1 +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '1.0.0') +def test_hyp2f1_strange_points(): + pts = [ + (2, -1, -1, 0.7), # expected: 2.4 + (2, -2, -2, 0.7), # expected: 3.87 + ] + pts += list(itertools.product([2, 1, -0.7, -1000], repeat=4)) + pts = [ + (a, b, c, x) for a, b, c, x in pts + if b == c and round(b) == b and b < 0 and b != -1000 + ] + kw = dict(eliminate=True) + dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts] + dataset = np.array(dataset, dtype=np.float64) + + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() + + +@check_version(mpmath, '0.13') +def test_hyp2f1_real_some_points(): + pts = [ + (1, 2, 3, 0), + (1./3, 2./3, 5./6, 27./32), + (1./4, 1./2, 3./4, 80./81), + (2,-2, -3, 3), + (2, -3, -2, 3), + (2, -1.5, -1.5, 3), + (1, 2, 3, 0), + (0.7235, -1, -5, 0.3), + (0.25, 1./3, 2, 0.999), + (0.25, 1./3, 2, -1), + (2, 3, 5, 0.99), + (3./2, -0.5, 3, 0.99), + (2, 2.5, -3.25, 0.999), + (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001), + (-10, 900, -10.5, 0.99), + (-10, 900, 10.5, 0.99), + (-1, 2, 1, 1.0), + (-1, 2, 1, -1.0), + (-3, 13, 5, 1.0), + (-3, 13, 5, -1.0), + (0.5, 1 - 270.5, 1.5, 0.999**2), # from issue 1561 + ] + dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts] + dataset = np.array(dataset, dtype=np.float64) + + with np.errstate(invalid='ignore'): + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() + + +@check_version(mpmath, '0.14') +def test_hyp2f1_some_points_2(): + # Taken from mpmath unit tests -- this point failed for mpmath 0.13 but + # was fixed in their SVN 
since then + pts = [ + (112, (51,10), (-9,10), -0.99999), + (10,-900,10.5,0.99), + (10,-900,-10.5,0.99), + ] + + def fev(x): + if isinstance(x, tuple): + return float(x[0]) / x[1] + else: + return x + + dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts] + dataset = np.array(dataset, dtype=np.float64) + + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() + + +@check_version(mpmath, '0.13') +def test_hyp2f1_real_some(): + dataset = [] + for a in [-10, -5, -1.8, 1.8, 5, 10]: + for b in [-2.5, -1, 1, 7.4]: + for c in [-9, -1.8, 5, 20.4]: + for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]: + try: + v = float(mpmath.hyp2f1(a, b, c, z)) + except Exception: + continue + dataset.append((a, b, c, z, v)) + dataset = np.array(dataset, dtype=np.float64) + + with np.errstate(invalid='ignore'): + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9, + ignore_inf_sign=True).check() + + +@check_version(mpmath, '0.12') +@pytest.mark.slow +def test_hyp2f1_real_random(): + npoints = 500 + dataset = np.zeros((npoints, 5), np.float64) + + np.random.seed(1234) + dataset[:, 0] = np.random.pareto(1.5, npoints) + dataset[:, 1] = np.random.pareto(1.5, npoints) + dataset[:, 2] = np.random.pareto(1.5, npoints) + dataset[:, 3] = 2*np.random.rand(npoints) - 1 + + dataset[:, 0] *= (-1)**np.random.randint(2, npoints) + dataset[:, 1] *= (-1)**np.random.randint(2, npoints) + dataset[:, 2] *= (-1)**np.random.randint(2, npoints) + + for ds in dataset: + if mpmath.__version__ < '0.14': + # mpmath < 0.14 fails for c too much smaller than a, b + if abs(ds[:2]).max() > abs(ds[2]): + ds[2] = abs(ds[:2]).max() + ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4]))) + + FuncData(sc.hyp2f1, dataset, (0, 1, 2, 3), 4, rtol=1e-9).check() + + +# ------------------------------------------------------------------------------ +# erf (complex) +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.14') +def test_erf_complex(): + # 
need to increase mpmath precision for this test + old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec + try: + mpmath.mp.dps = 70 + x1, y1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11)) + x2, y2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11)) + points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(), y2.ravel()] + + assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points, + vectorized=False, rtol=1e-13) + assert_func_equal(sc.erfc, lambda x: complex(mpmath.erfc(x)), points, + vectorized=False, rtol=1e-13) + finally: + mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec + + +# ------------------------------------------------------------------------------ +# lpmv +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.15') +def test_lpmv(): + pts = [] + for x in [-0.99, -0.557, 1e-6, 0.132, 1]: + pts.extend([ + (1, 1, x), + (1, -1, x), + (-1, 1, x), + (-1, -2, x), + (1, 1.7, x), + (1, -1.7, x), + (-1, 1.7, x), + (-1, -2.7, x), + (1, 10, x), + (1, 11, x), + (3, 8, x), + (5, 11, x), + (-3, 8, x), + (-5, 11, x), + (3, -8, x), + (5, -11, x), + (-3, -8, x), + (-5, -11, x), + (3, 8.3, x), + (5, 11.3, x), + (-3, 8.3, x), + (-5, 11.3, x), + (3, -8.3, x), + (5, -11.3, x), + (-3, -8.3, x), + (-5, -11.3, x), + ]) + + def mplegenp(nu, mu, x): + if mu == int(mu) and x == 1: + # mpmath 0.17 gets this wrong + if mu == 0: + return 1 + else: + return 0 + return mpmath.legenp(nu, mu, x) + + dataset = [p + (mplegenp(p[1], p[0], p[2]),) for p in pts] + dataset = np.array(dataset, dtype=np.float64) + + def evf(mu, nu, x): + return sc.lpmv(mu.astype(int), nu, x) + + with np.errstate(invalid='ignore'): + FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check() + + +# ------------------------------------------------------------------------------ +# beta +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.15') +def 
test_beta(): + np.random.seed(1234) + + b = np.r_[np.logspace(-200, 200, 4), + np.logspace(-10, 10, 4), + np.logspace(-1, 1, 4), + np.arange(-10, 11, 1), + np.arange(-10, 11, 1) + 0.5, + -1, -2.3, -3, -100.3, -10003.4] + a = b + + ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T + + old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec + try: + mpmath.mp.dps = 400 + + assert_func_equal(sc.beta, + lambda a, b: float(mpmath.beta(a, b)), + ab, + vectorized=False, + rtol=1e-10, + ignore_inf_sign=True) + + assert_func_equal( + sc.betaln, + lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))), + ab, + vectorized=False, + rtol=1e-10) + finally: + mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec + + +# ------------------------------------------------------------------------------ +# loggamma +# ------------------------------------------------------------------------------ + +LOGGAMMA_TAYLOR_RADIUS = 0.2 + + +@check_version(mpmath, '0.19') +def test_loggamma_taylor_transition(): + # Make sure there isn't a big jump in accuracy when we move from + # using the Taylor series to using the recurrence relation. + + r = LOGGAMMA_TAYLOR_RADIUS + np.array([-0.1, -0.01, 0, 0.01, 0.1]) + theta = np.linspace(0, 2*np.pi, 20) + r, theta = np.meshgrid(r, theta) + dz = r*np.exp(1j*theta) + z = np.r_[1 + dz, 2 + dz].flatten() + + dataset = [(z0, complex(mpmath.loggamma(z0))) for z0 in z] + dataset = np.array(dataset) + + FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check() + + +@check_version(mpmath, '0.19') +def test_loggamma_taylor(): + # Test around the zeros at z = 1, 2. 
+ + r = np.logspace(-16, np.log10(LOGGAMMA_TAYLOR_RADIUS), 10) + theta = np.linspace(0, 2*np.pi, 20) + r, theta = np.meshgrid(r, theta) + dz = r*np.exp(1j*theta) + z = np.r_[1 + dz, 2 + dz].flatten() + + dataset = [(z0, complex(mpmath.loggamma(z0))) for z0 in z] + dataset = np.array(dataset) + + FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check() + + +# ------------------------------------------------------------------------------ +# rgamma +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_rgamma_zeros(): + # Test around the zeros at z = 0, -1, -2, ..., -169. (After -169 we + # get values that are out of floating point range even when we're + # within 0.1 of the zero.) + + # Can't use too many points here or the test takes forever. + dx = np.r_[-np.logspace(-1, -13, 3), 0, np.logspace(-13, -1, 3)] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + zeros = np.arange(0, -170, -1).reshape(1, 1, -1) + z = (zeros + np.dstack((dz,)*zeros.size)).flatten() + with mpmath.workdps(100): + dataset = [(z0, complex(mpmath.rgamma(z0))) for z0 in z] + + dataset = np.array(dataset) + FuncData(sc.rgamma, dataset, 0, 1, rtol=1e-12).check() + + +# ------------------------------------------------------------------------------ +# digamma +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_digamma_roots(): + # Test the special-cased roots for digamma. + root = mpmath.findroot(mpmath.digamma, 1.5) + roots = [float(root)] + root = mpmath.findroot(mpmath.digamma, -0.5) + roots.append(float(root)) + roots = np.array(roots) + + # If we test beyond a radius of 0.24 mpmath will take forever. 
+ dx = np.r_[-0.24, -np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10), 0.24] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + z = (roots + np.dstack((dz,)*roots.size)).flatten() + with mpmath.workdps(30): + dataset = [(z0, complex(mpmath.digamma(z0))) for z0 in z] + + dataset = np.array(dataset) + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check() + + +@check_version(mpmath, '0.19') +def test_digamma_negreal(): + # Test digamma around the negative real axis. Don't do this in + # TestSystematic because the points need some jiggering so that + # mpmath doesn't take forever. + + digamma = exception_to_nan(mpmath.digamma) + + x = -np.logspace(300, -30, 100) + y = np.r_[-np.logspace(0, -3, 5), 0, np.logspace(-3, 0, 5)] + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + with mpmath.workdps(40): + dataset = [(z0, complex(digamma(z0))) for z0 in z] + dataset = np.asarray(dataset) + + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check() + + +@check_version(mpmath, '0.19') +def test_digamma_boundary(): + # Check that there isn't a jump in accuracy when we switch from + # using the asymptotic series to the reflection formula. + + x = -np.logspace(300, -30, 100) + y = np.array([-6.1, -5.9, 5.9, 6.1]) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + with mpmath.workdps(30): + dataset = [(z0, complex(mpmath.digamma(z0))) for z0 in z] + dataset = np.asarray(dataset) + + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check() + + +# ------------------------------------------------------------------------------ +# gammainc +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_gammainc_boundary(): + # Test the transition to the asymptotic series. 
+ small = 20 + a = np.linspace(0.5*small, 2*small, 50) + x = a.copy() + a, x = np.meshgrid(a, x) + a, x = a.flatten(), x.flatten() + with mpmath.workdps(100): + dataset = [(a0, x0, float(mpmath.gammainc(a0, b=x0, regularized=True))) + for a0, x0 in zip(a, x)] + dataset = np.array(dataset) + + FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-12).check() + + +# ------------------------------------------------------------------------------ +# spence +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_spence_circle(): + # The trickiest region for spence is around the circle |z - 1| = 1, + # so test that region carefully. + + def spence(z): + return complex(mpmath.polylog(2, 1 - z)) + + r = np.linspace(0.5, 1.5) + theta = np.linspace(0, 2*pi) + z = (1 + np.outer(r, np.exp(1j*theta))).flatten() + dataset = np.asarray([(z0, spence(z0)) for z0 in z]) + + FuncData(sc.spence, dataset, 0, 1, rtol=1e-14).check() + + +# ------------------------------------------------------------------------------ +# sinpi and cospi +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +def test_sinpi_zeros(): + eps = np.finfo(float).eps + dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + zeros = np.arange(-100, 100, 1).reshape(1, 1, -1) + z = (zeros + np.dstack((dz,)*zeros.size)).flatten() + dataset = np.asarray([(z0, complex(mpmath.sinpi(z0))) + for z0 in z]) + FuncData(_sinpi, dataset, 0, 1, rtol=2*eps).check() + + +@check_version(mpmath, '0.19') +def test_cospi_zeros(): + eps = np.finfo(float).eps + dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + zeros = (np.arange(-100, 100, 1) + 0.5).reshape(1, 1, -1) + z = (zeros + np.dstack((dz,)*zeros.size)).flatten() + dataset = 
np.asarray([(z0, complex(mpmath.cospi(z0))) + for z0 in z]) + + FuncData(_cospi, dataset, 0, 1, rtol=2*eps).check() + + +# ------------------------------------------------------------------------------ +# ellipj +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +def test_dn_quarter_period(): + def dn(u, m): + return sc.ellipj(u, m)[2] + + def mpmath_dn(u, m): + return float(mpmath.ellipfun("dn", u=u, m=m)) + + m = np.linspace(0, 1, 20) + du = np.r_[-np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10)] + dataset = [] + for m0 in m: + u0 = float(mpmath.ellipk(m0)) + for du0 in du: + p = u0 + du0 + dataset.append((p, m0, mpmath_dn(p, m0))) + dataset = np.asarray(dataset) + + FuncData(dn, dataset, (0, 1), 2, rtol=1e-10).check() + + +# ------------------------------------------------------------------------------ +# Wright Omega +# ------------------------------------------------------------------------------ + +def _mpmath_wrightomega(z, dps): + with mpmath.workdps(dps): + z = mpmath.mpc(z) + unwind = mpmath.ceil((z.imag - mpmath.pi)/(2*mpmath.pi)) + res = mpmath.lambertw(mpmath.exp(z), unwind) + return res + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_wrightomega_branch(): + x = -np.logspace(10, 0, 25) + picut_above = [np.nextafter(np.pi, np.inf)] + picut_below = [np.nextafter(np.pi, -np.inf)] + npicut_above = [np.nextafter(-np.pi, np.inf)] + npicut_below = [np.nextafter(-np.pi, -np.inf)] + for i in range(50): + picut_above.append(np.nextafter(picut_above[-1], np.inf)) + picut_below.append(np.nextafter(picut_below[-1], -np.inf)) + npicut_above.append(np.nextafter(npicut_above[-1], np.inf)) + npicut_below.append(np.nextafter(npicut_below[-1], -np.inf)) + y = np.hstack((picut_above, picut_below, npicut_above, npicut_below)) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25))) + for z0 in z]) + + 
FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-8).check() + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_wrightomega_region1(): + # This region gets less coverage in the TestSystematic test + x = np.linspace(-2, 1) + y = np.linspace(1, 2*np.pi) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25))) + for z0 in z]) + + FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check() + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_wrightomega_region2(): + # This region gets less coverage in the TestSystematic test + x = np.linspace(-2, 1) + y = np.linspace(-2*np.pi, -1) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25))) + for z0 in z]) + + FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check() + + +# ------------------------------------------------------------------------------ +# lambertw +# ------------------------------------------------------------------------------ + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_lambertw_smallz(): + x, y = np.linspace(-1, 1, 25), np.linspace(-1, 1, 25) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = np.asarray([(z0, complex(mpmath.lambertw(z0))) + for z0 in z]) + + FuncData(sc.lambertw, dataset, 0, 1, rtol=1e-13).check() + + +# ------------------------------------------------------------------------------ +# Systematic tests +# ------------------------------------------------------------------------------ + +HYPERKW = dict(maxprec=200, maxterms=200) + + +@pytest.mark.slow +@check_version(mpmath, '0.17') +class TestSystematic: + + def test_airyai(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: sc.airy(z)[0], + mpmath.airyai, + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[0], + mpmath.airyai, + [Arg(-1e3, 1e3)]) + + def test_airyai_complex(self): + 
assert_mpmath_equal(lambda z: sc.airy(z)[0], + mpmath.airyai, + [ComplexArg()]) + + def test_airyai_prime(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: + mpmath.airyai(z, derivative=1), + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: + mpmath.airyai(z, derivative=1), + [Arg(-1e3, 1e3)]) + + def test_airyai_prime_complex(self): + assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: + mpmath.airyai(z, derivative=1), + [ComplexArg()]) + + def test_airybi(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: + mpmath.airybi(z), + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: + mpmath.airybi(z), + [Arg(-1e3, 1e3)]) + + def test_airybi_complex(self): + assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: + mpmath.airybi(z), + [ComplexArg()]) + + def test_airybi_prime(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: + mpmath.airybi(z, derivative=1), + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: + mpmath.airybi(z, derivative=1), + [Arg(-1e3, 1e3)]) + + def test_airybi_prime_complex(self): + assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: + mpmath.airybi(z, derivative=1), + [ComplexArg()]) + + def test_bei(self): + assert_mpmath_equal(sc.bei, + exception_to_nan(lambda z: mpmath.bei(0, z, **HYPERKW)), + [Arg(-1e3, 1e3)]) + + def test_ber(self): + assert_mpmath_equal(sc.ber, + exception_to_nan(lambda z: mpmath.ber(0, z, **HYPERKW)), + [Arg(-1e3, 1e3)]) + + def test_bernoulli(self): + assert_mpmath_equal(lambda n: sc.bernoulli(int(n))[int(n)], + lambda n: float(mpmath.bernoulli(int(n))), + [IntArg(0, 13000)], + rtol=1e-9, n=13000) + + def test_besseli(self): + assert_mpmath_equal( + sc.iv, + exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), Arg()], + 
atol=1e-270, + ) + + def test_besseli_complex(self): + assert_mpmath_equal( + lambda v, z: sc.iv(v.real, z), + exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), ComplexArg()], + ) + + def test_besselj(self): + assert_mpmath_equal( + sc.jv, + exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), Arg(-1e3, 1e3)], + ignore_inf_sign=True, + ) + + # loss of precision at large arguments due to oscillation + assert_mpmath_equal( + sc.jv, + exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], + ignore_inf_sign=True, + rtol=1e-5, + ) + + def test_besselj_complex(self): + assert_mpmath_equal( + lambda v, z: sc.jv(v.real, z), + exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), + [Arg(), ComplexArg()] + ) + + def test_besselk(self): + assert_mpmath_equal( + sc.kv, + mpmath.besselk, + [Arg(-200, 200), Arg(0, np.inf)], + nan_ok=False, + rtol=1e-12, + ) + + def test_besselk_int(self): + assert_mpmath_equal( + sc.kn, + mpmath.besselk, + [IntArg(-200, 200), Arg(0, np.inf)], + nan_ok=False, + rtol=1e-12, + ) + + def test_besselk_complex(self): + assert_mpmath_equal( + lambda v, z: sc.kv(v.real, z), + exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), ComplexArg()], + ) + + def test_bessely(self): + def mpbessely(v, x): + r = float(mpmath.bessely(v, x, **HYPERKW)) + if abs(r) > 1e305: + # overflowing to inf a bit earlier is OK + r = np.inf * np.sign(r) + if abs(r) == 0 and x == 0: + # invalid result from mpmath, point x=0 is a divergence + return np.nan + return r + assert_mpmath_equal( + sc.yv, + exception_to_nan(mpbessely), + [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], + n=5000, + ) + + def test_bessely_complex(self): + def mpbessely(v, x): + r = complex(mpmath.bessely(v, x, **HYPERKW)) + if abs(r) > 1e305: + # overflowing to inf a bit earlier is OK + with np.errstate(invalid='ignore'): + r = np.inf * np.sign(r) + 
return r + assert_mpmath_equal( + lambda v, z: sc.yv(v.real, z), + exception_to_nan(mpbessely), + [Arg(), ComplexArg()], + n=15000, + ) + + def test_bessely_int(self): + def mpbessely(v, x): + r = float(mpmath.bessely(v, x)) + if abs(r) == 0 and x == 0: + # invalid result from mpmath, point x=0 is a divergence + return np.nan + return r + assert_mpmath_equal( + lambda v, z: sc.yn(int(v), z), + exception_to_nan(mpbessely), + [IntArg(-1000, 1000), Arg(-1e8, 1e8)], + ) + + def test_beta(self): + bad_points = [] + + def beta(a, b, nonzero=False): + if a < -1e12 or b < -1e12: + # Function is defined here only at integers, but due + # to loss of precision this is numerically + # ill-defined. Don't compare values here. + return np.nan + if (a < 0 or b < 0) and (abs(float(a + b)) % 1) == 0: + # close to a zero of the function: mpmath and scipy + # will not round here the same, so the test needs to be + # run with an absolute tolerance + if nonzero: + bad_points.append((float(a), float(b))) + return np.nan + return mpmath.beta(a, b) + + assert_mpmath_equal( + sc.beta, + lambda a, b: beta(a, b, nonzero=True), + [Arg(), Arg()], + dps=400, + ignore_inf_sign=True, + ) + + assert_mpmath_equal( + sc.beta, + beta, + np.array(bad_points), + dps=400, + ignore_inf_sign=True, + atol=1e-11, + ) + + def test_betainc(self): + assert_mpmath_equal( + sc.betainc, + time_limited()( + exception_to_nan( + lambda a, b, x: mpmath.betainc(a, b, 0, x, regularized=True) + ) + ), + [Arg(), Arg(), Arg()], + ) + + def test_betaincc(self): + assert_mpmath_equal( + sc.betaincc, + time_limited()( + exception_to_nan( + lambda a, b, x: mpmath.betainc(a, b, x, 1, regularized=True) + ) + ), + [Arg(), Arg(), Arg()], + dps=400, + ) + + def test_binom(self): + bad_points = [] + + def binomial(n, k, nonzero=False): + if abs(k) > 1e8*(abs(n) + 1): + # The binomial is rapidly oscillating in this region, + # and the function is numerically ill-defined. Don't + # compare values here. 
+ return np.nan + if n < k and abs(float(n-k) - np.round(float(n-k))) < 1e-15: + # close to a zero of the function: mpmath and scipy + # will not round here the same, so the test needs to be + # run with an absolute tolerance + if nonzero: + bad_points.append((float(n), float(k))) + return np.nan + return mpmath.binomial(n, k) + + assert_mpmath_equal( + sc.binom, + lambda n, k: binomial(n, k, nonzero=True), + [Arg(), Arg()], + dps=400, + ) + + assert_mpmath_equal( + sc.binom, + binomial, + np.array(bad_points), + dps=400, + atol=1e-14, + ) + + def test_chebyt_int(self): + assert_mpmath_equal( + lambda n, x: sc.eval_chebyt(int(n), x), + exception_to_nan(lambda n, x: mpmath.chebyt(n, x, **HYPERKW)), + [IntArg(), Arg()], + dps=50, + ) + + @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate") + def test_chebyt(self): + assert_mpmath_equal( + sc.eval_chebyt, + lambda n, x: time_limited()( + exception_to_nan(mpmath.chebyt) + )(n, x, **HYPERKW), + [Arg(-101, 101), Arg()], + n=10000, + ) + + def test_chebyu_int(self): + assert_mpmath_equal( + lambda n, x: sc.eval_chebyu(int(n), x), + exception_to_nan(lambda n, x: mpmath.chebyu(n, x, **HYPERKW)), + [IntArg(), Arg()], + dps=50, + ) + + @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate") + def test_chebyu(self): + assert_mpmath_equal( + sc.eval_chebyu, + lambda n, x: time_limited()( + exception_to_nan(mpmath.chebyu) + )(n, x, **HYPERKW), + [Arg(-101, 101), Arg()], + ) + + def test_chi(self): + def chi(x): + return sc.shichi(x)[1] + assert_mpmath_equal(chi, mpmath.chi, [Arg()]) + # check asymptotic series cross-over + assert_mpmath_equal(chi, mpmath.chi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) + + def test_chi_complex(self): + def chi(z): + return sc.shichi(z)[1] + # chi oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal( + chi, + mpmath.chi, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-12, + ) + + def test_ci(self): + def ci(x): 
+ return sc.sici(x)[1] + # oscillating function: limit range + assert_mpmath_equal(ci, mpmath.ci, [Arg(-1e8, 1e8)]) + + def test_ci_complex(self): + def ci(z): + return sc.sici(z)[1] + # ci oscillates as Re[z] -> +- inf, so limit range + assert_mpmath_equal( + ci, + mpmath.ci, + [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))], + rtol=1e-8, + ) + + def test_cospi(self): + eps = np.finfo(float).eps + assert_mpmath_equal(_cospi, mpmath.cospi, [Arg()], nan_ok=False, rtol=2*eps) + + def test_cospi_complex(self): + assert_mpmath_equal( + _cospi, + mpmath.cospi, + [ComplexArg()], + nan_ok=False, + rtol=1e-13, + ) + + def test_digamma(self): + assert_mpmath_equal( + sc.digamma, + exception_to_nan(mpmath.digamma), + [Arg()], + rtol=1e-12, + dps=50, + ) + + def test_digamma_complex(self): + # Test on a cut plane because mpmath will hang. See + # test_digamma_negreal for tests on the negative real axis. + def param_filter(z): + return np.where((z.real < 0) & (np.abs(z.imag) < 1.12), False, True) + + assert_mpmath_equal( + sc.digamma, + exception_to_nan(mpmath.digamma), + [ComplexArg()], + rtol=1e-13, + dps=40, + param_filter=param_filter + ) + + def test_e1(self): + assert_mpmath_equal( + sc.exp1, + mpmath.e1, + [Arg()], + rtol=1e-14, + ) + + def test_e1_complex(self): + # E_1 oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal( + sc.exp1, + mpmath.e1, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-11, + ) + + # Check cross-over region + assert_mpmath_equal( + sc.exp1, + mpmath.e1, + (np.linspace(-50, 50, 171)[:, None] + + np.r_[0, np.logspace(-3, 2, 61), -np.logspace(-3, 2, 11)]*1j).ravel(), + rtol=1e-11, + ) + assert_mpmath_equal( + sc.exp1, + mpmath.e1, + (np.linspace(-50, -35, 10000) + 0j), + rtol=1e-11, + ) + + def test_exprel(self): + assert_mpmath_equal( + sc.exprel, + lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), + [Arg(a=-np.log(np.finfo(np.float64).max), + b=np.log(np.finfo(np.float64).max))], + ) + 
assert_mpmath_equal( + sc.exprel, + lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), + np.array([1e-12, 1e-24, 0, 1e12, 1e24, np.inf]), + rtol=1e-11, + ) + assert_(np.isinf(sc.exprel(np.inf))) + assert_(sc.exprel(-np.inf) == 0) + + def test_expm1_complex(self): + # Oscillates as a function of Im[z], so limit range to avoid loss of precision + assert_mpmath_equal( + sc.expm1, + mpmath.expm1, + [ComplexArg(complex(-np.inf, -1e7), complex(np.inf, 1e7))], + ) + + def test_log1p_complex(self): + assert_mpmath_equal( + sc.log1p, + lambda x: mpmath.log(x+1), + [ComplexArg()], + dps=60, + ) + + def test_log1pmx(self): + assert_mpmath_equal( + _log1pmx, + lambda x: mpmath.log(x + 1) - x, + [Arg()], + dps=60, + rtol=1e-14, + ) + + def test_ei(self): + assert_mpmath_equal(sc.expi, mpmath.ei, [Arg()], rtol=1e-11) + + def test_ei_complex(self): + # Ei oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal( + sc.expi, + mpmath.ei, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-9, + ) + + def test_ellipe(self): + assert_mpmath_equal(sc.ellipe, mpmath.ellipe, [Arg(b=1.0)]) + + def test_ellipeinc(self): + assert_mpmath_equal(sc.ellipeinc, mpmath.ellipe, [Arg(-1e3, 1e3), Arg(b=1.0)]) + + def test_ellipeinc_largephi(self): + assert_mpmath_equal(sc.ellipeinc, mpmath.ellipe, [Arg(), Arg()]) + + def test_ellipf(self): + assert_mpmath_equal(sc.ellipkinc, mpmath.ellipf, [Arg(-1e3, 1e3), Arg()]) + + def test_ellipf_largephi(self): + assert_mpmath_equal(sc.ellipkinc, mpmath.ellipf, [Arg(), Arg()]) + + def test_ellipk(self): + assert_mpmath_equal(sc.ellipk, mpmath.ellipk, [Arg(b=1.0)]) + assert_mpmath_equal( + sc.ellipkm1, + lambda m: mpmath.ellipk(1 - m), + [Arg(a=0.0)], + dps=400, + ) + + def test_ellipkinc(self): + def ellipkinc(phi, m): + return mpmath.ellippi(0, phi, m) + assert_mpmath_equal( + sc.ellipkinc, + ellipkinc, + [Arg(-1e3, 1e3), Arg(b=1.0)], + ignore_inf_sign=True, + ) + + def test_ellipkinc_largephi(self): + def 
ellipkinc(phi, m): + return mpmath.ellippi(0, phi, m) + assert_mpmath_equal( + sc.ellipkinc, + ellipkinc, + [Arg(), Arg(b=1.0)], + ignore_inf_sign=True, + ) + + def test_ellipfun_sn(self): + def sn(u, m): + # mpmath doesn't get the zero at u = 0--fix that + if u == 0: + return 0 + else: + return mpmath.ellipfun("sn", u=u, m=m) + + # Oscillating function --- limit range of first argument; the + # loss of precision there is an expected numerical feature + # rather than an actual bug + assert_mpmath_equal( + lambda u, m: sc.ellipj(u, m)[0], + sn, + [Arg(-1e6, 1e6), Arg(a=0, b=1)], + rtol=1e-8, + ) + + def test_ellipfun_cn(self): + # see comment in ellipfun_sn + assert_mpmath_equal( + lambda u, m: sc.ellipj(u, m)[1], + lambda u, m: mpmath.ellipfun("cn", u=u, m=m), + [Arg(-1e6, 1e6), Arg(a=0, b=1)], + rtol=1e-8, + ) + + def test_ellipfun_dn(self): + # see comment in ellipfun_sn + assert_mpmath_equal( + lambda u, m: sc.ellipj(u, m)[2], + lambda u, m: mpmath.ellipfun("dn", u=u, m=m), + [Arg(-1e6, 1e6), Arg(a=0, b=1)], + rtol=1e-8, + ) + + def test_erf(self): + assert_mpmath_equal(sc.erf, lambda z: mpmath.erf(z), [Arg()]) + + def test_erf_complex(self): + assert_mpmath_equal(sc.erf, lambda z: mpmath.erf(z), [ComplexArg()], n=200) + + def test_erfc(self): + assert_mpmath_equal( + sc.erfc, + exception_to_nan(lambda z: mpmath.erfc(z)), + [Arg()], + rtol=1e-13, + ) + + def test_erfc_complex(self): + assert_mpmath_equal( + sc.erfc, + exception_to_nan(lambda z: mpmath.erfc(z)), + [ComplexArg()], + n=200, + ) + + def test_erfi(self): + assert_mpmath_equal(sc.erfi, mpmath.erfi, [Arg()], n=200) + + def test_erfi_complex(self): + assert_mpmath_equal(sc.erfi, mpmath.erfi, [ComplexArg()], n=200) + + def test_ndtr(self): + assert_mpmath_equal( + sc.ndtr, + exception_to_nan(lambda z: mpmath.ncdf(z)), + [Arg()], + n=200, + ) + + def test_ndtr_complex(self): + assert_mpmath_equal( + sc.ndtr, + lambda z: mpmath.erfc(-z/np.sqrt(2.))/2., + [ComplexArg(a=complex(-10000, -10000), 
b=complex(10000, 10000))], + n=400, + ) + + def test_log_ndtr(self): + assert_mpmath_equal( + sc.log_ndtr, + exception_to_nan(lambda z: mpmath.log(mpmath.ncdf(z))), + [Arg()], n=600, dps=300, rtol=1e-13, + ) + + def test_log_ndtr_complex(self): + assert_mpmath_equal( + sc.log_ndtr, + exception_to_nan(lambda z: mpmath.log(mpmath.erfc(-z/np.sqrt(2.))/2.)), + [ComplexArg(a=complex(-10000, -100), b=complex(10000, 100))], + n=200, dps=300, + ) + + def test_eulernum(self): + assert_mpmath_equal( + lambda n: sc.euler(n)[-1], + mpmath.eulernum, + [IntArg(1, 10000)], + n=10000, + ) + + def test_expint(self): + assert_mpmath_equal( + sc.expn, + mpmath.expint, + [IntArg(0, 200), Arg(0, np.inf)], + rtol=1e-13, + dps=160, + ) + + def test_fresnels(self): + def fresnels(x): + return sc.fresnel(x)[0] + assert_mpmath_equal(fresnels, mpmath.fresnels, [Arg()]) + + def test_fresnelc(self): + def fresnelc(x): + return sc.fresnel(x)[1] + assert_mpmath_equal(fresnelc, mpmath.fresnelc, [Arg()]) + + def test_gamma(self): + assert_mpmath_equal(sc.gamma, exception_to_nan(mpmath.gamma), [Arg()]) + + def test_gamma_complex(self): + assert_mpmath_equal( + sc.gamma, + exception_to_nan(mpmath.gamma), + [ComplexArg()], + rtol=5e-13, + ) + + def test_gammainc(self): + # Larger arguments are tested in test_data.py:test_local + assert_mpmath_equal( + sc.gammainc, + lambda z, b: mpmath.gammainc(z, b=b, regularized=True), + [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)], + nan_ok=False, + rtol=1e-11, + ) + + def test_gammaincc(self): + # Larger arguments are tested in test_data.py:test_local + assert_mpmath_equal( + sc.gammaincc, + lambda z, a: mpmath.gammainc(z, a=a, regularized=True), + [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)], + nan_ok=False, + rtol=1e-11, + ) + + def test_gammaln(self): + # The real part of loggamma is log(|gamma(z)|). 
+ def f(z): + return mpmath.loggamma(z).real + + assert_mpmath_equal(sc.gammaln, exception_to_nan(f), [Arg()]) + + @pytest.mark.xfail(run=False) + def test_gegenbauer(self): + assert_mpmath_equal( + sc.eval_gegenbauer, + exception_to_nan(mpmath.gegenbauer), + [Arg(-1e3, 1e3), Arg(), Arg()], + ) + + def test_gegenbauer_int(self): + # Redefine functions to deal with numerical + mpmath issues + def gegenbauer(n, a, x): + # Avoid overflow at large `a` (mpmath would need an even larger + # dps to handle this correctly, so just skip this region) + if abs(a) > 1e100: + return np.nan + + # Deal with n=0, n=1 correctly; mpmath 0.17 doesn't do these + # always correctly + if n == 0: + r = 1.0 + elif n == 1: + r = 2*a*x + else: + r = mpmath.gegenbauer(n, a, x) + + # Mpmath 0.17 gives wrong results (spurious zero) in some cases, so + # compute the value by perturbing the result + if float(r) == 0 and a < -1 and float(a) == int(float(a)): + r = mpmath.gegenbauer(n, a + mpmath.mpf('1e-50'), x) + if abs(r) < mpmath.mpf('1e-50'): + r = mpmath.mpf('0.0') + + # Differing overflow thresholds in scipy vs. mpmath + if abs(r) > 1e270: + return np.inf + return r + + def sc_gegenbauer(n, a, x): + r = sc.eval_gegenbauer(int(n), a, x) + # Differing overflow thresholds in scipy vs. 
mpmath + if abs(r) > 1e270: + return np.inf + return r + assert_mpmath_equal( + sc_gegenbauer, + exception_to_nan(gegenbauer), + [IntArg(0, 100), Arg(-1e9, 1e9), Arg()], + n=40000, dps=100, ignore_inf_sign=True, rtol=1e-6, + ) + + # Check the small-x expansion + assert_mpmath_equal( + sc_gegenbauer, + exception_to_nan(gegenbauer), + [IntArg(0, 100), Arg(), FixedArg(np.logspace(-30, -4, 30))], + dps=100, ignore_inf_sign=True, + ) + + @pytest.mark.xfail(run=False) + def test_gegenbauer_complex(self): + assert_mpmath_equal( + lambda n, a, x: sc.eval_gegenbauer(int(n), a.real, x), + exception_to_nan(mpmath.gegenbauer), + [IntArg(0, 100), Arg(), ComplexArg()], + ) + + @nonfunctional_tooslow + def test_gegenbauer_complex_general(self): + assert_mpmath_equal( + lambda n, a, x: sc.eval_gegenbauer(n.real, a.real, x), + exception_to_nan(mpmath.gegenbauer), + [Arg(-1e3, 1e3), Arg(), ComplexArg()], + ) + + def test_hankel1(self): + assert_mpmath_equal( + sc.hankel1, + exception_to_nan(lambda v, x: mpmath.hankel1(v, x, **HYPERKW)), + [Arg(-1e20, 1e20), Arg()], + ) + + def test_hankel2(self): + assert_mpmath_equal( + sc.hankel2, + exception_to_nan(lambda v, x: mpmath.hankel2(v, x, **HYPERKW)), + [Arg(-1e20, 1e20), Arg()], + ) + + @pytest.mark.xfail(run=False, reason="issues at intermediately large orders") + def test_hermite(self): + assert_mpmath_equal( + lambda n, x: sc.eval_hermite(int(n), x), + exception_to_nan(mpmath.hermite), + [IntArg(0, 10000), Arg()], + ) + + # hurwitz: same as zeta + + def test_hyp0f1(self): + # mpmath reports no convergence unless maxterms is large enough + KW = dict(maxprec=400, maxterms=1500) + # n=500 (non-xslow default) fails for one bad point + assert_mpmath_equal( + sc.hyp0f1, + lambda a, x: mpmath.hyp0f1(a, x, **KW), + [Arg(-1e7, 1e7), Arg(0, 1e5)], + n=5000, + ) + # NB: The range of the second parameter ("z") is limited from below + # because of an overflow in the intermediate calculations. 
The way + # for fix it is to implement an asymptotic expansion for Bessel J + # (similar to what is implemented for Bessel I here). + + def test_hyp0f1_complex(self): + assert_mpmath_equal( + lambda a, z: sc.hyp0f1(a.real, z), + exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)), + [Arg(-10, 10), ComplexArg(complex(-120, -120), complex(120, 120))], + ) + # NB: The range of the first parameter ("v") are limited by an overflow + # in the intermediate calculations. Can be fixed by implementing an + # asymptotic expansion for Bessel functions for large order. + + def test_hyp1f1(self): + def mpmath_hyp1f1(a, b, x): + try: + return mpmath.hyp1f1(a, b, x) + except ZeroDivisionError: + return np.inf + + assert_mpmath_equal( + sc.hyp1f1, + mpmath_hyp1f1, + [Arg(-50, 50), Arg(1, 50, inclusive_a=False), Arg(-50, 50)], + n=500, + nan_ok=False, + ) + + @pytest.mark.xfail(run=False) + def test_hyp1f1_complex(self): + assert_mpmath_equal( + inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)), + exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)), + [Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()], + n=2000, + ) + + @nonfunctional_tooslow + def test_hyp2f1_complex(self): + # SciPy's hyp2f1 seems to have performance and accuracy problems + assert_mpmath_equal( + lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x), + exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)), + [Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()], + n=10, + ) + + @pytest.mark.xfail(run=False) + def test_hyperu(self): + assert_mpmath_equal( + sc.hyperu, + exception_to_nan(lambda a, b, x: mpmath.hyperu(a, b, x, **HYPERKW)), + [Arg(), Arg(), Arg()], + ) + + @pytest.mark.xfail_on_32bit("mpmath issue gh-342: " + "unsupported operand mpz, long for pow") + def test_igam_fac(self): + def mp_igam_fac(a, x): + return mpmath.power(x, a)*mpmath.exp(-x)/mpmath.gamma(a) + + assert_mpmath_equal( + _igam_fac, + mp_igam_fac, + [Arg(0, 1e14, 
inclusive_a=False), Arg(0, 1e14)], + rtol=1e-10, + ) + + def test_j0(self): + # The Bessel function at large arguments is j0(x) ~ cos(x + phi)/sqrt(x) + # and at large arguments the phase of the cosine loses precision. + # + # This is numerically expected behavior, so we compare only up to + # 1e8 = 1e15 * 1e-7 + assert_mpmath_equal(sc.j0, mpmath.j0, [Arg(-1e3, 1e3)]) + assert_mpmath_equal(sc.j0, mpmath.j0, [Arg(-1e8, 1e8)], rtol=1e-5) + + def test_j1(self): + # See comment in test_j0 + assert_mpmath_equal(sc.j1, mpmath.j1, [Arg(-1e3, 1e3)]) + assert_mpmath_equal(sc.j1, mpmath.j1, [Arg(-1e8, 1e8)], rtol=1e-5) + + @pytest.mark.xfail(run=False) + def test_jacobi(self): + assert_mpmath_equal( + sc.eval_jacobi, + exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), + [Arg(), Arg(), Arg(), Arg()], + ) + assert_mpmath_equal( + lambda n, b, c, x: sc.eval_jacobi(int(n), b, c, x), + exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), + [IntArg(), Arg(), Arg(), Arg()], + ) + + def test_jacobi_int(self): + # Redefine functions to deal with numerical + mpmath issues + def jacobi(n, a, b, x): + # Mpmath does not handle n=0 case always correctly + if n == 0: + return 1.0 + return mpmath.jacobi(n, a, b, x) + assert_mpmath_equal( + lambda n, a, b, x: sc.eval_jacobi(int(n), a, b, x), + lambda n, a, b, x: exception_to_nan(jacobi)(n, a, b, x, **HYPERKW), + [IntArg(), Arg(), Arg(), Arg()], + n=20000, + dps=50, + ) + + def test_kei(self): + def kei(x): + if x == 0: + # work around mpmath issue at x=0 + return -pi/4 + return exception_to_nan(mpmath.kei)(0, x, **HYPERKW) + assert_mpmath_equal(sc.kei, kei, [Arg(-1e30, 1e30)], n=1000) + + def test_ker(self): + assert_mpmath_equal( + sc.ker, + exception_to_nan(lambda x: mpmath.ker(0, x, **HYPERKW)), + [Arg(-1e30, 1e30)], + n=1000, + ) + + @nonfunctional_tooslow + def test_laguerre(self): + assert_mpmath_equal( + trace_args(sc.eval_laguerre), + lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, 
**HYPERKW), + [Arg(), Arg()], + ) + + def test_laguerre_int(self): + assert_mpmath_equal( + lambda n, x: sc.eval_laguerre(int(n), x), + lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW), + [IntArg(), Arg()], + n=20000, + ) + + @pytest.mark.xfail_on_32bit("see gh-3551 for bad points") + def test_lambertw_real(self): + assert_mpmath_equal( + lambda x, k: sc.lambertw(x, int(k.real)), + lambda x, k: mpmath.lambertw(x, int(k.real)), + [ComplexArg(-np.inf, np.inf), IntArg(0, 10)], + rtol=1e-13, nan_ok=False, + ) + + def test_lanczos_sum_expg_scaled(self): + maxgamma = 171.624376956302725 + e = np.exp(1) + g = 6.024680040776729583740234375 + + def gamma(x): + with np.errstate(over='ignore'): + fac = ((x + g - 0.5)/e)**(x - 0.5) + if fac != np.inf: + res = fac*_lanczos_sum_expg_scaled(x) + else: + fac = ((x + g - 0.5)/e)**(0.5*(x - 0.5)) + res = fac*_lanczos_sum_expg_scaled(x) + res *= fac + return res + + assert_mpmath_equal( + gamma, + mpmath.gamma, + [Arg(0, maxgamma, inclusive_a=False)], + rtol=1e-13, + ) + + @nonfunctional_tooslow + def test_legendre(self): + assert_mpmath_equal(sc.eval_legendre, mpmath.legendre, [Arg(), Arg()]) + + def test_legendre_int(self): + assert_mpmath_equal( + lambda n, x: sc.eval_legendre(int(n), x), + lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), + [IntArg(), Arg()], + n=20000, + ) + + # Check the small-x expansion + assert_mpmath_equal( + lambda n, x: sc.eval_legendre(int(n), x), + lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), + [IntArg(), FixedArg(np.logspace(-30, -4, 20))], + ) + + def test_legenp(self): + def lpnm(n, m, z): + try: + v = sc.lpmn(m, n, z)[0][-1,-1] + except ValueError: + return np.nan + if abs(v) > 1e306: + # harmonize overflow to inf + v = np.inf * np.sign(v.real) + return v + + def lpnm_2(n, m, z): + v = sc.lpmv(m, n, z) + if abs(v) > 1e306: + # harmonize overflow to inf + v = np.inf * np.sign(v.real) + return v + + def legenp(n, m, z): + if (z == 1 or z == -1) and 
int(n) == n: + # Special case (mpmath may give inf, we take the limit by + # continuity) + if m == 0: + if n < 0: + n = -n - 1 + return mpmath.power(mpmath.sign(z), n) + else: + return 0 + + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + + typ = 2 if abs(z) < 1 else 3 + v = exception_to_nan(mpmath.legenp)(n, m, z, type=typ) + + if abs(v) > 1e306: + # harmonize overflow to inf + v = mpmath.inf * mpmath.sign(v.real) + + return v + + assert_mpmath_equal(lpnm, legenp, [IntArg(-100, 100), IntArg(-100, 100), Arg()]) + + assert_mpmath_equal( + lpnm_2, + legenp, + [IntArg(-100, 100), Arg(-100, 100), Arg(-1, 1)], + atol=1e-10, + ) + + def test_legenp_complex_2(self): + def clpnm(n, m, z): + try: + return sc.clpmn(m.real, n.real, z, type=2)[0][-1,-1] + except ValueError: + return np.nan + + def legenp(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=2) + + # mpmath is quite slow here + x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) + y = np.array([-1e3, -0.5, 0.5, 1.3]) + z = (x[:,None] + 1j*y[None,:]).ravel() + + assert_mpmath_equal( + clpnm, + legenp, + [FixedArg([-2, -1, 0, 1, 2, 10]), + FixedArg([-2, -1, 0, 1, 2, 10]), + FixedArg(z)], + rtol=1e-6, + n=500, + ) + + def test_legenp_complex_3(self): + def clpnm(n, m, z): + try: + return sc.clpmn(m.real, n.real, z, type=3)[0][-1,-1] + except ValueError: + return np.nan + + def legenp(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=3) + + # mpmath is quite slow here + x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) + y = np.array([-1e3, -0.5, 0.5, 1.3]) + z = (x[:,None] + 1j*y[None,:]).ravel() + + assert_mpmath_equal( + clpnm, + legenp, + [FixedArg([-2, -1, 0, 1, 2, 10]), + FixedArg([-2, -1, 0, 1, 2, 10]), + FixedArg(z)], + rtol=1e-6, + n=500, + ) + + 
@pytest.mark.xfail(run=False, reason="apparently picks wrong function at |z| > 1") + def test_legenq(self): + def lqnm(n, m, z): + return sc.lqmn(m, n, z)[0][-1,-1] + + def legenq(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenq)(n, m, z, type=2) + + assert_mpmath_equal( + lqnm, + legenq, + [IntArg(0, 100), IntArg(0, 100), Arg()], + ) + + @nonfunctional_tooslow + def test_legenq_complex(self): + def lqnm(n, m, z): + return sc.lqmn(int(m.real), int(n.real), z)[0][-1,-1] + + def legenq(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenq)(int(n.real), int(m.real), z, type=2) + + assert_mpmath_equal( + lqnm, + legenq, + [IntArg(0, 100), IntArg(0, 100), ComplexArg()], + n=100, + ) + + def test_lgam1p(self): + def param_filter(x): + # Filter the poles + return np.where((np.floor(x) == x) & (x <= 0), False, True) + + def mp_lgam1p(z): + # The real part of loggamma is log(|gamma(z)|) + return mpmath.loggamma(1 + z).real + + assert_mpmath_equal( + _lgam1p, + mp_lgam1p, + [Arg()], + rtol=1e-13, + dps=100, + param_filter=param_filter, + ) + + def test_loggamma(self): + def mpmath_loggamma(z): + try: + res = mpmath.loggamma(z) + except ValueError: + res = complex(np.nan, np.nan) + return res + + assert_mpmath_equal( + sc.loggamma, + mpmath_loggamma, + [ComplexArg()], + nan_ok=False, + distinguish_nan_and_inf=False, + rtol=5e-14, + ) + + @pytest.mark.xfail(run=False) + def test_pcfd(self): + def pcfd(v, x): + return sc.pbdv(v, x)[0] + assert_mpmath_equal( + pcfd, + exception_to_nan(lambda v, x: mpmath.pcfd(v, x, **HYPERKW)), + [Arg(), Arg()], + ) + + @pytest.mark.xfail(run=False, reason="it's not the same as the mpmath function --- " + "maybe different definition?") + def test_pcfv(self): + def pcfv(v, x): + return sc.pbvv(v, x)[0] + assert_mpmath_equal( + pcfv, + lambda v, x: time_limited()(exception_to_nan(mpmath.pcfv))(v, x, 
**HYPERKW), + [Arg(), Arg()], + n=1000, + ) + + def test_pcfw(self): + def pcfw(a, x): + return sc.pbwa(a, x)[0] + + def dpcfw(a, x): + return sc.pbwa(a, x)[1] + + def mpmath_dpcfw(a, x): + return mpmath.diff(mpmath.pcfw, (a, x), (0, 1)) + + # The Zhang and Jin implementation only uses Taylor series and + # is thus accurate in only a very small range. + assert_mpmath_equal( + pcfw, + mpmath.pcfw, + [Arg(-5, 5), Arg(-5, 5)], + rtol=2e-8, + n=100, + ) + + assert_mpmath_equal( + dpcfw, + mpmath_dpcfw, + [Arg(-5, 5), Arg(-5, 5)], + rtol=2e-9, + n=100, + ) + + @pytest.mark.xfail(run=False, + reason="issues at large arguments (atol OK, rtol not) " + "and = _pep440.Version("1.0.0"): + # no workarounds needed + mppoch = mpmath.rf + else: + def mppoch(a, m): + # deal with cases where the result in double precision + # hits exactly a non-positive integer, but the + # corresponding extended-precision mpf floats don't + if float(a + m) == int(a + m) and float(a + m) <= 0: + a = mpmath.mpf(a) + m = int(a + m) - a + return mpmath.rf(a, m) + + assert_mpmath_equal(sc.poch, mppoch, [Arg(), Arg()], dps=400) + + def test_sinpi(self): + eps = np.finfo(float).eps + assert_mpmath_equal( + _sinpi, + mpmath.sinpi, + [Arg()], + nan_ok=False, + rtol=2*eps, + ) + + def test_sinpi_complex(self): + assert_mpmath_equal( + _sinpi, + mpmath.sinpi, + [ComplexArg()], + nan_ok=False, + rtol=2e-14, + ) + + def test_shi(self): + def shi(x): + return sc.shichi(x)[0] + assert_mpmath_equal(shi, mpmath.shi, [Arg()]) + # check asymptotic series cross-over + assert_mpmath_equal(shi, mpmath.shi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) + + def test_shi_complex(self): + def shi(z): + return sc.shichi(z)[0] + # shi oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal( + shi, + mpmath.shi, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-12, + ) + + def test_si(self): + def si(x): + return sc.sici(x)[0] + assert_mpmath_equal(si, mpmath.si, [Arg()]) + + def 
test_si_complex(self): + def si(z): + return sc.sici(z)[0] + # si oscillates as Re[z] -> +- inf, so limit range + assert_mpmath_equal( + si, + mpmath.si, + [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))], + rtol=1e-12, + ) + + def test_spence(self): + # mpmath uses a different convention for the dilogarithm + def dilog(x): + return mpmath.polylog(2, 1 - x) + # Spence has a branch cut on the negative real axis + assert_mpmath_equal( + sc.spence, + exception_to_nan(dilog), + [Arg(0, np.inf)], + rtol=1e-14, + ) + + def test_spence_complex(self): + def dilog(z): + return mpmath.polylog(2, 1 - z) + assert_mpmath_equal( + sc.spence, + exception_to_nan(dilog), + [ComplexArg()], + rtol=1e-14, + ) + + def test_spherharm(self): + def spherharm(l, m, theta, phi): + if m > l: + return np.nan + return sc.sph_harm(m, l, phi, theta) + assert_mpmath_equal( + spherharm, + mpmath.spherharm, + [IntArg(0, 100), IntArg(0, 100), Arg(a=0, b=pi), Arg(a=0, b=2*pi)], + atol=1e-8, + n=6000, + dps=150, + ) + + def test_struveh(self): + assert_mpmath_equal( + sc.struve, + exception_to_nan(mpmath.struveh), + [Arg(-1e4, 1e4), Arg(0, 1e4)], + rtol=5e-10, + ) + + def test_struvel(self): + def mp_struvel(v, z): + if v < 0 and z < -v and abs(v) > 1000: + # larger DPS needed for correct results + old_dps = mpmath.mp.dps + try: + mpmath.mp.dps = 300 + return mpmath.struvel(v, z) + finally: + mpmath.mp.dps = old_dps + return mpmath.struvel(v, z) + + assert_mpmath_equal( + sc.modstruve, + exception_to_nan(mp_struvel), + [Arg(-1e4, 1e4), Arg(0, 1e4)], + rtol=5e-10, + ignore_inf_sign=True, + ) + + def test_wrightomega_real(self): + def mpmath_wrightomega_real(x): + return mpmath.lambertw(mpmath.exp(x), mpmath.mpf('-0.5')) + + # For x < -1000 the Wright Omega function is just 0 to double + # precision, and for x > 1e21 it is just x to double + # precision. 
+ assert_mpmath_equal( + sc.wrightomega, + mpmath_wrightomega_real, + [Arg(-1000, 1e21)], + rtol=5e-15, + atol=0, + nan_ok=False, + ) + + def test_wrightomega(self): + assert_mpmath_equal( + sc.wrightomega, + lambda z: _mpmath_wrightomega(z, 25), + [ComplexArg()], + rtol=1e-14, + nan_ok=False, + ) + + def test_hurwitz_zeta(self): + assert_mpmath_equal( + sc.zeta, + exception_to_nan(mpmath.zeta), + [Arg(a=1, b=1e10, inclusive_a=False), Arg(a=0, inclusive_a=False)], + ) + + def test_riemann_zeta(self): + assert_mpmath_equal( + sc.zeta, + lambda x: mpmath.zeta(x) if x != 1 else mpmath.inf, + [Arg(-100, 100)], + nan_ok=False, + rtol=5e-13, + ) + + def test_zetac(self): + assert_mpmath_equal( + sc.zetac, + lambda x: mpmath.zeta(x) - 1 if x != 1 else mpmath.inf, + [Arg(-100, 100)], + nan_ok=False, + dps=45, + rtol=5e-13, + ) + + def test_boxcox(self): + + def mp_boxcox(x, lmbda): + x = mpmath.mp.mpf(x) + lmbda = mpmath.mp.mpf(lmbda) + if lmbda == 0: + return mpmath.mp.log(x) + else: + return mpmath.mp.powm1(x, lmbda) / lmbda + + assert_mpmath_equal( + sc.boxcox, + exception_to_nan(mp_boxcox), + [Arg(a=0, inclusive_a=False), Arg()], + n=200, + dps=60, + rtol=1e-13, + ) + + def test_boxcox1p(self): + + def mp_boxcox1p(x, lmbda): + x = mpmath.mp.mpf(x) + lmbda = mpmath.mp.mpf(lmbda) + one = mpmath.mp.mpf(1) + if lmbda == 0: + return mpmath.mp.log(one + x) + else: + return mpmath.mp.powm1(one + x, lmbda) / lmbda + + assert_mpmath_equal( + sc.boxcox1p, + exception_to_nan(mp_boxcox1p), + [Arg(a=-1, inclusive_a=False), Arg()], + n=200, + dps=60, + rtol=1e-13, + ) + + def test_spherical_jn(self): + def mp_spherical_jn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_jn(int(n), z), + exception_to_nan(mp_spherical_jn), + [IntArg(0, 200), Arg(-1e8, 1e8)], + dps=300, + ) + + def 
test_spherical_jn_complex(self): + def mp_spherical_jn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_jn(int(n.real), z), + exception_to_nan(mp_spherical_jn), + [IntArg(0, 200), ComplexArg()] + ) + + def test_spherical_yn(self): + def mp_spherical_yn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_yn(int(n), z), + exception_to_nan(mp_spherical_yn), + [IntArg(0, 200), Arg(-1e10, 1e10)], + dps=100, + ) + + def test_spherical_yn_complex(self): + def mp_spherical_yn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_yn(int(n.real), z), + exception_to_nan(mp_spherical_yn), + [IntArg(0, 200), ComplexArg()], + ) + + def test_spherical_in(self): + def mp_spherical_in(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_in(int(n), z), + exception_to_nan(mp_spherical_in), + [IntArg(0, 200), Arg()], + dps=200, + atol=10**(-278), + ) + + def test_spherical_in_complex(self): + def mp_spherical_in(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_in(int(n.real), z), + exception_to_nan(mp_spherical_in), + [IntArg(0, 200), ComplexArg()], + ) + + def test_spherical_kn(self): + def 
mp_spherical_kn(n, z): + out = (mpmath.besselk(n + mpmath.mpf(1)/2, z) * + mpmath.sqrt(mpmath.pi/(2*mpmath.mpmathify(z)))) + if mpmath.mpmathify(z).imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_kn(int(n), z), + exception_to_nan(mp_spherical_kn), + [IntArg(0, 150), Arg()], + dps=100, + ) + + @pytest.mark.xfail(run=False, + reason="Accuracy issues near z = -1 inherited from kv.") + def test_spherical_kn_complex(self): + def mp_spherical_kn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besselk(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal( + lambda n, z: sc.spherical_kn(int(n.real), z), + exception_to_nan(mp_spherical_kn), + [IntArg(0, 200), ComplexArg()], + dps=200, + ) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_nan_inputs.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_nan_inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..de28fec4e59094354cf98bc93751e11bdefc909d --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_nan_inputs.py @@ -0,0 +1,64 @@ +"""Test how the ufuncs in special handle nan inputs. 
+ +""" +from typing import Callable + +import numpy as np +from numpy.testing import assert_array_equal, assert_, suppress_warnings +import pytest +import scipy.special as sc + + +KNOWNFAILURES: dict[str, Callable] = {} + +POSTPROCESSING: dict[str, Callable] = {} + + +def _get_ufuncs(): + ufuncs = [] + ufunc_names = [] + for name in sorted(sc.__dict__): + obj = sc.__dict__[name] + if not isinstance(obj, np.ufunc): + continue + msg = KNOWNFAILURES.get(obj) + if msg is None: + ufuncs.append(obj) + ufunc_names.append(name) + else: + fail = pytest.mark.xfail(run=False, reason=msg) + ufuncs.append(pytest.param(obj, marks=fail)) + ufunc_names.append(name) + return ufuncs, ufunc_names + + +UFUNCS, UFUNC_NAMES = _get_ufuncs() + + +@pytest.mark.parametrize("func", UFUNCS, ids=UFUNC_NAMES) +def test_nan_inputs(func): + args = (np.nan,)*func.nin + with suppress_warnings() as sup: + # Ignore warnings about unsafe casts from legacy wrappers + sup.filter(RuntimeWarning, + "floating point number truncated to an integer") + try: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + res = func(*args) + except TypeError: + # One of the arguments doesn't take real inputs + return + if func in POSTPROCESSING: + res = POSTPROCESSING[func](*res) + + msg = f"got {res} instead of nan" + assert_array_equal(np.isnan(res), True, err_msg=msg) + + +def test_legacy_cast(): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "floating point number truncated to an integer") + res = sc.bdtrc(np.nan, 1, 0.5) + assert_(np.isnan(res)) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ndtr.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ndtr.py new file mode 100644 index 0000000000000000000000000000000000000000..ba9b689b34384585cc65204000febcb99c910d55 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ndtr.py @@ -0,0 +1,77 @@ +import numpy as np +from numpy.testing import 
assert_equal, assert_allclose +import scipy.special as sc + + +def test_ndtr(): + assert_equal(sc.ndtr(0), 0.5) + assert_allclose(sc.ndtr(1), 0.8413447460685429) + + +class TestNdtri: + + def test_zero(self): + assert sc.ndtri(0.5) == 0.0 + + def test_asymptotes(self): + assert_equal(sc.ndtri([0.0, 1.0]), [-np.inf, np.inf]) + + def test_outside_of_domain(self): + assert all(np.isnan(sc.ndtri([-1.5, 1.5]))) + + +class TestLogNdtr: + + # The expected values in these tests were computed with mpmath: + # + # def log_ndtr_mp(x): + # return mpmath.log(mpmath.ncdf(x)) + # + + def test_log_ndtr_moderate_le8(self): + x = np.array([-0.75, -0.25, 0, 0.5, 1.5, 2.5, 3, 4, 5, 7, 8]) + expected = np.array([-1.4844482299196562, + -0.9130617648111351, + -0.6931471805599453, + -0.3689464152886564, + -0.06914345561223398, + -0.006229025485860002, + -0.0013508099647481938, + -3.167174337748927e-05, + -2.866516129637636e-07, + -1.279812543886654e-12, + -6.220960574271786e-16]) + y = sc.log_ndtr(x) + assert_allclose(y, expected, rtol=1e-14) + + def test_log_ndtr_values_8_16(self): + x = np.array([8.001, 8.06, 8.15, 8.5, 10, 12, 14, 16]) + expected = [-6.170639424817055e-16, + -3.814722443652823e-16, + -1.819621363526629e-16, + -9.479534822203318e-18, + -7.619853024160525e-24, + -1.776482112077679e-33, + -7.7935368191928e-45, + -6.388754400538087e-58] + y = sc.log_ndtr(x) + assert_allclose(y, expected, rtol=5e-14) + + def test_log_ndtr_values_16_31(self): + x = np.array([16.15, 20.3, 21.4, 26.2, 30.9]) + expected = [-5.678084565148492e-59, + -6.429244467698346e-92, + -6.680402412553295e-102, + -1.328698078458869e-151, + -5.972288641838264e-210] + y = sc.log_ndtr(x) + assert_allclose(y, expected, rtol=2e-13) + + def test_log_ndtr_values_gt31(self): + x = np.array([31.6, 32.8, 34.9, 37.1]) + expected = [-1.846036234858162e-219, + -2.9440539964066835e-236, + -3.71721649450857e-267, + -1.4047119663106221e-301] + y = sc.log_ndtr(x) + assert_allclose(y, expected, rtol=3e-13) diff --git 
a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ndtri_exp.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ndtri_exp.py new file mode 100644 index 0000000000000000000000000000000000000000..82a9fbd3bcda117770e00018facda3f56630a6bc --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_ndtri_exp.py @@ -0,0 +1,94 @@ +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy.special import log_ndtr, ndtri_exp +from scipy.special._testutils import assert_func_equal + + +def log_ndtr_ndtri_exp(y): + return log_ndtr(ndtri_exp(y)) + + +@pytest.fixture(scope="class") +def uniform_random_points(): + random_state = np.random.RandomState(1234) + points = random_state.random_sample(1000) + return points + + +class TestNdtriExp: + """Tests that ndtri_exp is sufficiently close to an inverse of log_ndtr. + + We have separate tests for the five intervals (-inf, -10), + [-10, -2), [-2, -0.14542), [-0.14542, -1e-6), and [-1e-6, 0). + ndtri_exp(y) is computed in three different ways depending on if y + is in (-inf, -2), [-2, log(1 - exp(-2))], or [log(1 - exp(-2), 0). + Each of these intervals is given its own test with two additional tests + for handling very small values and values very close to zero. 
+ """ + + @pytest.mark.parametrize( + "test_input", [-1e1, -1e2, -1e10, -1e20, -np.finfo(float).max] + ) + def test_very_small_arg(self, test_input, uniform_random_points): + scale = test_input + points = scale * (0.5 * uniform_random_points + 0.5) + assert_func_equal( + log_ndtr_ndtri_exp, + lambda y: y, points, + rtol=1e-14, + nan_ok=True + ) + + @pytest.mark.parametrize( + "interval,expected_rtol", + [ + ((-10, -2), 1e-14), + ((-2, -0.14542), 1e-12), + ((-0.14542, -1e-6), 1e-10), + ((-1e-6, 0), 1e-6), + ], + ) + def test_in_interval(self, interval, expected_rtol, uniform_random_points): + left, right = interval + points = (right - left) * uniform_random_points + left + assert_func_equal( + log_ndtr_ndtri_exp, + lambda y: y, points, + rtol=expected_rtol, + nan_ok=True + ) + + def test_extreme(self): + # bigneg is not quite the largest negative double precision value. + # Here's why: + # The round-trip calculation + # y = ndtri_exp(bigneg) + # bigneg2 = log_ndtr(y) + # where bigneg is a very large negative value, would--with infinite + # precision--result in bigneg2 == bigneg. When bigneg is large enough, + # y is effectively equal to -sqrt(2)*sqrt(-bigneg), and log_ndtr(y) is + # effectively -(y/sqrt(2))**2. If we use bigneg = np.finfo(float).min, + # then by construction, the theoretical value is the most negative + # finite value that can be represented with 64 bit float point. This + # means tiny changes in how the computation proceeds can result in the + # return value being -inf. (E.g. changing the constant representation + # of 1/sqrt(2) from 0.7071067811865475--which is the value returned by + # 1/np.sqrt(2)--to 0.7071067811865476--which is the most accurate 64 + # bit floating point representation of 1/sqrt(2)--results in the + # round-trip that starts with np.finfo(float).min returning -inf. So + # we'll move the bigneg value a few ULPs towards 0 to avoid this + # sensitivity. + # Use the reduce method to apply nextafter four times. 
+ bigneg = np.nextafter.reduce([np.finfo(float).min, 0, 0, 0, 0]) + # tinyneg is approx. -2.225e-308. + tinyneg = -np.finfo(float).tiny + x = np.array([tinyneg, bigneg]) + result = log_ndtr_ndtri_exp(x) + assert_allclose(result, x, rtol=1e-12) + + def test_asymptotes(self): + assert_equal(ndtri_exp([-np.inf, 0.0]), [-np.inf, np.inf]) + + def test_outside_domain(self): + assert np.isnan(ndtri_exp(1.0)) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_orthogonal.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_orthogonal.py new file mode 100644 index 0000000000000000000000000000000000000000..5b644a62c0b64b022a171da423953f1ff2e9b79d --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_orthogonal.py @@ -0,0 +1,804 @@ +import numpy as np +from numpy import array, sqrt +from numpy.testing import (assert_array_almost_equal, assert_equal, + assert_almost_equal, assert_allclose) +from pytest import raises as assert_raises + +from scipy import integrate +import scipy.special as sc +from scipy.special import gamma +import scipy.special._orthogonal as orth + + +class TestCheby: + def test_chebyc(self): + C0 = orth.chebyc(0) + C1 = orth.chebyc(1) + with np.errstate(all='ignore'): + C2 = orth.chebyc(2) + C3 = orth.chebyc(3) + C4 = orth.chebyc(4) + C5 = orth.chebyc(5) + + assert_array_almost_equal(C0.c,[2],13) + assert_array_almost_equal(C1.c,[1,0],13) + assert_array_almost_equal(C2.c,[1,0,-2],13) + assert_array_almost_equal(C3.c,[1,0,-3,0],13) + assert_array_almost_equal(C4.c,[1,0,-4,0,2],13) + assert_array_almost_equal(C5.c,[1,0,-5,0,5,0],13) + + def test_chebys(self): + S0 = orth.chebys(0) + S1 = orth.chebys(1) + S2 = orth.chebys(2) + S3 = orth.chebys(3) + S4 = orth.chebys(4) + S5 = orth.chebys(5) + assert_array_almost_equal(S0.c,[1],13) + assert_array_almost_equal(S1.c,[1,0],13) + assert_array_almost_equal(S2.c,[1,0,-1],13) + assert_array_almost_equal(S3.c,[1,0,-2,0],13) + 
assert_array_almost_equal(S4.c,[1,0,-3,0,1],13) + assert_array_almost_equal(S5.c,[1,0,-4,0,3,0],13) + + def test_chebyt(self): + T0 = orth.chebyt(0) + T1 = orth.chebyt(1) + T2 = orth.chebyt(2) + T3 = orth.chebyt(3) + T4 = orth.chebyt(4) + T5 = orth.chebyt(5) + assert_array_almost_equal(T0.c,[1],13) + assert_array_almost_equal(T1.c,[1,0],13) + assert_array_almost_equal(T2.c,[2,0,-1],13) + assert_array_almost_equal(T3.c,[4,0,-3,0],13) + assert_array_almost_equal(T4.c,[8,0,-8,0,1],13) + assert_array_almost_equal(T5.c,[16,0,-20,0,5,0],13) + + def test_chebyu(self): + U0 = orth.chebyu(0) + U1 = orth.chebyu(1) + U2 = orth.chebyu(2) + U3 = orth.chebyu(3) + U4 = orth.chebyu(4) + U5 = orth.chebyu(5) + assert_array_almost_equal(U0.c,[1],13) + assert_array_almost_equal(U1.c,[2,0],13) + assert_array_almost_equal(U2.c,[4,0,-1],13) + assert_array_almost_equal(U3.c,[8,0,-4,0],13) + assert_array_almost_equal(U4.c,[16,0,-12,0,1],13) + assert_array_almost_equal(U5.c,[32,0,-32,0,6,0],13) + + +class TestGegenbauer: + + def test_gegenbauer(self): + a = 5*np.random.random() - 0.5 + if np.any(a == 0): + a = -0.2 + Ca0 = orth.gegenbauer(0,a) + Ca1 = orth.gegenbauer(1,a) + Ca2 = orth.gegenbauer(2,a) + Ca3 = orth.gegenbauer(3,a) + Ca4 = orth.gegenbauer(4,a) + Ca5 = orth.gegenbauer(5,a) + + assert_array_almost_equal(Ca0.c,array([1]),13) + assert_array_almost_equal(Ca1.c,array([2*a,0]),13) + assert_array_almost_equal(Ca2.c,array([2*a*(a+1),0,-a]),13) + assert_array_almost_equal(Ca3.c,array([4*sc.poch(a,3),0,-6*a*(a+1), + 0])/3.0,11) + assert_array_almost_equal(Ca4.c,array([4*sc.poch(a,4),0,-12*sc.poch(a,3), + 0,3*a*(a+1)])/6.0,11) + assert_array_almost_equal(Ca5.c,array([4*sc.poch(a,5),0,-20*sc.poch(a,4), + 0,15*sc.poch(a,3),0])/15.0,11) + + +class TestHermite: + def test_hermite(self): + H0 = orth.hermite(0) + H1 = orth.hermite(1) + H2 = orth.hermite(2) + H3 = orth.hermite(3) + H4 = orth.hermite(4) + H5 = orth.hermite(5) + assert_array_almost_equal(H0.c,[1],13) + 
assert_array_almost_equal(H1.c,[2,0],13) + assert_array_almost_equal(H2.c,[4,0,-2],13) + assert_array_almost_equal(H3.c,[8,0,-12,0],13) + assert_array_almost_equal(H4.c,[16,0,-48,0,12],12) + assert_array_almost_equal(H5.c,[32,0,-160,0,120,0],12) + + def test_hermitenorm(self): + # He_n(x) = 2**(-n/2) H_n(x/sqrt(2)) + psub = np.poly1d([1.0/sqrt(2),0]) + H0 = orth.hermitenorm(0) + H1 = orth.hermitenorm(1) + H2 = orth.hermitenorm(2) + H3 = orth.hermitenorm(3) + H4 = orth.hermitenorm(4) + H5 = orth.hermitenorm(5) + he0 = orth.hermite(0)(psub) + he1 = orth.hermite(1)(psub) / sqrt(2) + he2 = orth.hermite(2)(psub) / 2.0 + he3 = orth.hermite(3)(psub) / (2*sqrt(2)) + he4 = orth.hermite(4)(psub) / 4.0 + he5 = orth.hermite(5)(psub) / (4.0*sqrt(2)) + + assert_array_almost_equal(H0.c,he0.c,13) + assert_array_almost_equal(H1.c,he1.c,13) + assert_array_almost_equal(H2.c,he2.c,13) + assert_array_almost_equal(H3.c,he3.c,13) + assert_array_almost_equal(H4.c,he4.c,13) + assert_array_almost_equal(H5.c,he5.c,13) + + +class TestShLegendre: + def test_sh_legendre(self): + # P*_n(x) = P_n(2x-1) + psub = np.poly1d([2,-1]) + Ps0 = orth.sh_legendre(0) + Ps1 = orth.sh_legendre(1) + Ps2 = orth.sh_legendre(2) + Ps3 = orth.sh_legendre(3) + Ps4 = orth.sh_legendre(4) + Ps5 = orth.sh_legendre(5) + pse0 = orth.legendre(0)(psub) + pse1 = orth.legendre(1)(psub) + pse2 = orth.legendre(2)(psub) + pse3 = orth.legendre(3)(psub) + pse4 = orth.legendre(4)(psub) + pse5 = orth.legendre(5)(psub) + assert_array_almost_equal(Ps0.c,pse0.c,13) + assert_array_almost_equal(Ps1.c,pse1.c,13) + assert_array_almost_equal(Ps2.c,pse2.c,13) + assert_array_almost_equal(Ps3.c,pse3.c,13) + assert_array_almost_equal(Ps4.c,pse4.c,12) + assert_array_almost_equal(Ps5.c,pse5.c,12) + + +class TestShChebyt: + def test_sh_chebyt(self): + # T*_n(x) = T_n(2x-1) + psub = np.poly1d([2,-1]) + Ts0 = orth.sh_chebyt(0) + Ts1 = orth.sh_chebyt(1) + Ts2 = orth.sh_chebyt(2) + Ts3 = orth.sh_chebyt(3) + Ts4 = orth.sh_chebyt(4) + Ts5 = 
orth.sh_chebyt(5) + tse0 = orth.chebyt(0)(psub) + tse1 = orth.chebyt(1)(psub) + tse2 = orth.chebyt(2)(psub) + tse3 = orth.chebyt(3)(psub) + tse4 = orth.chebyt(4)(psub) + tse5 = orth.chebyt(5)(psub) + assert_array_almost_equal(Ts0.c,tse0.c,13) + assert_array_almost_equal(Ts1.c,tse1.c,13) + assert_array_almost_equal(Ts2.c,tse2.c,13) + assert_array_almost_equal(Ts3.c,tse3.c,13) + assert_array_almost_equal(Ts4.c,tse4.c,12) + assert_array_almost_equal(Ts5.c,tse5.c,12) + + +class TestShChebyu: + def test_sh_chebyu(self): + # U*_n(x) = U_n(2x-1) + psub = np.poly1d([2,-1]) + Us0 = orth.sh_chebyu(0) + Us1 = orth.sh_chebyu(1) + Us2 = orth.sh_chebyu(2) + Us3 = orth.sh_chebyu(3) + Us4 = orth.sh_chebyu(4) + Us5 = orth.sh_chebyu(5) + use0 = orth.chebyu(0)(psub) + use1 = orth.chebyu(1)(psub) + use2 = orth.chebyu(2)(psub) + use3 = orth.chebyu(3)(psub) + use4 = orth.chebyu(4)(psub) + use5 = orth.chebyu(5)(psub) + assert_array_almost_equal(Us0.c,use0.c,13) + assert_array_almost_equal(Us1.c,use1.c,13) + assert_array_almost_equal(Us2.c,use2.c,13) + assert_array_almost_equal(Us3.c,use3.c,13) + assert_array_almost_equal(Us4.c,use4.c,12) + assert_array_almost_equal(Us5.c,use5.c,11) + + +class TestShJacobi: + def test_sh_jacobi(self): + # G^(p,q)_n(x) = n! 
gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1) + def conv(n, p): + return gamma(n + 1) * gamma(n + p) / gamma(2 * n + p) + psub = np.poly1d([2,-1]) + q = 4 * np.random.random() + p = q-1 + 2*np.random.random() + # print("shifted jacobi p,q = ", p, q) + G0 = orth.sh_jacobi(0,p,q) + G1 = orth.sh_jacobi(1,p,q) + G2 = orth.sh_jacobi(2,p,q) + G3 = orth.sh_jacobi(3,p,q) + G4 = orth.sh_jacobi(4,p,q) + G5 = orth.sh_jacobi(5,p,q) + ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p) + ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p) + ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p) + ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p) + ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p) + ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p) + + assert_array_almost_equal(G0.c,ge0.c,13) + assert_array_almost_equal(G1.c,ge1.c,13) + assert_array_almost_equal(G2.c,ge2.c,13) + assert_array_almost_equal(G3.c,ge3.c,13) + assert_array_almost_equal(G4.c,ge4.c,13) + assert_array_almost_equal(G5.c,ge5.c,13) + + +class TestCall: + def test_call(self): + poly = [] + for n in range(5): + poly.extend([x.strip() for x in + (""" + orth.jacobi(%(n)d,0.3,0.9) + orth.sh_jacobi(%(n)d,0.3,0.9) + orth.genlaguerre(%(n)d,0.3) + orth.laguerre(%(n)d) + orth.hermite(%(n)d) + orth.hermitenorm(%(n)d) + orth.gegenbauer(%(n)d,0.3) + orth.chebyt(%(n)d) + orth.chebyu(%(n)d) + orth.chebyc(%(n)d) + orth.chebys(%(n)d) + orth.sh_chebyt(%(n)d) + orth.sh_chebyu(%(n)d) + orth.legendre(%(n)d) + orth.sh_legendre(%(n)d) + """ % dict(n=n)).split() + ]) + with np.errstate(all='ignore'): + for pstr in poly: + p = eval(pstr) + assert_almost_equal(p(0.315), np.poly1d(p.coef)(0.315), + err_msg=pstr) + + +class TestGenlaguerre: + def test_regression(self): + assert_equal(orth.genlaguerre(1, 1, monic=False)(0), 2.) + assert_equal(orth.genlaguerre(1, 1, monic=True)(0), -2.) 
+ assert_equal(orth.genlaguerre(1, 1, monic=False), np.poly1d([-1, 2])) + assert_equal(orth.genlaguerre(1, 1, monic=True), np.poly1d([1, -2])) + + +def verify_gauss_quad(root_func, eval_func, weight_func, a, b, N, + rtol=1e-15, atol=5e-14): + # this test is copied from numpy's TestGauss in test_hermite.py + x, w, mu = root_func(N, True) + + n = np.arange(N, dtype=np.dtype("long")) + v = eval_func(n[:,np.newaxis], x) + vv = np.dot(v*w, v.T) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, np.newaxis] * vv * vd + assert_allclose(vv, np.eye(N), rtol, atol) + + # check that the integral of 1 is correct + assert_allclose(w.sum(), mu, rtol, atol) + + # compare the results of integrating a function with quad. + def f(x): + return x ** 3 - 3 * x ** 2 + x - 2 + resI = integrate.quad(lambda x: f(x)*weight_func(x), a, b) + resG = np.vdot(f(x), w) + rtol = 1e-6 if 1e-6 < resI[1] else resI[1] * 10 + assert_allclose(resI[0], resG, rtol=rtol) + +def test_roots_jacobi(): + def rf(a, b): + return lambda n, mu: sc.roots_jacobi(n, a, b, mu) + def ef(a, b): + return lambda n, x: sc.eval_jacobi(n, a, b, x) + def wf(a, b): + return lambda x: (1 - x) ** a * (1 + x) ** b + + vgq = verify_gauss_quad + vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., 5) + vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., + 25, atol=1e-12) + vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., + 100, atol=1e-11) + + vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 5) + vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 25, atol=1.5e-13) + vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 100, atol=2e-12) + + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 5, atol=2e-13) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 25, atol=2e-13) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 100, atol=1e-12) + + vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 5) + vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 25, atol=1e-13) + vgq(rf(0.9, 2), 
ef(0.9, 2), wf(0.9, 2), -1., 1., 100, atol=3e-13) + + vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 5) + vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 25, + atol=1.1e-14) + vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., + 100, atol=1e-13) + + vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 5, atol=1e-13) + vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 25, atol=2e-13) + vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., + 100, atol=1e-11) + + vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 5, atol=2e-13) + vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 25, atol=1e-12) + vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 100, atol=1e-11) + vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 250, atol=1e-11) + + vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 5, + atol=1e-12) + vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 25, + atol=1e-11) + vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 100, + atol=1e-10) + + vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 5, + atol=1e-12) + vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 25, + atol=1e-11) + vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 100, + atol=1e-10) + + vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 5, + atol=1e-12) + vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 25, + atol=1e-11) + vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 100, + atol=1e-10) + + vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 5) + vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 25, + atol=1e-13) + vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 100, + atol=1e-13) + + # when alpha == beta == 0, P_n^{a,b}(x) == P_n(x) + xj, wj = sc.roots_jacobi(6, 0.0, 0.0) + xl, wl = sc.roots_legendre(6) + assert_allclose(xj, xl, 1e-14, 1e-14) + assert_allclose(wj, wl, 
1e-14, 1e-14) + + # when alpha == beta != 0, P_n^{a,b}(x) == C_n^{alpha+0.5}(x) + xj, wj = sc.roots_jacobi(6, 4.0, 4.0) + xc, wc = sc.roots_gegenbauer(6, 4.5) + assert_allclose(xj, xc, 1e-14, 1e-14) + assert_allclose(wj, wc, 1e-14, 1e-14) + + x, w = sc.roots_jacobi(5, 2, 3, False) + y, v, m = sc.roots_jacobi(5, 2, 3, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(wf(2,3), -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_jacobi, 0, 1, 1) + assert_raises(ValueError, sc.roots_jacobi, 3.3, 1, 1) + assert_raises(ValueError, sc.roots_jacobi, 3, -2, 1) + assert_raises(ValueError, sc.roots_jacobi, 3, 1, -2) + assert_raises(ValueError, sc.roots_jacobi, 3, -2, -2) + +def test_roots_sh_jacobi(): + def rf(a, b): + return lambda n, mu: sc.roots_sh_jacobi(n, a, b, mu) + def ef(a, b): + return lambda n, x: sc.eval_sh_jacobi(n, a, b, x) + def wf(a, b): + return lambda x: (1.0 - x) ** (a - b) * x ** (b - 1.0) + + vgq = verify_gauss_quad + vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., 5) + vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., + 25, atol=1e-12) + vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., + 100, atol=1e-11) + + vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 5) + vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 25, atol=1e-13) + vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 100, atol=1e-12) + + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 5) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 25, atol=1.5e-13) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 100, atol=2e-12) + + vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 5) + vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 25, atol=1e-13) + vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 100, atol=1e-12) + + vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 5) + vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 25) + 
vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., + 100, atol=1e-13) + + vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 5, atol=1e-12) + vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 25, atol=1e-11) + vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 100, atol=1e-10) + + vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 5, atol=3.5e-14) + vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 25, atol=2e-13) + vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., + 100, atol=1e-12) + + x, w = sc.roots_sh_jacobi(5, 3, 2, False) + y, v, m = sc.roots_sh_jacobi(5, 3, 2, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(wf(3,2), 0, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_jacobi, 0, 1, 1) + assert_raises(ValueError, sc.roots_sh_jacobi, 3.3, 1, 1) + assert_raises(ValueError, sc.roots_sh_jacobi, 3, 1, 2) # p - q <= -1 + assert_raises(ValueError, sc.roots_sh_jacobi, 3, 2, -1) # q <= 0 + assert_raises(ValueError, sc.roots_sh_jacobi, 3, -2, -1) # both + +def test_roots_hermite(): + rootf = sc.roots_hermite + evalf = sc.eval_hermite + weightf = orth.hermite(5).weight_func + + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12) + + # Golub-Welsch branch + x, w = sc.roots_hermite(5, False) + y, v, m = sc.roots_hermite(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -np.inf, np.inf) + assert_allclose(m, muI, rtol=muI_err) + + # Asymptotic branch (switch over at n >= 150) + x, w = sc.roots_hermite(200, False) + y, v, m = sc.roots_hermite(200, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + assert_allclose(sum(v), m, 1e-14, 1e-14) + + 
assert_raises(ValueError, sc.roots_hermite, 0) + assert_raises(ValueError, sc.roots_hermite, 3.3) + +def test_roots_hermite_asy(): + # Recursion for Hermite functions + def hermite_recursion(n, nodes): + H = np.zeros((n, nodes.size)) + H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2) + if n > 1: + H[1,:] = sqrt(2.0) * nodes * H[0,:] + for k in range(2, n): + H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:] + return H + + # This tests only the nodes + def test(N, rtol=1e-15, atol=1e-14): + x, w = orth._roots_hermite_asy(N) + H = hermite_recursion(N+1, x) + assert_allclose(H[-1,:], np.zeros(N), rtol, atol) + assert_allclose(sum(w), sqrt(np.pi), rtol, atol) + + test(150, atol=1e-12) + test(151, atol=1e-12) + test(300, atol=1e-12) + test(301, atol=1e-12) + test(500, atol=1e-12) + test(501, atol=1e-12) + test(999, atol=1e-12) + test(1000, atol=1e-12) + test(2000, atol=1e-12) + test(5000, atol=1e-12) + +def test_roots_hermitenorm(): + rootf = sc.roots_hermitenorm + evalf = sc.eval_hermitenorm + weightf = orth.hermitenorm(5).weight_func + + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12) + + x, w = sc.roots_hermitenorm(5, False) + y, v, m = sc.roots_hermitenorm(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -np.inf, np.inf) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_hermitenorm, 0) + assert_raises(ValueError, sc.roots_hermitenorm, 3.3) + +def test_roots_gegenbauer(): + def rootf(a): + return lambda n, mu: sc.roots_gegenbauer(n, a, mu) + def evalf(a): + return lambda n, x: sc.eval_gegenbauer(n, a, x) + def weightf(a): + return lambda x: (1 - x ** 2) ** (a - 0.5) + + vgq = verify_gauss_quad + vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 5) + vgq(rootf(-0.25), 
evalf(-0.25), weightf(-0.25), -1., 1., 25, atol=1e-12) + vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 100, atol=1e-11) + + vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 5) + vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 25, atol=1e-13) + vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 100, atol=1e-12) + + vgq(rootf(1), evalf(1), weightf(1), -1., 1., 5) + vgq(rootf(1), evalf(1), weightf(1), -1., 1., 25, atol=1e-13) + vgq(rootf(1), evalf(1), weightf(1), -1., 1., 100, atol=1e-12) + + vgq(rootf(10), evalf(10), weightf(10), -1., 1., 5) + vgq(rootf(10), evalf(10), weightf(10), -1., 1., 25, atol=1e-13) + vgq(rootf(10), evalf(10), weightf(10), -1., 1., 100, atol=1e-12) + + vgq(rootf(50), evalf(50), weightf(50), -1., 1., 5, atol=1e-13) + vgq(rootf(50), evalf(50), weightf(50), -1., 1., 25, atol=1e-12) + vgq(rootf(50), evalf(50), weightf(50), -1., 1., 100, atol=1e-11) + + # Alpha=170 is where the approximation used in roots_gegenbauer changes + vgq(rootf(170), evalf(170), weightf(170), -1., 1., 5, atol=1e-13) + vgq(rootf(170), evalf(170), weightf(170), -1., 1., 25, atol=1e-12) + vgq(rootf(170), evalf(170), weightf(170), -1., 1., 100, atol=1e-11) + vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 5, atol=1.25e-13) + vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 25, atol=1e-12) + vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 100, atol=1e-11) + + # Test for failures, e.g. overflows, resulting from large alphas + vgq(rootf(238), evalf(238), weightf(238), -1., 1., 5, atol=1e-13) + vgq(rootf(238), evalf(238), weightf(238), -1., 1., 25, atol=1e-12) + vgq(rootf(238), evalf(238), weightf(238), -1., 1., 100, atol=1e-11) + vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 5, atol=1e-12) + vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 25, atol=1e-11) + vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 100, atol=1e-10) + + # this is a special case that the old code supported. 
+ # when alpha = 0, the gegenbauer polynomial is uniformly 0. but it goes + # to a scaled down copy of T_n(x) there. + vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 5) + vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 25) + vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 100, atol=1e-12) + + x, w = sc.roots_gegenbauer(5, 2, False) + y, v, m = sc.roots_gegenbauer(5, 2, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf(2), -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_gegenbauer, 0, 2) + assert_raises(ValueError, sc.roots_gegenbauer, 3.3, 2) + assert_raises(ValueError, sc.roots_gegenbauer, 3, -.75) + +def test_roots_chebyt(): + weightf = orth.chebyt(5).weight_func + verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 5) + verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 25) + verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 100, + atol=1e-12) + + x, w = sc.roots_chebyt(5, False) + y, v, m = sc.roots_chebyt(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebyt, 0) + assert_raises(ValueError, sc.roots_chebyt, 3.3) + +def test_chebyt_symmetry(): + x, w = sc.roots_chebyt(21) + pos, neg = x[:10], x[11:] + assert_equal(neg, -pos[::-1]) + assert_equal(x[10], 0) + +def test_roots_chebyu(): + weightf = orth.chebyu(5).weight_func + verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 5) + verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 25) + verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 100) + + x, w = sc.roots_chebyu(5, False) + y, v, m = sc.roots_chebyu(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = 
integrate.quad(weightf, -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebyu, 0) + assert_raises(ValueError, sc.roots_chebyu, 3.3) + +def test_roots_chebyc(): + weightf = orth.chebyc(5).weight_func + verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 5) + verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 25) + verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 100, + atol=1e-12) + + x, w = sc.roots_chebyc(5, False) + y, v, m = sc.roots_chebyc(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -2, 2) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebyc, 0) + assert_raises(ValueError, sc.roots_chebyc, 3.3) + +def test_roots_chebys(): + weightf = orth.chebys(5).weight_func + verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 5) + verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 25) + verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 100) + + x, w = sc.roots_chebys(5, False) + y, v, m = sc.roots_chebys(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -2, 2) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebys, 0) + assert_raises(ValueError, sc.roots_chebys, 3.3) + +def test_roots_sh_chebyt(): + weightf = orth.sh_chebyt(5).weight_func + verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 5) + verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 25) + verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., + 100, atol=1e-13) + + x, w = sc.roots_sh_chebyt(5, False) + y, v, m = sc.roots_sh_chebyt(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, 1) + 
assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_chebyt, 0) + assert_raises(ValueError, sc.roots_sh_chebyt, 3.3) + +def test_roots_sh_chebyu(): + weightf = orth.sh_chebyu(5).weight_func + verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 5) + verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 25) + verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., + 100, atol=1e-13) + + x, w = sc.roots_sh_chebyu(5, False) + y, v, m = sc.roots_sh_chebyu(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_chebyu, 0) + assert_raises(ValueError, sc.roots_sh_chebyu, 3.3) + +def test_roots_legendre(): + weightf = orth.legendre(5).weight_func + verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1., 5) + verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1., + 25, atol=1e-13) + verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1., + 100, atol=1e-12) + + x, w = sc.roots_legendre(5, False) + y, v, m = sc.roots_legendre(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_legendre, 0) + assert_raises(ValueError, sc.roots_legendre, 3.3) + +def test_roots_sh_legendre(): + weightf = orth.sh_legendre(5).weight_func + verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1., 5) + verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1., + 25, atol=1e-13) + verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1., + 100, atol=1e-12) + + x, w = sc.roots_sh_legendre(5, False) + y, v, m = sc.roots_sh_legendre(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + 
assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_legendre, 0) + assert_raises(ValueError, sc.roots_sh_legendre, 3.3) + +def test_roots_laguerre(): + weightf = orth.laguerre(5).weight_func + verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf, 5) + verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf, + 25, atol=1e-13) + verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf, + 100, atol=1e-12) + + x, w = sc.roots_laguerre(5, False) + y, v, m = sc.roots_laguerre(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, np.inf) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_laguerre, 0) + assert_raises(ValueError, sc.roots_laguerre, 3.3) + +def test_roots_genlaguerre(): + def rootf(a): + return lambda n, mu: sc.roots_genlaguerre(n, a, mu) + def evalf(a): + return lambda n, x: sc.eval_genlaguerre(n, a, x) + def weightf(a): + return lambda x: x ** a * np.exp(-x) + + vgq = verify_gauss_quad + vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 5) + vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 25, atol=1e-13) + vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 100, atol=1e-12) + + vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 5) + vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 25, atol=1e-13) + vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 100, atol=1.6e-13) + + vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 5) + vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 25, atol=1e-13) + vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 100, atol=1.03e-13) + + vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 5) + vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 25, atol=1e-13) + vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 100, atol=1e-12) + + 
vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 5) + vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 25, atol=1e-13) + vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 100, rtol=1e-14, atol=2e-13) + + x, w = sc.roots_genlaguerre(5, 2, False) + y, v, m = sc.roots_genlaguerre(5, 2, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf(2.), 0., np.inf) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_genlaguerre, 0, 2) + assert_raises(ValueError, sc.roots_genlaguerre, 3.3, 2) + assert_raises(ValueError, sc.roots_genlaguerre, 3, -1.1) + + +def test_gh_6721(): + # Regression test for gh_6721. This should not raise. + sc.chebyt(65)(0.2) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_orthogonal_eval.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_orthogonal_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..17414d8e0d6773aaca6696169bfe1d9cbb15e379 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_orthogonal_eval.py @@ -0,0 +1,272 @@ +import numpy as np +from numpy.testing import assert_, assert_allclose +import pytest + +from scipy.special import _ufuncs +import scipy.special._orthogonal as orth +from scipy.special._testutils import FuncData + + +def test_eval_chebyt(): + n = np.arange(0, 10000, 7, dtype=np.dtype("long")) + x = 2*np.random.rand() - 1 + v1 = np.cos(n*np.arccos(x)) + v2 = _ufuncs.eval_chebyt(n, x) + assert_(np.allclose(v1, v2, rtol=1e-15)) + + +def test_eval_chebyt_gh20129(): + # https://github.com/scipy/scipy/issues/20129 + assert _ufuncs.eval_chebyt(7, 2 + 0j) == 5042.0 + +def test_eval_genlaguerre_restriction(): + # check it returns nan for alpha <= -1 + assert_(np.isnan(_ufuncs.eval_genlaguerre(0, -1, 0))) + assert_(np.isnan(_ufuncs.eval_genlaguerre(0.1, -1, 0))) + + +def test_warnings(): + # ticket 1334 + with 
np.errstate(all='raise'): + # these should raise no fp warnings + _ufuncs.eval_legendre(1, 0) + _ufuncs.eval_laguerre(1, 1) + _ufuncs.eval_gegenbauer(1, 1, 0) + + +class TestPolys: + """ + Check that the eval_* functions agree with the constructed polynomials + + """ + + def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10, + nparam=10, nx=10, rtol=1e-8): + np.random.seed(1234) + + dataset = [] + for n in np.arange(nn): + params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges] + params = np.asarray(params).T + if not param_ranges: + params = [0] + for p in params: + if param_ranges: + p = (n,) + tuple(p) + else: + p = (n,) + x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx) + x[0] = x_range[0] # always include domain start point + x[1] = x_range[1] # always include domain end point + poly = np.poly1d(cls(*p).coef) + z = np.c_[np.tile(p, (nx,1)), x, poly(x)] + dataset.append(z) + + dataset = np.concatenate(dataset, axis=0) + + def polyfunc(*p): + p = (p[0].astype(np.dtype("long")),) + p[1:] + return func(*p) + + with np.errstate(all='raise'): + ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1, + rtol=rtol) + ds.check() + + def test_jacobi(self): + self.check_poly(_ufuncs.eval_jacobi, orth.jacobi, + param_ranges=[(-0.99, 10), (-0.99, 10)], + x_range=[-1, 1], rtol=1e-5) + + def test_sh_jacobi(self): + self.check_poly(_ufuncs.eval_sh_jacobi, orth.sh_jacobi, + param_ranges=[(1, 10), (0, 1)], x_range=[0, 1], + rtol=1e-5) + + def test_gegenbauer(self): + self.check_poly(_ufuncs.eval_gegenbauer, orth.gegenbauer, + param_ranges=[(-0.499, 10)], x_range=[-1, 1], + rtol=1e-7) + + def test_chebyt(self): + self.check_poly(_ufuncs.eval_chebyt, orth.chebyt, + param_ranges=[], x_range=[-1, 1]) + + def test_chebyu(self): + self.check_poly(_ufuncs.eval_chebyu, orth.chebyu, + param_ranges=[], x_range=[-1, 1]) + + def test_chebys(self): + self.check_poly(_ufuncs.eval_chebys, orth.chebys, + param_ranges=[], x_range=[-2, 2]) + + 
def test_chebyc(self): + self.check_poly(_ufuncs.eval_chebyc, orth.chebyc, + param_ranges=[], x_range=[-2, 2]) + + def test_sh_chebyt(self): + with np.errstate(all='ignore'): + self.check_poly(_ufuncs.eval_sh_chebyt, orth.sh_chebyt, + param_ranges=[], x_range=[0, 1]) + + def test_sh_chebyu(self): + self.check_poly(_ufuncs.eval_sh_chebyu, orth.sh_chebyu, + param_ranges=[], x_range=[0, 1]) + + def test_legendre(self): + self.check_poly(_ufuncs.eval_legendre, orth.legendre, + param_ranges=[], x_range=[-1, 1]) + + def test_sh_legendre(self): + with np.errstate(all='ignore'): + self.check_poly(_ufuncs.eval_sh_legendre, orth.sh_legendre, + param_ranges=[], x_range=[0, 1]) + + def test_genlaguerre(self): + self.check_poly(_ufuncs.eval_genlaguerre, orth.genlaguerre, + param_ranges=[(-0.99, 10)], x_range=[0, 100]) + + def test_laguerre(self): + self.check_poly(_ufuncs.eval_laguerre, orth.laguerre, + param_ranges=[], x_range=[0, 100]) + + def test_hermite(self): + self.check_poly(_ufuncs.eval_hermite, orth.hermite, + param_ranges=[], x_range=[-100, 100]) + + def test_hermitenorm(self): + self.check_poly(_ufuncs.eval_hermitenorm, orth.hermitenorm, + param_ranges=[], x_range=[-100, 100]) + + +class TestRecurrence: + """ + Check that the eval_* functions sig='ld->d' and 'dd->d' agree. 
+ + """ + + def check_poly(self, func, param_ranges=[], x_range=[], nn=10, + nparam=10, nx=10, rtol=1e-8): + np.random.seed(1234) + + dataset = [] + for n in np.arange(nn): + params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges] + params = np.asarray(params).T + if not param_ranges: + params = [0] + for p in params: + if param_ranges: + p = (n,) + tuple(p) + else: + p = (n,) + x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx) + x[0] = x_range[0] # always include domain start point + x[1] = x_range[1] # always include domain end point + kw = dict(sig=(len(p)+1)*'d'+'->d') + z = np.c_[np.tile(p, (nx,1)), x, func(*(p + (x,)), **kw)] + dataset.append(z) + + dataset = np.concatenate(dataset, axis=0) + + def polyfunc(*p): + p = (p[0].astype(int),) + p[1:] + kw = dict(sig='l'+(len(p)-1)*'d'+'->d') + return func(*p, **kw) + + with np.errstate(all='raise'): + ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1, + rtol=rtol) + ds.check() + + def test_jacobi(self): + self.check_poly(_ufuncs.eval_jacobi, + param_ranges=[(-0.99, 10), (-0.99, 10)], + x_range=[-1, 1]) + + def test_sh_jacobi(self): + self.check_poly(_ufuncs.eval_sh_jacobi, + param_ranges=[(1, 10), (0, 1)], x_range=[0, 1]) + + def test_gegenbauer(self): + self.check_poly(_ufuncs.eval_gegenbauer, + param_ranges=[(-0.499, 10)], x_range=[-1, 1]) + + def test_chebyt(self): + self.check_poly(_ufuncs.eval_chebyt, + param_ranges=[], x_range=[-1, 1]) + + def test_chebyu(self): + self.check_poly(_ufuncs.eval_chebyu, + param_ranges=[], x_range=[-1, 1]) + + def test_chebys(self): + self.check_poly(_ufuncs.eval_chebys, + param_ranges=[], x_range=[-2, 2]) + + def test_chebyc(self): + self.check_poly(_ufuncs.eval_chebyc, + param_ranges=[], x_range=[-2, 2]) + + def test_sh_chebyt(self): + self.check_poly(_ufuncs.eval_sh_chebyt, + param_ranges=[], x_range=[0, 1]) + + def test_sh_chebyu(self): + self.check_poly(_ufuncs.eval_sh_chebyu, + param_ranges=[], x_range=[0, 1]) + + def 
test_legendre(self): + self.check_poly(_ufuncs.eval_legendre, + param_ranges=[], x_range=[-1, 1]) + + def test_sh_legendre(self): + self.check_poly(_ufuncs.eval_sh_legendre, + param_ranges=[], x_range=[0, 1]) + + def test_genlaguerre(self): + self.check_poly(_ufuncs.eval_genlaguerre, + param_ranges=[(-0.99, 10)], x_range=[0, 100]) + + def test_laguerre(self): + self.check_poly(_ufuncs.eval_laguerre, + param_ranges=[], x_range=[0, 100]) + + def test_hermite(self): + v = _ufuncs.eval_hermite(70, 1.0) + a = -1.457076485701412e60 + assert_allclose(v, a) + + +def test_hermite_domain(): + # Regression test for gh-11091. + assert np.isnan(_ufuncs.eval_hermite(-1, 1.0)) + assert np.isnan(_ufuncs.eval_hermitenorm(-1, 1.0)) + + +@pytest.mark.parametrize("n", [0, 1, 2]) +@pytest.mark.parametrize("x", [0, 1, np.nan]) +def test_hermite_nan(n, x): + # Regression test for gh-11369. + assert np.isnan(_ufuncs.eval_hermite(n, x)) == np.any(np.isnan([n, x])) + assert np.isnan(_ufuncs.eval_hermitenorm(n, x)) == np.any(np.isnan([n, x])) + + +@pytest.mark.parametrize('n', [0, 1, 2, 3.2]) +@pytest.mark.parametrize('alpha', [1, np.nan]) +@pytest.mark.parametrize('x', [2, np.nan]) +def test_genlaguerre_nan(n, alpha, x): + # Regression test for gh-11361. + nan_laguerre = np.isnan(_ufuncs.eval_genlaguerre(n, alpha, x)) + nan_arg = np.any(np.isnan([n, alpha, x])) + assert nan_laguerre == nan_arg + + +@pytest.mark.parametrize('n', [0, 1, 2, 3.2]) +@pytest.mark.parametrize('alpha', [0.0, 1, np.nan]) +@pytest.mark.parametrize('x', [1e-6, 2, np.nan]) +def test_gegenbauer_nan(n, alpha, x): + # Regression test for gh-11370. 
+ nan_gegenbauer = np.isnan(_ufuncs.eval_gegenbauer(n, alpha, x)) + nan_arg = np.any(np.isnan([n, alpha, x])) + assert nan_gegenbauer == nan_arg diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_owens_t.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_owens_t.py new file mode 100644 index 0000000000000000000000000000000000000000..8d15aead25302023c5f07d8392c0931995764ced --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_owens_t.py @@ -0,0 +1,53 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose + +import scipy.special as sc + + +def test_symmetries(): + np.random.seed(1234) + a, h = np.random.rand(100), np.random.rand(100) + assert_equal(sc.owens_t(h, a), sc.owens_t(-h, a)) + assert_equal(sc.owens_t(h, a), -sc.owens_t(h, -a)) + + +def test_special_cases(): + assert_equal(sc.owens_t(5, 0), 0) + assert_allclose(sc.owens_t(0, 5), 0.5*np.arctan(5)/np.pi, + rtol=5e-14) + # Target value is 0.5*Phi(5)*(1 - Phi(5)) for Phi the CDF of the + # standard normal distribution + assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07, + rtol=5e-14) + + +def test_nans(): + assert_equal(sc.owens_t(20, np.nan), np.nan) + assert_equal(sc.owens_t(np.nan, 20), np.nan) + assert_equal(sc.owens_t(np.nan, np.nan), np.nan) + + +def test_infs(): + h, a = 0, np.inf + # T(0, a) = 1/2Ï€ * arctan(a) + res = 1/(2*np.pi) * np.arctan(a) + assert_allclose(sc.owens_t(h, a), res, rtol=5e-14) + assert_allclose(sc.owens_t(h, -a), -res, rtol=5e-14) + + h = 1 + # Refer Owens T function definition in Wikipedia + # https://en.wikipedia.org/wiki/Owen%27s_T_function + # Value approximated through Numerical Integration + # using scipy.integrate.quad + # quad(lambda x: 1/(2*pi)*(exp(-0.5*(1*1)*(1+x*x))/(1+x*x)), 0, inf) + res = 0.07932762696572854 + assert_allclose(sc.owens_t(h, np.inf), res, rtol=5e-14) + assert_allclose(sc.owens_t(h, -np.inf), -res, rtol=5e-14) + + 
assert_equal(sc.owens_t(np.inf, 1), 0) + assert_equal(sc.owens_t(-np.inf, 1), 0) + + assert_equal(sc.owens_t(np.inf, np.inf), 0) + assert_equal(sc.owens_t(-np.inf, np.inf), 0) + assert_equal(sc.owens_t(np.inf, -np.inf), -0.0) + assert_equal(sc.owens_t(-np.inf, -np.inf), -0.0) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c42aa688081fb58f79ad2c8ea932d03b33523b --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py @@ -0,0 +1,24 @@ +"""Tests for parabolic cylinder functions. + +""" +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import scipy.special as sc + + +def test_pbwa_segfault(): + # Regression test for https://github.com/scipy/scipy/issues/6208. + # + # Data generated by mpmath. + # + w = 1.02276567211316867161 + wp = -0.48887053372346189882 + assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0) + + +def test_pbwa_nan(): + # Check that NaN's are returned outside of the range in which the + # implementation is accurate. 
+ pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)] + for p in pts: + assert_equal(sc.pbwa(*p), (np.nan, np.nan)) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_pdtr.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_pdtr.py new file mode 100644 index 0000000000000000000000000000000000000000..122e6009bd71e77ae39f55da5cf056500ff526a9 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_pdtr.py @@ -0,0 +1,48 @@ +import numpy as np +import scipy.special as sc +from numpy.testing import assert_almost_equal, assert_array_equal + + +class TestPdtr: + def test(self): + val = sc.pdtr(0, 1) + assert_almost_equal(val, np.exp(-1)) + + def test_m_zero(self): + val = sc.pdtr([0, 1, 2], 0) + assert_array_equal(val, [1, 1, 1]) + + def test_rounding(self): + double_val = sc.pdtr([0.1, 1.1, 2.1], 1.0) + int_val = sc.pdtr([0, 1, 2], 1.0) + assert_array_equal(double_val, int_val) + + def test_inf(self): + val = sc.pdtr(np.inf, 1.0) + assert_almost_equal(val, 1.0) + + def test_domain(self): + val = sc.pdtr(-1.1, 1.0) + assert np.isnan(val) + +class TestPdtrc: + def test_value(self): + val = sc.pdtrc(0, 1) + assert_almost_equal(val, 1 - np.exp(-1)) + + def test_m_zero(self): + val = sc.pdtrc([0, 1, 2], 0.0) + assert_array_equal(val, [0, 0, 0]) + + def test_rounding(self): + double_val = sc.pdtrc([0.1, 1.1, 2.1], 1.0) + int_val = sc.pdtrc([0, 1, 2], 1.0) + assert_array_equal(double_val, int_val) + + def test_inf(self): + val = sc.pdtrc(np.inf, 1.0) + assert_almost_equal(val, 0.0) + + def test_domain(self): + val = sc.pdtrc(-1.1, 1.0) + assert np.isnan(val) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_powm1.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_powm1.py new file mode 100644 index 0000000000000000000000000000000000000000..3d809963f64ddaedf6b59de80dcd5f7ca8fa18a9 --- /dev/null +++ 
b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_powm1.py @@ -0,0 +1,65 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose +from scipy.special import powm1 + + +# Expected values were computed with mpmath, e.g. +# +# >>> import mpmath +# >>> mpmath.np.dps = 200 +# >>> print(float(mpmath.powm1(2.0, 1e-7)) +# 6.931472045825965e-08 +# +powm1_test_cases = [ + (1.25, 0.75, 0.18217701125396976, 1e-15), + (2.0, 1e-7, 6.931472045825965e-08, 1e-15), + (25.0, 5e-11, 1.6094379125636148e-10, 1e-15), + (0.99996, 0.75, -3.0000150002530058e-05, 1e-15), + (0.9999999999990905, 20, -1.81898940353014e-11, 1e-15), + (-1.25, 751.0, -6.017550852453444e+72, 2e-15) +] + + +@pytest.mark.parametrize('x, y, expected, rtol', powm1_test_cases) +def test_powm1(x, y, expected, rtol): + p = powm1(x, y) + assert_allclose(p, expected, rtol=rtol) + + +@pytest.mark.parametrize('x, y, expected', + [(0.0, 0.0, 0.0), + (0.0, -1.5, np.inf), + (0.0, 1.75, -1.0), + (-1.5, 2.0, 1.25), + (-1.5, 3.0, -4.375), + (np.nan, 0.0, 0.0), + (1.0, np.nan, 0.0), + (1.0, np.inf, 0.0), + (1.0, -np.inf, 0.0), + (np.inf, 7.5, np.inf), + (np.inf, -7.5, -1.0), + (3.25, np.inf, np.inf), + (np.inf, np.inf, np.inf), + (np.inf, -np.inf, -1.0), + (np.inf, 0.0, 0.0), + (-np.inf, 0.0, 0.0), + (-np.inf, 2.0, np.inf), + (-np.inf, 3.0, -np.inf), + (-1.0, float(2**53 - 1), -2.0)]) +def test_powm1_exact_cases(x, y, expected): + # Test cases where we have an exact expected value. + p = powm1(x, y) + assert p == expected + + +@pytest.mark.parametrize('x, y', + [(-1.25, 751.03), + (-1.25, np.inf), + (np.nan, np.nan), + (-np.inf, -np.inf), + (-np.inf, 2.5)]) +def test_powm1_return_nan(x, y): + # Test cases where the expected return value is nan. 
+ p = powm1(x, y) + assert np.isnan(p) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_precompute_utils.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_precompute_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..89616b92329691ca76039fe11a7e08f7f3db1150 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_precompute_utils.py @@ -0,0 +1,36 @@ +import pytest + +from scipy.special._testutils import MissingModule, check_version +from scipy.special._mptestutils import mp_assert_allclose +from scipy.special._precompute.utils import lagrange_inversion + +try: + import sympy +except ImportError: + sympy = MissingModule('sympy') + +try: + import mpmath as mp +except ImportError: + mp = MissingModule('mpmath') + + +@pytest.mark.slow +@check_version(sympy, '0.7') +@check_version(mp, '0.19') +class TestInversion: + @pytest.mark.xfail_on_32bit("rtol only 2e-9, see gh-6938") + def test_log(self): + with mp.workdps(30): + logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10) + expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10) + invlogcoeffs = lagrange_inversion(logcoeffs) + mp_assert_allclose(invlogcoeffs, expcoeffs) + + @pytest.mark.xfail_on_32bit("rtol only 1e-15, see gh-6938") + def test_sin(self): + with mp.workdps(30): + sincoeffs = mp.taylor(mp.sin, 0, 10) + asincoeffs = mp.taylor(mp.asin, 0, 10) + invsincoeffs = lagrange_inversion(sincoeffs) + mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_round.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_round.py new file mode 100644 index 0000000000000000000000000000000000000000..ec27e7eed2fe5cd425afd93049b9f043d74be011 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_round.py @@ -0,0 +1,16 @@ +import numpy as np +import pytest + +from scipy.special import 
_test_internal + + +@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()") +def test_add_round_up(): + np.random.seed(1234) + _test_internal.test_add_round(10**5, 'up') + + +@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()") +def test_add_round_down(): + np.random.seed(1234) + _test_internal.test_add_round(10**5, 'down') diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py new file mode 100644 index 0000000000000000000000000000000000000000..e668629d564ffb37d821c1f1af2869ab045ded50 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py @@ -0,0 +1,134 @@ +import sys +import warnings + +import numpy as np +from numpy.testing import assert_, assert_equal, IS_PYPY +import pytest +from pytest import raises as assert_raises + +import scipy.special as sc +from scipy.special._ufuncs import _sf_error_test_function + +_sf_error_code_map = { + # skip 'ok' + 'singular': 1, + 'underflow': 2, + 'overflow': 3, + 'slow': 4, + 'loss': 5, + 'no_result': 6, + 'domain': 7, + 'arg': 8, + 'other': 9 +} + +_sf_error_actions = [ + 'ignore', + 'warn', + 'raise' +] + + +def _check_action(fun, args, action): + # TODO: special expert should correct + # the coercion at the true location? 
+ args = np.asarray(args, dtype=np.dtype("long")) + if action == 'warn': + with pytest.warns(sc.SpecialFunctionWarning): + fun(*args) + elif action == 'raise': + with assert_raises(sc.SpecialFunctionError): + fun(*args) + else: + # action == 'ignore', make sure there are no warnings/exceptions + with warnings.catch_warnings(): + warnings.simplefilter("error") + fun(*args) + + +def test_geterr(): + err = sc.geterr() + for key, value in err.items(): + assert_(key in _sf_error_code_map) + assert_(value in _sf_error_actions) + + +def test_seterr(): + entry_err = sc.geterr() + try: + for category, error_code in _sf_error_code_map.items(): + for action in _sf_error_actions: + geterr_olderr = sc.geterr() + seterr_olderr = sc.seterr(**{category: action}) + assert_(geterr_olderr == seterr_olderr) + newerr = sc.geterr() + assert_(newerr[category] == action) + geterr_olderr.pop(category) + newerr.pop(category) + assert_(geterr_olderr == newerr) + _check_action(_sf_error_test_function, (error_code,), action) + finally: + sc.seterr(**entry_err) + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_sf_error_special_refcount(): + # Regression test for gh-16233. + # Check that the reference count of scipy.special is not increased + # when a SpecialFunctionError is raised. 
+ refcount_before = sys.getrefcount(sc) + with sc.errstate(all='raise'): + with pytest.raises(sc.SpecialFunctionError, match='domain error'): + sc.ndtri(2.0) + refcount_after = sys.getrefcount(sc) + assert refcount_after == refcount_before + + +def test_errstate_pyx_basic(): + olderr = sc.geterr() + with sc.errstate(singular='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.loggamma(0) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_c_basic(): + olderr = sc.geterr() + with sc.errstate(domain='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.spence(-1) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_cpp_basic(): + olderr = sc.geterr() + with sc.errstate(underflow='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.wrightomega(-1000) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_cpp_scipy_special(): + olderr = sc.geterr() + with sc.errstate(singular='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.lambertw(0, 1) + assert_equal(olderr, sc.geterr()) + + +def test_errstate(): + for category, error_code in _sf_error_code_map.items(): + for action in _sf_error_actions: + olderr = sc.geterr() + with sc.errstate(**{category: action}): + _check_action(_sf_error_test_function, (error_code,), action) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_all_but_one(): + olderr = sc.geterr() + with sc.errstate(all='raise', singular='ignore'): + sc.gammaln(0) + with assert_raises(sc.SpecialFunctionError): + sc.spence(-1.0) + assert_equal(olderr, sc.geterr()) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_specfun.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_specfun.py new file mode 100644 index 0000000000000000000000000000000000000000..891a08d3bce06e1feaddbb760bc7682909781d20 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_specfun.py @@ -0,0 +1,36 @@ +""" +Various made-up tests to hit different 
branches of the code in specfun.c +""" + +import numpy as np +from numpy.testing import assert_allclose +from scipy import special + + +def test_cchg_branches(): + res = special.hyp1f1(0.1, 1, 7.0-24.0j) + assert_allclose(res, (-3.7659844658568016+4.970311359851648j)) + + +def test_cva2_cv0_branches(): + res, resp = special.mathieu_cem([40, 129], [13, 14], [30, 45]) + assert_allclose(res, np.array([-0.3741211, 0.74441928])) + assert_allclose(resp, np.array([-37.02872758, -86.13549877])) + + res, resp = special.mathieu_sem([40, 129], [13, 14], [30, 45]) + assert_allclose(res, np.array([0.92955551, 0.66771207])) + assert_allclose(resp, np.array([-14.91073448, 96.02954185])) + + +def test_chgm_branches(): + res = special.eval_genlaguerre(-3.2, 3, 2.5) + assert_allclose(res, -0.7077721935779854) + + +def test_hygfz_branches(): + """(z == 1.0) && (c-a-b > 0.0)""" + res = special.hyp2f1(1.5, 2.5, 4.5, 1.+0.j) + assert_allclose(res, 10.30835089459151+0j) + """(cabs(z+1) < eps) && (fabs(c-a+b - 1.0) < eps)""" + res = special.hyp2f1(5+5e-16, 2, 2, -1.0 + 5e-16j) + assert_allclose(res, 0.031249999999999986+3.9062499999999994e-17j) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spence.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spence.py new file mode 100644 index 0000000000000000000000000000000000000000..fbb26ac281dff81ea71b30318731065fe5a78f94 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spence.py @@ -0,0 +1,32 @@ +import numpy as np +from numpy import sqrt, log, pi +from scipy.special._testutils import FuncData +from scipy.special import spence + + +def test_consistency(): + # Make sure the implementation of spence for real arguments + # agrees with the implementation of spence for imaginary arguments. 
+ + x = np.logspace(-30, 300, 200) + dataset = np.vstack((x + 0j, spence(x))).T + FuncData(spence, dataset, 0, 1, rtol=1e-14).check() + + +def test_special_points(): + # Check against known values of Spence's function. + + phi = (1 + sqrt(5))/2 + dataset = [(1, 0), + (2, -pi**2/12), + (0.5, pi**2/12 - log(2)**2/2), + (0, pi**2/6), + (-1, pi**2/4 - 1j*pi*log(2)), + ((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2), + ((3 - sqrt(5))/2, pi**2/10 - log(phi)**2), + (phi, -pi**2/15 + log(phi)**2/2), + # Corrected from Zagier, "The Dilogarithm Function" + ((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)] + + dataset = np.asarray(dataset) + FuncData(spence, dataset, 0, 1, rtol=1e-14).check() diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..c4a047c78fb8542bd0abbb75a4815d777e1414b0 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py @@ -0,0 +1,61 @@ +import numpy as np +from numpy.testing import (assert_array_equal, + assert_array_almost_equal_nulp, assert_almost_equal) +from pytest import raises as assert_raises + +from scipy.special import gammaln, multigammaln + + +class TestMultiGammaLn: + + def test1(self): + # A test of the identity + # Gamma_1(a) = Gamma(a) + np.random.seed(1234) + a = np.abs(np.random.randn()) + assert_array_equal(multigammaln(a, 1), gammaln(a)) + + def test2(self): + # A test of the identity + # Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5) + a = np.array([2.5, 10.0]) + result = multigammaln(a, 2) + expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5) + assert_almost_equal(result, expected) + + def test_bararg(self): + assert_raises(ValueError, multigammaln, 0.5, 1.2) + + +def _check_multigammaln_array_result(a, d): + # Test that the shape of the array returned by multigammaln + # matches the input 
shape, and that all the values match + # the value computed when multigammaln is called with a scalar. + result = multigammaln(a, d) + assert_array_equal(a.shape, result.shape) + a1 = a.ravel() + result1 = result.ravel() + for i in range(a.size): + assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d)) + + +def test_multigammaln_array_arg(): + # Check that the array returned by multigammaln has the correct + # shape and contains the correct values. The cases have arrays + # with several different shapes. + # The cases include a regression test for ticket #1849 + # (a = np.array([2.0]), an array with a single element). + np.random.seed(1234) + + cases = [ + # a, d + (np.abs(np.random.randn(3, 2)) + 5, 5), + (np.abs(np.random.randn(1, 2)) + 5, 5), + (np.arange(10.0, 18.0).reshape(2, 2, 2), 3), + (np.array([2.0]), 3), + (np.float64(2.0), 3), + ] + + for a, d in cases: + _check_multigammaln_array_result(a, d) + diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py new file mode 100644 index 0000000000000000000000000000000000000000..6539407bc9d8787af9c85d2e9ac3dffccd1ffa50 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py @@ -0,0 +1,37 @@ +import numpy as np +from numpy.testing import assert_allclose +import scipy.special as sc + + +def test_first_harmonics(): + # Test against explicit representations of the first four + # spherical harmonics which use `theta` as the azimuthal angle, + # `phi` as the polar angle, and include the Condon-Shortley + # phase. 
+ + # Notation is Ymn + def Y00(theta, phi): + return 0.5*np.sqrt(1/np.pi) + + def Yn11(theta, phi): + return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi) + + def Y01(theta, phi): + return 0.5*np.sqrt(3/np.pi)*np.cos(phi) + + def Y11(theta, phi): + return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi) + + harms = [Y00, Yn11, Y01, Y11] + m = [0, -1, 0, 1] + n = [0, 1, 1, 1] + + theta = np.linspace(0, 2*np.pi) + phi = np.linspace(0, np.pi) + theta, phi = np.meshgrid(theta, phi) + + for harm, m, n in zip(harms, m, n): + assert_allclose(sc.sph_harm(m, n, theta, phi), + harm(theta, phi), + rtol=1e-15, atol=1e-15, + err_msg=f"Y^{m}_{n} incorrect") diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py new file mode 100644 index 0000000000000000000000000000000000000000..847bb3b49103ee15126291a3dfe9a3e80f2765c3 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py @@ -0,0 +1,385 @@ +# +# Tests of spherical Bessel functions. +# +import numpy as np +from numpy.testing import (assert_almost_equal, assert_allclose, + assert_array_almost_equal, suppress_warnings) +import pytest +from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi + +from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn +from scipy.integrate import quad + + +class TestSphericalJn: + def test_spherical_jn_exact(self): + # https://dlmf.nist.gov/10.49.E3 + # Note: exact expression is numerically stable only for small + # n or z >> n. 
+ x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) + assert_allclose(spherical_jn(2, x), + (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x)) + + def test_spherical_jn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x), + (2*n + 1)/x*spherical_jn(n, x)) + + def test_spherical_jn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x), + (2*n + 1)/x*spherical_jn(n, x)) + + def test_spherical_jn_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 6 + x = np.array([-inf, inf]) + assert_allclose(spherical_jn(n, x), np.array([0, 0])) + + def test_spherical_jn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E3 + n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)])) + + def test_spherical_jn_large_arg_1(self): + # https://github.com/scipy/scipy/issues/2165 + # Reference value computed using mpmath, via + # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z)) + assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747) + + def test_spherical_jn_large_arg_2(self): + # https://github.com/scipy/scipy/issues/1641 + # Reference value computed using mpmath, via + # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z)) + assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05) + + def test_spherical_jn_at_zero(self): + # https://dlmf.nist.gov/10.52.E1 + # But note that n = 0 is a special case: j0 = sin(x)/x -> 1 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0])) + + +class TestSphericalYn: + def test_spherical_yn_exact(self): + # https://dlmf.nist.gov/10.49.E5 + # Note: exact expression is numerically stable only for small + # 
n or z >> n. + x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) + assert_allclose(spherical_yn(2, x), + (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x)) + + def test_spherical_yn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x), + (2*n + 1)/x*spherical_yn(n, x)) + + def test_spherical_yn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x), + (2*n + 1)/x*spherical_yn(n, x)) + + def test_spherical_yn_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 6 + x = np.array([-inf, inf]) + assert_allclose(spherical_yn(n, x), np.array([0, 0])) + + def test_spherical_yn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E3 + n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)])) + + def test_spherical_yn_at_zero(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_yn(n, x), np.full(n.shape, -inf)) + + def test_spherical_yn_at_zero_complex(self): + # Consistently with numpy: + # >>> -np.cos(0)/0 + # -inf + # >>> -np.cos(0+0j)/(0+0j) + # (-inf + nan*j) + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + 0j + assert_allclose(spherical_yn(n, x), np.full(n.shape, nan)) + + +class TestSphericalJnYnCrossProduct: + def test_spherical_jn_yn_cross_product_1(self): + # https://dlmf.nist.gov/10.50.E3 + n = np.array([1, 5, 8]) + x = np.array([0.1, 1, 10]) + left = (spherical_jn(n + 1, x) * spherical_yn(n, x) - + spherical_jn(n, x) * spherical_yn(n + 1, x)) + right = 1/x**2 + assert_allclose(left, right) + + def test_spherical_jn_yn_cross_product_2(self): + # https://dlmf.nist.gov/10.50.E3 + n = np.array([1, 5, 8]) + x = 
np.array([0.1, 1, 10]) + left = (spherical_jn(n + 2, x) * spherical_yn(n, x) - + spherical_jn(n, x) * spherical_yn(n + 2, x)) + right = (2*n + 3)/x**3 + assert_allclose(left, right) + + +class TestSphericalIn: + def test_spherical_in_exact(self): + # https://dlmf.nist.gov/10.49.E9 + x = np.array([0.12, 1.23, 12.34, 123.45]) + assert_allclose(spherical_in(2, x), + (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x)) + + def test_spherical_in_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), + (2*n + 1)/x*spherical_in(n, x)) + + def test_spherical_in_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), + (2*n + 1)/x*spherical_in(n, x)) + + def test_spherical_in_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 5 + x = np.array([-inf, inf]) + assert_allclose(spherical_in(n, x), np.array([-inf, inf])) + + def test_spherical_in_inf_complex(self): + # https://dlmf.nist.gov/10.52.E5 + # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but + # this appears impossible to achieve because C99 regards any complex + # value with at least one infinite part as a complex infinity, so + # 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is + # the correct return value. 
+ n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan])) + + def test_spherical_in_at_zero(self): + # https://dlmf.nist.gov/10.52.E1 + # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0])) + + +class TestSphericalKn: + def test_spherical_kn_exact(self): + # https://dlmf.nist.gov/10.49.E13 + x = np.array([0.12, 1.23, 12.34, 123.45]) + assert_allclose(spherical_kn(2, x), + pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3)) + + def test_spherical_kn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose( + (-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), + (-1)**n*(2*n + 1)/x*spherical_kn(n, x) + ) + + def test_spherical_kn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose( + (-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), + (-1)**n*(2*n + 1)/x*spherical_kn(n, x) + ) + + def test_spherical_kn_inf_real(self): + # https://dlmf.nist.gov/10.52.E6 + n = 5 + x = np.array([-inf, inf]) + assert_allclose(spherical_kn(n, x), np.array([-inf, 0])) + + def test_spherical_kn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E6 + # The behavior at complex infinity depends on the sign of the real + # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's + # z*inf. This distinction cannot be captured, so we return nan. 
+ n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan])) + + def test_spherical_kn_at_zero(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_kn(n, x), np.full(n.shape, inf)) + + def test_spherical_kn_at_zero_complex(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + 0j + assert_allclose(spherical_kn(n, x), np.full(n.shape, nan)) + + +class SphericalDerivativesTestCase: + def fundamental_theorem(self, n, a, b): + integral, tolerance = quad(lambda z: self.df(n, z), a, b) + assert_allclose(integral, + self.f(n, b) - self.f(n, a), + atol=tolerance) + + @pytest.mark.slow + def test_fundamental_theorem_0(self): + self.fundamental_theorem(0, 3.0, 15.0) + + @pytest.mark.slow + def test_fundamental_theorem_7(self): + self.fundamental_theorem(7, 0.5, 1.2) + + +class TestSphericalJnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_jn(n, z) + + def df(self, n, z): + return spherical_jn(n, z, derivative=True) + + def test_spherical_jn_d_zero(self): + n = np.array([0, 1, 2, 3, 7, 15]) + assert_allclose(spherical_jn(n, 0, derivative=True), + np.array([0, 1/3, 0, 0, 0, 0])) + + +class TestSphericalYnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_yn(n, z) + + def df(self, n, z): + return spherical_yn(n, z, derivative=True) + + +class TestSphericalInDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_in(n, z) + + def df(self, n, z): + return spherical_in(n, z, derivative=True) + + def test_spherical_in_d_zero(self): + n = np.array([0, 1, 2, 3, 7, 15]) + spherical_in(n, 0, derivative=False) + assert_allclose(spherical_in(n, 0, derivative=True), + np.array([0, 1/3, 0, 0, 0, 0])) + + +class TestSphericalKnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_kn(n, z) + + def df(self, n, z): + 
return spherical_kn(n, z, derivative=True) + + +class TestSphericalOld: + # These are tests from the TestSpherical class of test_basic.py, + # rewritten to use spherical_* instead of sph_* but otherwise unchanged. + + def test_sph_in(self): + # This test reproduces test_basic.TestSpherical.test_sph_in. + i1n = np.empty((2,2)) + x = 0.2 + + i1n[0][0] = spherical_in(0, x) + i1n[0][1] = spherical_in(1, x) + i1n[1][0] = spherical_in(0, x, derivative=True) + i1n[1][1] = spherical_in(1, x, derivative=True) + + inp0 = (i1n[0][1]) + inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1]) + assert_array_almost_equal(i1n[0],np.array([1.0066800127054699381, + 0.066933714568029540839]),12) + assert_array_almost_equal(i1n[1],[inp0,inp1],12) + + def test_sph_in_kn_order0(self): + x = 1. + sph_i0 = np.empty((2,)) + sph_i0[0] = spherical_in(0, x) + sph_i0[1] = spherical_in(0, x, derivative=True) + sph_i0_expected = np.array([np.sinh(x)/x, + np.cosh(x)/x-np.sinh(x)/x**2]) + assert_array_almost_equal(r_[sph_i0], sph_i0_expected) + + sph_k0 = np.empty((2,)) + sph_k0[0] = spherical_kn(0, x) + sph_k0[1] = spherical_kn(0, x, derivative=True) + sph_k0_expected = np.array([0.5*pi*exp(-x)/x, + -0.5*pi*exp(-x)*(1/x+1/x**2)]) + assert_array_almost_equal(r_[sph_k0], sph_k0_expected) + + def test_sph_jn(self): + s1 = np.empty((2,3)) + x = 0.2 + + s1[0][0] = spherical_jn(0, x) + s1[0][1] = spherical_jn(1, x) + s1[0][2] = spherical_jn(2, x) + s1[1][0] = spherical_jn(0, x, derivative=True) + s1[1][1] = spherical_jn(1, x, derivative=True) + s1[1][2] = spherical_jn(2, x, derivative=True) + + s10 = -s1[0][1] + s11 = s1[0][0]-2.0/0.2*s1[0][1] + s12 = s1[0][1]-3.0/0.2*s1[0][2] + assert_array_almost_equal(s1[0],[0.99334665397530607731, + 0.066400380670322230863, + 0.0026590560795273856680],12) + assert_array_almost_equal(s1[1],[s10,s11,s12],12) + + def test_sph_kn(self): + kn = np.empty((2,3)) + x = 0.2 + + kn[0][0] = spherical_kn(0, x) + kn[0][1] = spherical_kn(1, x) + kn[0][2] = spherical_kn(2, x) + kn[1][0] = 
spherical_kn(0, x, derivative=True) + kn[1][1] = spherical_kn(1, x, derivative=True) + kn[1][2] = spherical_kn(2, x, derivative=True) + + kn0 = -kn[0][1] + kn1 = -kn[0][0]-2.0/0.2*kn[0][1] + kn2 = -kn[0][1]-3.0/0.2*kn[0][2] + assert_array_almost_equal(kn[0],[6.4302962978445670140, + 38.581777787067402086, + 585.15696310385559829],12) + assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9) + + def test_sph_yn(self): + sy1 = spherical_yn(2, 0.2) + sy2 = spherical_yn(0, 0.2) + assert_almost_equal(sy1,-377.52483,5) # previous values in the system + assert_almost_equal(sy2,-4.9003329,5) + sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3 + sy3 = spherical_yn(1, 0.2, derivative=True) + # compare correct derivative val. (correct =-system val). + assert_almost_equal(sy3,sphpy,4) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_trig.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_trig.py new file mode 100644 index 0000000000000000000000000000000000000000..578dfbd5e95e6c44b1828716a74c93d645efcb1e --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_trig.py @@ -0,0 +1,72 @@ +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose, suppress_warnings + +from scipy.special._ufuncs import _sinpi as sinpi +from scipy.special._ufuncs import _cospi as cospi + + +def test_integer_real_part(): + x = np.arange(-100, 101) + y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10))) + x, y = np.meshgrid(x, y) + z = x + 1j*y + # In the following we should be *exactly* right + res = sinpi(z) + assert_equal(res.real, 0.0) + res = cospi(z) + assert_equal(res.imag, 0.0) + + +def test_half_integer_real_part(): + x = np.arange(-100, 101) + 0.5 + y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10))) + x, y = np.meshgrid(x, y) + z = x + 1j*y + # In the following we should be *exactly* right + res = sinpi(z) + assert_equal(res.imag, 0.0) + res 
= cospi(z) + assert_equal(res.real, 0.0) + + +@pytest.mark.skip("Temporary skip while gh-19526 is being resolved") +def test_intermediate_overlow(): + # Make sure we avoid overflow in situations where cosh/sinh would + # overflow but the product with sin/cos would not + sinpi_pts = [complex(1 + 1e-14, 227), + complex(1e-35, 250), + complex(1e-301, 445)] + # Data generated with mpmath + sinpi_std = [complex(-8.113438309924894e+295, -np.inf), + complex(1.9507801934611995e+306, np.inf), + complex(2.205958493464539e+306, np.inf)] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + for p, std in zip(sinpi_pts, sinpi_std): + res = sinpi(p) + assert_allclose(res.real, std.real) + assert_allclose(res.imag, std.imag) + + # Test for cosine, less interesting because cos(0) = 1. + p = complex(0.5 + 1e-14, 227) + std = complex(-8.113438309924894e+295, -np.inf) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + res = cospi(p) + assert_allclose(res.real, std.real) + assert_allclose(res.imag, std.imag) + + +def test_zero_sign(): + y = sinpi(-0.0) + assert y == 0.0 + assert np.signbit(y) + + y = sinpi(0.0) + assert y == 0.0 + assert not np.signbit(y) + + y = cospi(0.5) + assert y == 0.0 + assert not np.signbit(y) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_wright_bessel.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_wright_bessel.py new file mode 100644 index 0000000000000000000000000000000000000000..319db817c1ed3d35de1c08cadea002bf0eac3b0d --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_wright_bessel.py @@ -0,0 +1,115 @@ +# Reference MPMATH implementation: +# +# import mpmath +# from mpmath import nsum +# +# def Wright_Series_MPMATH(a, b, z, dps=50, method='r+s+e', steps=[1000]): +# """Compute Wright' generalized Bessel function as Series. 
+# +# This uses mpmath for arbitrary precision. +# """ +# with mpmath.workdps(dps): +# res = nsum(lambda k: z**k/mpmath.fac(k) * mpmath.rgamma(a*k+b), +# [0, mpmath.inf], +# tol=dps, method=method, steps=steps +# ) +# +# return res + +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose + +import scipy.special as sc +from scipy.special import rgamma, wright_bessel + + +@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10]) +@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10]) +def test_wright_bessel_zero(a, b): + """Test at x = 0.""" + assert_equal(wright_bessel(a, b, 0.), rgamma(b)) + + +@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10]) +@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1]) +def test_wright_bessel_iv(b, x): + """Test relation of wright_bessel and modified bessel function iv. + + iv(z) = (1/2*z)**v * Phi(1, v+1; 1/4*z**2). + See https://dlmf.nist.gov/10.46.E2 + """ + if x != 0: + v = b - 1 + wb = wright_bessel(1, v + 1, x**2 / 4.) + # Note: iv(v, x) has precision of less than 1e-12 for some cases + # e.g v=1-1e-6 and x=1e-06) + assert_allclose(np.power(x / 2., v) * wb, + sc.iv(v, x), + rtol=1e-11, atol=1e-11) + + +@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10]) +@pytest.mark.parametrize('b', [1, 1 + 1e-3, 2, 5, 10]) +@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1, 5, 10, 100]) +def test_wright_functional(a, b, x): + """Test functional relation of wright_bessel. + + Phi(a, b-1, z) = a*z*Phi(a, b+a, z) + (b-1)*Phi(a, b, z) + + Note that d/dx Phi(a, b, x) = Phi(a, b-1, x) + See Eq. (22) of + B. Stankovic, On the Function of E. M. Wright, + Publ. de l' Institut Mathematique, Beograd, + Nouvelle S`er. 10 (1970), 113-124. 
+ """ + assert_allclose(wright_bessel(a, b - 1, x), + a * x * wright_bessel(a, b + a, x) + + (b - 1) * wright_bessel(a, b, x), + rtol=1e-8, atol=1e-8) + + +# grid of rows [a, b, x, value, accuracy] that do not reach 1e-11 accuracy +# see output of: +# cd scipy/scipy/_precompute +# python wright_bessel_data.py +grid_a_b_x_value_acc = np.array([ + [0.1, 100.0, 709.7827128933841, 8.026353022981087e+34, 2e-8], + [0.5, 10.0, 709.7827128933841, 2.680788404494657e+48, 9e-8], + [0.5, 10.0, 1000.0, 2.005901980702872e+64, 1e-8], + [0.5, 100.0, 1000.0, 3.4112367580445246e-117, 6e-8], + [1.0, 20.0, 100000.0, 1.7717158630699857e+225, 3e-11], + [1.0, 100.0, 100000.0, 1.0269334596230763e+22, np.nan], + [1.0000000000000222, 20.0, 100000.0, 1.7717158630001672e+225, 3e-11], + [1.0000000000000222, 100.0, 100000.0, 1.0269334595866202e+22, np.nan], + [1.5, 0.0, 500.0, 15648961196.432373, 3e-11], + [1.5, 2.220446049250313e-14, 500.0, 15648961196.431465, 3e-11], + [1.5, 1e-10, 500.0, 15648961192.344728, 3e-11], + [1.5, 1e-05, 500.0, 15648552437.334162, 3e-11], + [1.5, 0.1, 500.0, 12049870581.10317, 2e-11], + [1.5, 20.0, 100000.0, 7.81930438331405e+43, 3e-9], + [1.5, 100.0, 100000.0, 9.653370857459075e-130, np.nan], + ]) + + +@pytest.mark.xfail +@pytest.mark.parametrize( + 'a, b, x, phi', + grid_a_b_x_value_acc[:, :4].tolist()) +def test_wright_data_grid_failures(a, b, x, phi): + """Test cases of test_data that do not reach relative accuracy of 1e-11""" + assert_allclose(wright_bessel(a, b, x), phi, rtol=1e-11) + + +@pytest.mark.parametrize( + 'a, b, x, phi, accuracy', + grid_a_b_x_value_acc.tolist()) +def test_wright_data_grid_less_accurate(a, b, x, phi, accuracy): + """Test cases of test_data that do not reach relative accuracy of 1e-11 + + Here we test for reduced accuracy or even nan. 
+ """ + if np.isnan(accuracy): + assert np.isnan(wright_bessel(a, b, x)) + else: + assert_allclose(wright_bessel(a, b, x), phi, rtol=accuracy) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_wrightomega.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_wrightomega.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a93ca007e42fea3a0dec634c51b37f03effa9e --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_wrightomega.py @@ -0,0 +1,117 @@ +import pytest +import numpy as np +from numpy.testing import assert_, assert_equal, assert_allclose + +import scipy.special as sc +from scipy.special._testutils import assert_func_equal + + +def test_wrightomega_nan(): + pts = [complex(np.nan, 0), + complex(0, np.nan), + complex(np.nan, np.nan), + complex(np.nan, 1), + complex(1, np.nan)] + for p in pts: + res = sc.wrightomega(p) + assert_(np.isnan(res.real)) + assert_(np.isnan(res.imag)) + + +def test_wrightomega_inf_branch(): + pts = [complex(-np.inf, np.pi/4), + complex(-np.inf, -np.pi/4), + complex(-np.inf, 3*np.pi/4), + complex(-np.inf, -3*np.pi/4)] + expected_results = [complex(0.0, 0.0), + complex(0.0, -0.0), + complex(-0.0, 0.0), + complex(-0.0, -0.0)] + for p, expected in zip(pts, expected_results): + res = sc.wrightomega(p) + # We can't use assert_equal(res, expected) because in older versions of + # numpy, assert_equal doesn't check the sign of the real and imaginary + # parts when comparing complex zeros. It does check the sign when the + # arguments are *real* scalars. 
+ assert_equal(res.real, expected.real) + assert_equal(res.imag, expected.imag) + + +def test_wrightomega_inf(): + pts = [complex(np.inf, 10), + complex(-np.inf, 10), + complex(10, np.inf), + complex(10, -np.inf)] + for p in pts: + assert_equal(sc.wrightomega(p), p) + + +def test_wrightomega_singular(): + pts = [complex(-1.0, np.pi), + complex(-1.0, -np.pi)] + for p in pts: + res = sc.wrightomega(p) + assert_equal(res, -1.0) + assert_(np.signbit(res.imag) == np.bool_(False)) + + +@pytest.mark.parametrize('x, desired', [ + (-np.inf, 0), + (np.inf, np.inf), +]) +def test_wrightomega_real_infinities(x, desired): + assert sc.wrightomega(x) == desired + + +def test_wrightomega_real_nan(): + assert np.isnan(sc.wrightomega(np.nan)) + + +def test_wrightomega_real_series_crossover(): + desired_error = 2 * np.finfo(float).eps + crossover = 1e20 + x_before_crossover = np.nextafter(crossover, -np.inf) + x_after_crossover = np.nextafter(crossover, np.inf) + # Computed using Mpmath + desired_before_crossover = 99999999999999983569.948 + desired_after_crossover = 100000000000000016337.948 + assert_allclose( + sc.wrightomega(x_before_crossover), + desired_before_crossover, + atol=0, + rtol=desired_error, + ) + assert_allclose( + sc.wrightomega(x_after_crossover), + desired_after_crossover, + atol=0, + rtol=desired_error, + ) + + +def test_wrightomega_exp_approximation_crossover(): + desired_error = 2 * np.finfo(float).eps + crossover = -50 + x_before_crossover = np.nextafter(crossover, np.inf) + x_after_crossover = np.nextafter(crossover, -np.inf) + # Computed using Mpmath + desired_before_crossover = 1.9287498479639314876e-22 + desired_after_crossover = 1.9287498479639040784e-22 + assert_allclose( + sc.wrightomega(x_before_crossover), + desired_before_crossover, + atol=0, + rtol=desired_error, + ) + assert_allclose( + sc.wrightomega(x_after_crossover), + desired_after_crossover, + atol=0, + rtol=desired_error, + ) + + +def test_wrightomega_real_versus_complex(): + x = 
np.linspace(-500, 500, 1001) + results = sc.wrightomega(x + 0j).real + assert_func_equal(sc.wrightomega, results, x, atol=0, rtol=1e-14) diff --git a/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_zeta.py b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_zeta.py new file mode 100644 index 0000000000000000000000000000000000000000..82b3245cac5399e5d6877fe2d8b2ddad585e9c2a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/special/tests/test_zeta.py @@ -0,0 +1,49 @@ +import scipy.special as sc +import numpy as np +from numpy.testing import assert_equal, assert_allclose + + +def test_zeta(): + assert_allclose(sc.zeta(2,2), np.pi**2/6 - 1, rtol=1e-12) + + +def test_zetac(): + # Expected values in the following were computed using Wolfram + # Alpha's `Zeta[x] - 1` + x = [-2.1, 0.8, 0.9999, 9, 50, 75] + desired = [ + -0.9972705002153750, + -5.437538415895550, + -10000.42279161673, + 0.002008392826082214, + 8.881784210930816e-16, + 2.646977960169853e-23, + ] + assert_allclose(sc.zetac(x), desired, rtol=1e-12) + + +def test_zetac_special_cases(): + assert sc.zetac(np.inf) == 0 + assert np.isnan(sc.zetac(-np.inf)) + assert sc.zetac(0) == -1.5 + assert sc.zetac(1.0) == np.inf + + assert_equal(sc.zetac([-2, -50, -100]), -1) + + +def test_riemann_zeta_special_cases(): + assert np.isnan(sc.zeta(np.nan)) + assert sc.zeta(np.inf) == 1 + assert sc.zeta(0) == -0.5 + + # Riemann zeta is zero add negative even integers. 
+ assert_equal(sc.zeta([-2, -4, -6, -8, -10]), 0) + + assert_allclose(sc.zeta(2), np.pi**2/6, rtol=1e-12) + assert_allclose(sc.zeta(4), np.pi**4/90, rtol=1e-12) + + +def test_riemann_zeta_avoid_overflow(): + s = -260.00000000001 + desired = -5.6966307844402683127e+297 # Computed with Mpmath + assert_allclose(sc.zeta(s), desired, atol=0, rtol=5e-14) diff --git a/phi4/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so b/phi4/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cf75cb12106a1754b5c1e48f16a7da530db6f6e3 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf757c12641f0f1bc49aad351a044e89c025d3affa203071f705dfd46dcff375 +size 262168 diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py b/phi4/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py new file mode 100644 index 0000000000000000000000000000000000000000..c87492ce9e77dc2f11b3138f9294e421621a8292 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py @@ -0,0 +1,795 @@ +import builtins +from warnings import catch_warnings, simplefilter +import numpy as np +from operator import index +from collections import namedtuple + +__all__ = ['binned_statistic', + 'binned_statistic_2d', + 'binned_statistic_dd'] + + +BinnedStatisticResult = namedtuple('BinnedStatisticResult', + ('statistic', 'bin_edges', 'binnumber')) + + +def binned_statistic(x, values, statistic='mean', + bins=10, range=None): + """ + Compute a binned statistic for one or more sets of data. + + This is a generalization of a histogram function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. 
This function allows the computation of the sum, mean, median, + or other statistic of the values (or set of values) within each bin. + + Parameters + ---------- + x : (N,) array_like + A sequence of values to be binned. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `x`, or a set of sequences - each the same shape as + `x`. If `values` is a set of sequences, the statistic will be computed + on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or sequence of scalars, optional + If `bins` is an int, it defines the number of equal-width bins in the + given range (10 by default). If `bins` is a sequence, it defines the + bin edges, including the rightmost edge, allowing for non-uniform bin + widths. 
Values in `x` that are smaller than lowest bin edge are + assigned to bin number 0, values beyond the highest bin are assigned to + ``bins[-1]``. If the bin edges are specified, the number of bins will + be, (nx = len(bins)-1). + range : (float, float) or [(float, float)], optional + The lower and upper range of the bins. If not provided, range + is simply ``(x.min(), x.max())``. Values outside the range are + ignored. + + Returns + ------- + statistic : array + The values of the selected statistic in each bin. + bin_edges : array of dtype float + Return the bin edges ``(length(statistic)+1)``. + binnumber: 1-D ndarray of ints + Indices of the bins (corresponding to `bin_edges`) in which each value + of `x` belongs. Same length as `values`. A binnumber of `i` means the + corresponding value is between (bin_edges[i-1], bin_edges[i]). + + See Also + -------- + numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + First some basic examples: + + Create two evenly spaced bins in the range of the given sample, and sum the + corresponding values in each of those bins: + + >>> values = [1.0, 1.0, 2.0, 1.5, 3.0] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + BinnedStatisticResult(statistic=array([4. , 4.5]), + bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2])) + + Multiple arrays of values can also be passed. 
The statistic is calculated + on each set independently: + + >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + BinnedStatisticResult(statistic=array([[4. , 4.5], + [8. , 9. ]]), bin_edges=array([1., 4., 7.]), + binnumber=array([1, 1, 1, 2, 2])) + + >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', + ... bins=3) + BinnedStatisticResult(statistic=array([1., 2., 4.]), + bin_edges=array([1., 2., 3., 4.]), + binnumber=array([1, 2, 1, 2, 3])) + + As a second example, we now generate some random data of sailing boat speed + as a function of wind speed, and then determine how fast our boat is for + certain wind speeds: + + >>> rng = np.random.default_rng() + >>> windspeed = 8 * rng.random(500) + >>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500) + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed, + ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7]) + >>> plt.figure() + >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5, + ... label='binned statistic of data') + >>> plt.legend() + + Now we can use ``binnumber`` to select all datapoints with a windspeed + below 1: + + >>> low_boatspeed = boatspeed[binnumber == 0] + + As a final example, we will use ``bin_edges`` and ``binnumber`` to make a + plot of a distribution that shows the mean and distribution around that + mean per bin, on top of a regular histogram and the probability + distribution function: + + >>> x = np.linspace(0, 5, num=500) + >>> x_pdf = stats.maxwell.pdf(x) + >>> samples = stats.maxwell.rvs(size=10000) + + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf, + ... 
statistic='mean', bins=25) + >>> bin_width = (bin_edges[1] - bin_edges[0]) + >>> bin_centers = bin_edges[1:] - bin_width/2 + + >>> plt.figure() + >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled', + ... alpha=0.2, label='histogram of data') + >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2, + ... label='binned statistic of data') + >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5) + >>> plt.legend(fontsize=10) + >>> plt.show() + + """ + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1: + bins = [np.asarray(bins, float)] + + if range is not None: + if len(range) == 2: + range = [range] + + medians, edges, binnumbers = binned_statistic_dd( + [x], values, statistic, bins, range) + + return BinnedStatisticResult(medians, edges[0], binnumbers) + + +BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult', + ('statistic', 'x_edge', 'y_edge', + 'binnumber')) + + +def binned_statistic_2d(x, y, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False): + """ + Compute a bidimensional binned statistic for one or more sets of data. + + This is a generalization of a histogram2d function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values (or set of values) within each bin. + + Parameters + ---------- + x : (N,) array_like + A sequence of values to be binned along the first dimension. + y : (N,) array_like + A sequence of values to be binned along the second dimension. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `x`, or a list of sequences - each with the same + shape as `x`. If `values` is such a list, the statistic will be + computed on each independently. 
+ statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or [int, int] or array_like or [array, array], optional + The bin specification: + + * the number of bins for the two dimensions (nx = ny = bins), + * the number of bins in each dimension (nx, ny = bins), + * the bin edges for the two dimensions (x_edge = y_edge = bins), + * the bin edges in each dimension (x_edge, y_edge = bins). + + If the bin edges are specified, the number of bins will be, + (nx = len(x_edge)-1, ny = len(y_edge)-1). + + range : (2,2) array_like, optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be + considered outliers and not tallied in the histogram. 
+ expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (2,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section. + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : (nx, ny) ndarray + The values of the selected statistic in each two-dimensional bin. + x_edge : (nx + 1) ndarray + The bin edges along the first dimension. + y_edge : (ny + 1) ndarray + The bin edges along the second dimension. + binnumber : (N,) array of ints or (2,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + Note that the returned linearized bin indices are used for an array with + extra bins on the outer binedges to capture values outside of the defined + bin bounds. + If 'True': The returned `binnumber` is a shape (2,N) ndarray where + each row indicates bin placements for each dimension respectively. 
In each + dimension, a binnumber of `i` means the corresponding value is between + (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import stats + + Calculate the counts with explicit bin-edges: + + >>> x = [0.1, 0.1, 0.1, 0.6] + >>> y = [2.1, 2.6, 2.1, 2.1] + >>> binx = [0.0, 0.5, 1.0] + >>> biny = [2.0, 2.5, 3.0] + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny]) + >>> ret.statistic + array([[2., 1.], + [1., 0.]]) + + The bin in which each sample is placed is given by the `binnumber` + returned parameter. By default, these are the linearized bin indices: + + >>> ret.binnumber + array([5, 6, 5, 9]) + + The bin indices can also be expanded into separate entries for each + dimension using the `expand_binnumbers` parameter: + + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny], + ... expand_binnumbers=True) + >>> ret.binnumber + array([[1, 1, 1, 2], + [1, 2, 1, 1]]) + + Which shows that the first three elements belong in the xbin 1, and the + fourth into xbin 2; and so on for y. 
+ + """ + + # This code is based on np.histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = np.asarray(bins, float) + bins = [xedges, yedges] + + medians, edges, binnumbers = binned_statistic_dd( + [x, y], values, statistic, bins, range, + expand_binnumbers=expand_binnumbers) + + return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers) + + +BinnedStatisticddResult = namedtuple('BinnedStatisticddResult', + ('statistic', 'bin_edges', + 'binnumber')) + + +def _bincount(x, weights): + if np.iscomplexobj(weights): + a = np.bincount(x, np.real(weights)) + b = np.bincount(x, np.imag(weights)) + z = a + b*1j + + else: + z = np.bincount(x, weights) + return z + + +def binned_statistic_dd(sample, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False, + binned_statistic_result=None): + """ + Compute a multidimensional binned statistic for a set of data. + + This is a generalization of a histogramdd function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values within each bin. + + Parameters + ---------- + sample : array_like + Data to histogram passed as a sequence of N arrays of length D, or + as an (N,D) array. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `sample`, or a list of sequences - each with the + same shape as `sample`. If `values` is such a list, the statistic + will be computed on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'median' : compute the median of values for points within each + bin. 
Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. If the number of values + within a given bin is 0 or 1, the computed standard deviation value + will be 0 for the bin. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : sequence or positive int, optional + The bin specification must be in one of the following forms: + + * A sequence of arrays describing the bin edges along each dimension. + * The number of bins for each dimension (nx, ny, ... = bins). + * The number of bins for all dimensions (nx = ny = ... = bins). + range : sequence, optional + A sequence of lower and upper bin edges to be used if the edges are + not given explicitly in `bins`. Defaults to the minimum and maximum + values along each dimension. + expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (D,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section of + `binned_statistic_2d`. 
+ binned_statistic_result : binnedStatisticddResult + Result of a previous call to the function in order to reuse bin edges + and bin numbers with new values and/or a different statistic. + To reuse bin numbers, `expand_binnumbers` must have been set to False + (the default) + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : ndarray, shape(nx1, nx2, nx3,...) + The values of the selected statistic in each two-dimensional bin. + bin_edges : list of ndarrays + A list of D arrays describing the (nxi + 1) bin edges for each + dimension. + binnumber : (N,) array of ints or (D,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open in each dimension. In + other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is + ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The + last bin, however, is ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + If 'True': The returned `binnumber` is a shape (D,N) ndarray where + each row indicates bin placements for each dimension respectively. In each + dimension, a binnumber of `i` means the corresponding value is between + (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'. + + .. 
versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.mplot3d import Axes3D + + Take an array of 600 (x, y) coordinates as an example. + `binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot + of dimension `D+1` is required. + + >>> mu = np.array([0., 1.]) + >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]]) + >>> multinormal = stats.multivariate_normal(mu, sigma) + >>> data = multinormal.rvs(size=600, random_state=235412) + >>> data.shape + (600, 2) + + Create bins and count how many arrays fall in each bin: + + >>> N = 60 + >>> x = np.linspace(-3, 3, N) + >>> y = np.linspace(-3, 4, N) + >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y], + ... statistic='count') + >>> bincounts = ret.statistic + + Set the volume and the location of bars: + + >>> dx = x[1] - x[0] + >>> dy = y[1] - y[0] + >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2) + >>> z = 0 + + >>> bincounts = bincounts.ravel() + >>> x = x.ravel() + >>> y = y.ravel() + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111, projection='3d') + >>> with np.errstate(divide='ignore'): # silence random axes3d warning + ... ax.bar3d(x, y, z, dx, dy, bincounts) + + Reuse bin numbers and bin edges with new values: + + >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600), + ... binned_statistic_result=ret, + ... statistic='mean') + """ + known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max'] + if not callable(statistic) and statistic not in known_stats: + raise ValueError(f'invalid statistic {statistic!r}') + + try: + bins = index(bins) + except TypeError: + # bins is not an integer + pass + # If bins was an integer-like object, now it is an actual Python int. + + # NOTE: for _bin_edges(), see e.g. 
gh-11365 + if isinstance(bins, int) and not np.isfinite(sample).all(): + raise ValueError(f'{sample!r} contains non-finite values.') + + # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`) + # `Dlen` is the length of elements along each dimension. + # This code is based on np.histogramdd + try: + # `sample` is an ND-array. + Dlen, Ndim = sample.shape + except (AttributeError, ValueError): + # `sample` is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + Dlen, Ndim = sample.shape + + # Store initial shape of `values` to preserve it in the output + values = np.asarray(values) + input_shape = list(values.shape) + # Make sure that `values` is 2D to iterate over rows + values = np.atleast_2d(values) + Vdim, Vlen = values.shape + + # Make sure `values` match `sample` + if statistic != 'count' and Vlen != Dlen: + raise AttributeError('The number of `values` elements must match the ' + 'length of each `sample` dimension.') + + try: + M = len(bins) + if M != Ndim: + raise AttributeError('The dimension of bins must be equal ' + 'to the dimension of the sample x.') + except TypeError: + bins = Ndim * [bins] + + if binned_statistic_result is None: + nbin, edges, dedges = _bin_edges(sample, bins, range) + binnumbers = _bin_numbers(sample, nbin, edges, dedges) + else: + edges = binned_statistic_result.bin_edges + nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)]) + # +1 for outlier bins + dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)] + binnumbers = binned_statistic_result.binnumber + + # Avoid overflow with double precision. Complex `values` -> `complex128`. 
+ result_type = np.result_type(values, np.float64) + result = np.empty([Vdim, nbin.prod()], dtype=result_type) + + if statistic in {'mean', np.mean}: + result.fill(np.nan) + flatcount = _bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + result[vv, a] = flatsum[a] / flatcount[a] + elif statistic in {'std', np.std}: + result.fill(np.nan) + flatcount = _bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers] + std = np.sqrt( + _bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a] + ) + result[vv, a] = std + result = np.real(result) + elif statistic == 'count': + result = np.empty([Vdim, nbin.prod()], dtype=np.float64) + result.fill(0) + flatcount = _bincount(binnumbers, None) + a = np.arange(len(flatcount)) + result[:, a] = flatcount[np.newaxis, :] + elif statistic in {'sum', np.sum}: + result.fill(0) + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + a = np.arange(len(flatsum)) + result[vv, a] = flatsum + elif statistic in {'median', np.median}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.lexsort((values[vv], binnumbers)) + _, j, counts = np.unique(binnumbers[i], + return_index=True, return_counts=True) + mid = j + (counts - 1) / 2 + mid_a = values[vv, i][np.floor(mid).astype(int)] + mid_b = values[vv, i][np.ceil(mid).astype(int)] + medians = (mid_a + mid_b) / 2 + result[vv, binnumbers[i][j]] = medians + elif statistic in {'min', np.min}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.argsort(values[vv])[::-1] # Reversed so the min is last + result[vv, binnumbers[i]] = values[vv, i] + elif statistic in {'max', np.max}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.argsort(values[vv]) + result[vv, binnumbers[i]] = values[vv, i] + elif 
def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func):
    """Apply a user statistic to the values collected in each occupied bin.

    Parameters
    ----------
    Vdim : int
        Number of rows in `values` (independent value sets).
    bin_numbers : (N,) ndarray of ints
        Linearized bin index of each sample.
    result : (Vdim, nbins) ndarray
        Output array, filled in place at the occupied bin positions.
    values : (Vdim, N) ndarray
        Data on which the statistic is computed.
    stat_func : callable
        User statistic; receives a 1D array of the values in one bin.

    Raises
    ------
    ValueError
        If `stat_func` returns a complex value while `result` has a real
        dtype.  The caller (`binned_statistic_dd`) catches this, upcasts
        `result` to complex128, and calls again — so only the exception
        *type* matters, not the message.
    """
    unique_bin_numbers = np.unique(bin_numbers)
    for vv in builtins.range(Vdim):
        bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
                                      values, vv)
        for i in unique_bin_numbers:
            stat = stat_func(np.array(bin_map[i]))
            if np.iscomplexobj(stat) and not np.iscomplexobj(result):
                # Message was previously truncated mid-sentence.
                raise ValueError("The statistic function returns complex "
                                 "values, but the result array is "
                                 "real-valued.")
            result[vv, i] = stat
_bin_edges(sample, bins=None, range=None): + """ Create edge arrays + """ + Dlen, Ndim = sample.shape + + nbin = np.empty(Ndim, int) # Number of bins in each dimension + edges = Ndim * [None] # Bin edges for each dim (will be 2D array) + dedges = Ndim * [None] # Spacing between edges (will be 2D array) + + # Select range for each dimension + # Used only if number of bins is given. + if range is None: + smin = np.atleast_1d(np.array(sample.min(axis=0), float)) + smax = np.atleast_1d(np.array(sample.max(axis=0), float)) + else: + if len(range) != Ndim: + raise ValueError( + f"range given for {len(range)} dimensions; {Ndim} required") + smin = np.empty(Ndim) + smax = np.empty(Ndim) + for i in builtins.range(Ndim): + if range[i][1] < range[i][0]: + raise ValueError( + f"In {f'dimension {i + 1} of ' if Ndim > 1 else ''}range," + " start must be <= stop") + smin[i], smax[i] = range[i] + + # Make sure the bins have a finite width. + for i in builtins.range(len(smin)): + if smin[i] == smax[i]: + smin[i] = smin[i] - .5 + smax[i] = smax[i] + .5 + + # Preserve sample floating point precision in bin edges + edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating) + else float) + + # Create edge arrays + for i in builtins.range(Ndim): + if np.isscalar(bins[i]): + nbin[i] = bins[i] + 2 # +2 for outlier bins + edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1, + dtype=edges_dtype) + else: + edges[i] = np.asarray(bins[i], edges_dtype) + nbin[i] = len(edges[i]) + 1 # +1 for outlier bins + dedges[i] = np.diff(edges[i]) + + nbin = np.asarray(nbin) + + return nbin, edges, dedges + + +def _bin_numbers(sample, nbin, edges, dedges): + """Compute the bin number each sample falls into, in each dimension + """ + Dlen, Ndim = sample.shape + + sampBin = [ + np.digitize(sample[:, i], edges[i]) + for i in range(Ndim) + ] + + # Using `digitize`, values that fall on an edge are put in the right bin. 
+ # For the rightmost bin, we want values equal to the right + # edge to be counted in the last bin, and not as an outlier. + for i in range(Ndim): + # Find the rounding precision + dedges_min = dedges[i].min() + if dedges_min == 0: + raise ValueError('The smallest edge difference is numerically 0.') + decimal = int(-np.log10(dedges_min)) + 6 + # Find which points are on the rightmost edge. + on_edge = np.where((sample[:, i] >= edges[i][-1]) & + (np.around(sample[:, i], decimal) == + np.around(edges[i][-1], decimal)))[0] + # Shift these points one bin to the left. + sampBin[i][on_edge] -= 1 + + # Compute the sample indices in the flattened statistic matrix. + binnumbers = np.ravel_multi_index(sampBin, nbin) + + return binnumbers diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_common.py b/phi4/lib/python3.10/site-packages/scipy/stats/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..4011d425cc4afea3c7ee8937526b13f1f92b0850 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_common.py @@ -0,0 +1,5 @@ +from collections import namedtuple + + +ConfidenceInterval = namedtuple("ConfidenceInterval", ["low", "high"]) +ConfidenceInterval. __doc__ = "Class for confidence intervals." diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_constants.py b/phi4/lib/python3.10/site-packages/scipy/stats/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..b539ce8146ebdbc8e08c66143461b04d742804f2 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_constants.py @@ -0,0 +1,42 @@ +""" +Statistics-related constants. + +""" +import numpy as np + + +# The smallest representable positive number such that 1.0 + _EPS != 1.0. +_EPS = np.finfo(float).eps + +# The largest [in magnitude] usable floating value. 
+_XMAX = np.finfo(float).max + +# The log of the largest usable floating value; useful for knowing +# when exp(something) will overflow +_LOGXMAX = np.log(_XMAX) + +# The smallest [in magnitude] usable (i.e. not subnormal) double precision +# floating value. +_XMIN = np.finfo(float).tiny + +# The log of the smallest [in magnitude] usable (i.e not subnormal) +# double precision floating value. +_LOGXMIN = np.log(_XMIN) + +# -special.psi(1) +_EULER = 0.577215664901532860606512090082402431042 + +# special.zeta(3, 1) Apery's constant +_ZETA3 = 1.202056903159594285399738161511449990765 + +# sqrt(pi) +_SQRT_PI = 1.772453850905516027298167483341145182798 + +# sqrt(2/pi) +_SQRT_2_OVER_PI = 0.7978845608028654 + +# log(pi) +_LOG_PI = 1.1447298858494002 + +# log(sqrt(2/pi)) +_LOG_SQRT_2_OVER_PI = -0.22579135264472744 diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py b/phi4/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py new file mode 100644 index 0000000000000000000000000000000000000000..1e2d8134f8679332a58e1c522332b96e50256116 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py @@ -0,0 +1,4174 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from scipy._lib._util import getfullargspec_no_self as _getfullargspec + +import sys +import keyword +import re +import types +import warnings +from itertools import zip_longest + +from scipy._lib import doccer +from ._distr_params import distcont, distdiscrete +from scipy._lib._util import check_random_state, _lazywhere + +from scipy.special import comb, entr + + +# for root finding for continuous distribution ppf, and maximum likelihood +# estimation +from scipy import optimize + +# for functions of continuous distributions (e.g. 
moments, entropy, cdf) +from scipy import integrate + +# to approximate the pdf of a continuous distribution given its cdf +from scipy._lib._finite_differences import _derivative + +# for scipy.stats.entropy. Attempts to import just that function or file +# have cause import problems +from scipy import stats + +from numpy import (arange, putmask, ones, shape, ndarray, zeros, floor, + logical_and, log, sqrt, place, argmax, vectorize, asarray, + nan, inf, isinf, empty) + +import numpy as np +from ._constants import _XMAX, _LOGXMAX +from ._censored_data import CensoredData +from scipy.stats._warnings_errors import FitError + +# These are the docstring parts used for substitution in specific +# distribution docstrings + +docheaders = {'methods': """\nMethods\n-------\n""", + 'notes': """\nNotes\n-----\n""", + 'examples': """\nExamples\n--------\n"""} + +_doc_rvs = """\ +rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None) + Random variates. +""" +_doc_pdf = """\ +pdf(x, %(shapes)s, loc=0, scale=1) + Probability density function. +""" +_doc_logpdf = """\ +logpdf(x, %(shapes)s, loc=0, scale=1) + Log of the probability density function. +""" +_doc_pmf = """\ +pmf(k, %(shapes)s, loc=0, scale=1) + Probability mass function. +""" +_doc_logpmf = """\ +logpmf(k, %(shapes)s, loc=0, scale=1) + Log of the probability mass function. +""" +_doc_cdf = """\ +cdf(x, %(shapes)s, loc=0, scale=1) + Cumulative distribution function. +""" +_doc_logcdf = """\ +logcdf(x, %(shapes)s, loc=0, scale=1) + Log of the cumulative distribution function. +""" +_doc_sf = """\ +sf(x, %(shapes)s, loc=0, scale=1) + Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate). +""" # noqa: E501 +_doc_logsf = """\ +logsf(x, %(shapes)s, loc=0, scale=1) + Log of the survival function. +""" +_doc_ppf = """\ +ppf(q, %(shapes)s, loc=0, scale=1) + Percent point function (inverse of ``cdf`` --- percentiles). 
+""" +_doc_isf = """\ +isf(q, %(shapes)s, loc=0, scale=1) + Inverse survival function (inverse of ``sf``). +""" +_doc_moment = """\ +moment(order, %(shapes)s, loc=0, scale=1) + Non-central moment of the specified order. +""" +_doc_stats = """\ +stats(%(shapes)s, loc=0, scale=1, moments='mv') + Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). +""" +_doc_entropy = """\ +entropy(%(shapes)s, loc=0, scale=1) + (Differential) entropy of the RV. +""" +_doc_fit = """\ +fit(data) + Parameter estimates for generic data. + See `scipy.stats.rv_continuous.fit `__ for detailed documentation of the + keyword arguments. +""" # noqa: E501 +_doc_expect = """\ +expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) + Expected value of a function (of one argument) with respect to the distribution. +""" # noqa: E501 +_doc_expect_discrete = """\ +expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False) + Expected value of a function (of one argument) with respect to the distribution. +""" +_doc_median = """\ +median(%(shapes)s, loc=0, scale=1) + Median of the distribution. +""" +_doc_mean = """\ +mean(%(shapes)s, loc=0, scale=1) + Mean of the distribution. +""" +_doc_var = """\ +var(%(shapes)s, loc=0, scale=1) + Variance of the distribution. +""" +_doc_std = """\ +std(%(shapes)s, loc=0, scale=1) + Standard deviation of the distribution. +""" +_doc_interval = """\ +interval(confidence, %(shapes)s, loc=0, scale=1) + Confidence interval with equal areas around the median. 
+""" +_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, + _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, + _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, + _doc_stats, _doc_entropy, _doc_fit, + _doc_expect, _doc_median, + _doc_mean, _doc_var, _doc_std, _doc_interval]) + +_doc_default_longsummary = """\ +As an instance of the `rv_continuous` class, `%(name)s` object inherits from it +a collection of generic methods (see below for the full list), +and completes them with details specific for this particular distribution. +""" + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape, +location, and scale parameters returning a "frozen" continuous RV object: + +rv = %(name)s(%(shapes)s, loc=0, scale=1) + - Frozen RV object with the same methods but holding the given shape, + location, and scale fixed. +""" +_doc_default_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability density function (``pdf``): + +>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s), 100) +>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s), +... 'r-', lw=5, alpha=0.6, label='%(name)s pdf') + +Alternatively, the distribution object can be called (as a function) +to fix the shape, location and scale parameters. This returns a "frozen" +RV object holding the given parameters fixed. 
+ +Freeze the distribution and display the frozen ``pdf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') + +Check accuracy of ``cdf`` and ``ppf``: + +>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s) +>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) + +And compare the histogram: + +>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2) +>>> ax.set_xlim([x[0], x[-1]]) +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +""" + +_doc_default_locscale = """\ +The probability density above is defined in the "standardized" form. To shift +and/or scale the distribution use the ``loc`` and ``scale`` parameters. +Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically +equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with +``y = (x - loc) / scale``. Note that shifting the location of a distribution +does not make it a "noncentral" distribution; noncentral generalizations of +some distributions are available in separate classes. 
+""" + +_doc_default = ''.join([_doc_default_longsummary, + _doc_allmethods, + '\n', + _doc_default_example]) + +_doc_default_before_notes = ''.join([_doc_default_longsummary, + _doc_allmethods]) + +docdict = { + 'rvs': _doc_rvs, + 'pdf': _doc_pdf, + 'logpdf': _doc_logpdf, + 'cdf': _doc_cdf, + 'logcdf': _doc_logcdf, + 'sf': _doc_sf, + 'logsf': _doc_logsf, + 'ppf': _doc_ppf, + 'isf': _doc_isf, + 'stats': _doc_stats, + 'entropy': _doc_entropy, + 'fit': _doc_fit, + 'moment': _doc_moment, + 'expect': _doc_expect, + 'interval': _doc_interval, + 'mean': _doc_mean, + 'std': _doc_std, + 'var': _doc_var, + 'median': _doc_median, + 'allmethods': _doc_allmethods, + 'longsummary': _doc_default_longsummary, + 'frozennote': _doc_default_frozen_note, + 'example': _doc_default_example, + 'default': _doc_default, + 'before_notes': _doc_default_before_notes, + 'after_notes': _doc_default_locscale +} + +# Reuse common content between continuous and discrete docs, change some +# minor bits. +docdict_discrete = docdict.copy() + +docdict_discrete['pmf'] = _doc_pmf +docdict_discrete['logpmf'] = _doc_logpmf +docdict_discrete['expect'] = _doc_expect_discrete +_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', + 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', + 'mean', 'var', 'std', 'interval'] +for obj in _doc_disc_methods: + docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') + +_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf'] +for obj in _doc_disc_methods_err_varname: + docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ') + +docdict_discrete.pop('pdf') +docdict_discrete.pop('logpdf') + +_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) +docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods + +docdict_discrete['longsummary'] = _doc_default_longsummary.replace( + 'rv_continuous', 'rv_discrete') + +_doc_default_frozen_note = """ +Alternatively, the object may 
be called (as a function) to fix the shape and +location parameters returning a "frozen" discrete RV object: + +rv = %(name)s(%(shapes)s, loc=0) + - Frozen RV object with the same methods but holding the given shape and + location fixed. +""" +docdict_discrete['frozennote'] = _doc_default_frozen_note + +_doc_default_discrete_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability mass function (``pmf``): + +>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s)) +>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf') +>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5) + +Alternatively, the distribution object can be called (as a function) +to fix the shape and location. This returns a "frozen" RV object holding +the given parameters fixed. + +Freeze the distribution and display the frozen ``pmf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, +... label='frozen pmf') +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +Check accuracy of ``cdf`` and ``ppf``: + +>>> prob = %(name)s.cdf(x, %(shapes)s) +>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) +""" + + +_doc_default_discrete_locscale = """\ +The probability mass function above is defined in the "standardized" form. +To shift distribution use the ``loc`` parameter. +Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically +equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``. 
+""" + +docdict_discrete['example'] = _doc_default_discrete_example +docdict_discrete['after_notes'] = _doc_default_discrete_locscale + +_doc_default_before_notes = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods']]) +docdict_discrete['before_notes'] = _doc_default_before_notes + +_doc_default_disc = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods'], + docdict_discrete['frozennote'], + docdict_discrete['example']]) +docdict_discrete['default'] = _doc_default_disc + +# clean up all the separate docstring elements, we do not need them anymore +for obj in [s for s in dir() if s.startswith('_doc_')]: + exec('del ' + obj) +del obj + + +def _moment(data, n, mu=None): + if mu is None: + mu = data.mean() + return ((data - mu)**n).mean() + + +def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): + if (n == 0): + return 1.0 + elif (n == 1): + if mu is None: + val = moment_func(1, *args) + else: + val = mu + elif (n == 2): + if mu2 is None or mu is None: + val = moment_func(2, *args) + else: + val = mu2 + mu*mu + elif (n == 3): + if g1 is None or mu2 is None or mu is None: + val = moment_func(3, *args) + else: + mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment + val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment + elif (n == 4): + if g1 is None or g2 is None or mu2 is None or mu is None: + val = moment_func(4, *args) + else: + mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment + mu3 = g1*np.power(mu2, 1.5) # 3rd central moment + val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu + else: + val = moment_func(n, *args) + + return val + + +def _skew(data): + """ + skew is third central moment / variance**(1.5) + """ + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m3 = ((data - mu)**3).mean() + return m3 / np.power(m2, 1.5) + + +def _kurtosis(data): + """Fisher's excess kurtosis is fourth central moment / variance**2 - 3.""" + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m4 
= ((data - mu)**4).mean() + return m4 / m2**2 - 3 + +def _vectorize_rvs_over_shapes(_rvs1): + """Decorator that vectorizes _rvs method to work on ndarray shapes""" + # _rvs1 must be a _function_ that accepts _scalar_ args as positional + # arguments, `size` and `random_state` as keyword arguments. + # _rvs1 must return a random variate array with shape `size`. If `size` is + # None, _rvs1 must return a scalar. + # When applied to _rvs1, this decorator broadcasts ndarray args + # and loops over them, calling _rvs1 for each set of scalar args. + # For usage example, see _nchypergeom_gen + def _rvs(*args, size, random_state): + _rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size) + + size = np.array(size) + _rvs1_size = np.array(_rvs1_size) + _rvs1_indices = np.array(_rvs1_indices) + + if np.all(_rvs1_indices): # all args are scalars + return _rvs1(*args, size, random_state) + + out = np.empty(size) + + # out.shape can mix dimensions associated with arg_shape and _rvs1_size + # Sort them to arg_shape + _rvs1_size for easy indexing of dimensions + # corresponding with the different sets of scalar args + j0 = np.arange(out.ndim) + j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices])) + out = np.moveaxis(out, j1, j0) + + for i in np.ndindex(*size[~_rvs1_indices]): + # arg can be squeezed because singleton dimensions will be + # associated with _rvs1_size, not arg_shape per _check_shape + out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args], + _rvs1_size, random_state) + + return np.moveaxis(out, j0, j1) # move axes back before returning + return _rvs + + +def _fit_determine_optimizer(optimizer): + if not callable(optimizer) and isinstance(optimizer, str): + if not optimizer.startswith('fmin_'): + optimizer = "fmin_"+optimizer + if optimizer == 'fmin_': + optimizer = 'fmin' + try: + optimizer = getattr(optimize, optimizer) + except AttributeError as e: + raise ValueError(f"{optimizer} is not a valid optimizer") from e + return optimizer + +def _isintegral(x): + 
return x == np.round(x) + +def _sum_finite(x): + """ + For a 1D array x, return a tuple containing the sum of the + finite values of x and the number of nonfinite values. + + This is a utility function used when evaluating the negative + loglikelihood for a distribution and an array of samples. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import _sum_finite + >>> tot, nbad = _sum_finite(np.array([-2, -np.inf, 5, 1])) + >>> tot + 4.0 + >>> nbad + 1 + """ + finite_x = np.isfinite(x) + bad_count = finite_x.size - np.count_nonzero(finite_x) + return np.sum(x[finite_x]), bad_count + + +# Frozen RV class +class rv_frozen: + + def __init__(self, dist, *args, **kwds): + self.args = args + self.kwds = kwds + + # create a new instance + self.dist = dist.__class__(**dist._updated_ctor_param()) + + shapes, _, _ = self.dist._parse_args(*args, **kwds) + self.a, self.b = self.dist._get_support(*shapes) + + @property + def random_state(self): + return self.dist._random_state + + @random_state.setter + def random_state(self, seed): + self.dist._random_state = check_random_state(seed) + + def cdf(self, x): + return self.dist.cdf(x, *self.args, **self.kwds) + + def logcdf(self, x): + return self.dist.logcdf(x, *self.args, **self.kwds) + + def ppf(self, q): + return self.dist.ppf(q, *self.args, **self.kwds) + + def isf(self, q): + return self.dist.isf(q, *self.args, **self.kwds) + + def rvs(self, size=None, random_state=None): + kwds = self.kwds.copy() + kwds.update({'size': size, 'random_state': random_state}) + return self.dist.rvs(*self.args, **kwds) + + def sf(self, x): + return self.dist.sf(x, *self.args, **self.kwds) + + def logsf(self, x): + return self.dist.logsf(x, *self.args, **self.kwds) + + def stats(self, moments='mv'): + kwds = self.kwds.copy() + kwds.update({'moments': moments}) + return self.dist.stats(*self.args, **kwds) + + def median(self): + return self.dist.median(*self.args, **self.kwds) + + def mean(self): + 
        return self.dist.mean(*self.args, **self.kwds)

    def var(self):
        return self.dist.var(*self.args, **self.kwds)

    def std(self):
        return self.dist.std(*self.args, **self.kwds)

    def moment(self, order=None):
        return self.dist.moment(order, *self.args, **self.kwds)

    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)

    def interval(self, confidence=None):
        return self.dist.interval(confidence, *self.args, **self.kwds)

    def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
        # expect method only accepts shape parameters as positional args
        # hence convert self.args, self.kwds, also loc/scale
        # See the .expect method docstrings for the meaning of
        # other parameters.
        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
        if isinstance(self.dist, rv_discrete):
            return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
        else:
            return self.dist.expect(func, a, loc, scale, lb, ub,
                                    conditional, **kwds)

    def support(self):
        return self.dist.support(*self.args, **self.kwds)


class rv_discrete_frozen(rv_frozen):
    # Frozen discrete distribution: adds pmf/logpmf forwarders.

    def pmf(self, k):
        return self.dist.pmf(k, *self.args, **self.kwds)

    def logpmf(self, k):  # No error
        return self.dist.logpmf(k, *self.args, **self.kwds)


class rv_continuous_frozen(rv_frozen):
    # Frozen continuous distribution: adds pdf/logpdf forwarders.

    def pdf(self, x):
        return self.dist.pdf(x, *self.args, **self.kwds)

    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)


def argsreduce(cond, *args):
    """Clean arguments to:

    1. Ensure all arguments are iterable (arrays of dimension at least one
    2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is
       True, in 1D.

    Return list of processed arguments.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats._distn_infrastructure import argsreduce
    >>> rng = np.random.default_rng()
    >>> A = rng.random((4, 5))
    >>> B = 2
    >>> C = rng.random((1, 5))
    >>> cond = np.ones(A.shape)
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> A1.shape
    (4, 5)
    >>> B1.shape
    (1,)
    >>> C1.shape
    (1, 5)
    >>> cond[2,:] = 0
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> A1.shape
    (15,)
    >>> B1.shape
    (1,)
    >>> C1.shape
    (15,)

    """
    # NOTE(review): when `cond` is all True, the fast path below broadcasts
    # and *ravels* every argument, which does not match the shapes shown in
    # the first doctest example ((4, 5), (1,), (1, 5)) — verify the example
    # (or the fast path) against upstream scipy.
    # some distributions assume arguments are iterable.
    newargs = np.atleast_1d(*args)

    # np.atleast_1d returns an array if only one argument, or a list of arrays
    # if more than one argument.
    if not isinstance(newargs, (list | tuple)):
        newargs = (newargs,)

    if np.all(cond):
        # broadcast arrays with cond
        *newargs, cond = np.broadcast_arrays(*newargs, cond)
        return [arg.ravel() for arg in newargs]

    s = cond.shape
    # np.extract returns flattened arrays, which are not broadcastable together
    # unless they are either the same size or size == 1.
    return [(arg if np.size(arg) == 1
             else np.extract(cond, np.broadcast_to(arg, s)))
            for arg in newargs]


# Template for the per-distribution argument parsers; the %(...)s fields are
# filled in by rv_generic._construct_argparser and the resulting source is
# exec'd and bound per-instance by _attach_argparser_methods.
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
    return (%(shape_arg_str)s), %(locscale_out)s

def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
    return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)

def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
    return (%(shape_arg_str)s), %(locscale_out)s, moments
"""


class rv_generic:
    """Class which encapsulates common functionality between rv_discrete
    and rv_continuous.
+ + """ + + def __init__(self, seed=None): + super().__init__() + + # figure out if _stats signature has 'moments' keyword + sig = _getfullargspec(self._stats) + self._stats_has_moments = ((sig.varkw is not None) or + ('moments' in sig.args) or + ('moments' in sig.kwonlyargs)) + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """Get or set the generator object for generating random variates. + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def __setstate__(self, state): + try: + self.__dict__.update(state) + # attaches the dynamically created methods on each instance. + # if a subclass overrides rv_generic.__setstate__, or implements + # it's own _attach_methods, then it must make sure that + # _attach_argparser_methods is called. + self._attach_methods() + except ValueError: + # reconstitute an old pickle scipy<1.6, that contains + # (_ctor_param, random_state) as state + self._ctor_param = state[0] + self._random_state = state[1] + self.__init__() + + def _attach_methods(self): + """Attaches dynamically created methods to the rv_* instance. + + This method must be overridden by subclasses, and must itself call + _attach_argparser_methods. This method is called in __init__ in + subclasses, and in __setstate__ + """ + raise NotImplementedError + + def _attach_argparser_methods(self): + """ + Generates the argument-parsing functions dynamically and attaches + them to the instance. 
+ + Should be called from `_attach_methods`, typically in __init__ and + during unpickling (__setstate__) + """ + ns = {} + exec(self._parse_arg_template, ns) + # NB: attach to the instance, not class + for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']: + setattr(self, name, types.MethodType(ns[name], self)) + + def _construct_argparser( + self, meths_to_inspect, locscale_in, locscale_out): + """Construct the parser string for the shape arguments. + + This method should be called in __init__ of a class for each + distribution. It creates the `_parse_arg_template` attribute that is + then used by `_attach_argparser_methods` to dynamically create and + attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs` + methods to the instance. + + If self.shapes is a non-empty string, interprets it as a + comma-separated list of shape parameters. + + Otherwise inspects the call signatures of `meths_to_inspect` + and constructs the argument-parsing functions from these. + In this case also sets `shapes` and `numargs`. + """ + + if self.shapes: + # sanitize the user-supplied shapes + if not isinstance(self.shapes, str): + raise TypeError('shapes must be a string.') + + shapes = self.shapes.replace(',', ' ').split() + + for field in shapes: + if keyword.iskeyword(field): + raise SyntaxError('keywords cannot be used as shapes.') + if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field): + raise SyntaxError( + 'shapes must be valid python identifiers') + else: + # find out the call signatures (_pdf, _cdf etc), deduce shape + # arguments. Generic methods only have 'self, x', any further args + # are shapes. 
+ shapes_list = [] + for meth in meths_to_inspect: + shapes_args = _getfullargspec(meth) # NB does not contain self + args = shapes_args.args[1:] # peel off 'x', too + + if args: + shapes_list.append(args) + + # *args or **kwargs are not allowed w/automatic shapes + if shapes_args.varargs is not None: + raise TypeError( + '*args are not allowed w/out explicit shapes') + if shapes_args.varkw is not None: + raise TypeError( + '**kwds are not allowed w/out explicit shapes') + if shapes_args.kwonlyargs: + raise TypeError( + 'kwonly args are not allowed w/out explicit shapes') + if shapes_args.defaults is not None: + raise TypeError('defaults are not allowed for shapes') + + if shapes_list: + shapes = shapes_list[0] + + # make sure the signatures are consistent + for item in shapes_list: + if item != shapes: + raise TypeError('Shape arguments are inconsistent.') + else: + shapes = [] + + # have the arguments, construct the method from template + shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None + dct = dict(shape_arg_str=shapes_str, + locscale_in=locscale_in, + locscale_out=locscale_out, + ) + + # this string is used by _attach_argparser_methods + self._parse_arg_template = parse_arg_template % dct + + self.shapes = ', '.join(shapes) if shapes else None + if not hasattr(self, 'numargs'): + # allows more general subclassing with *args + self.numargs = len(shapes) + + def _construct_doc(self, docdict, shapes_vals=None): + """Construct the instance docstring with string substitutions.""" + tempdict = docdict.copy() + tempdict['name'] = self.name or 'distname' + tempdict['shapes'] = self.shapes or '' + + if shapes_vals is None: + shapes_vals = () + try: + vals = ', '.join(f'{val:.3g}' for val in shapes_vals) + except TypeError: + vals = ', '.join(f'{val}' for val in shapes_vals) + tempdict['vals'] = vals + + tempdict['shapes_'] = self.shapes or '' + if self.shapes and self.numargs == 1: + tempdict['shapes_'] += ',' + + if self.shapes: + 
tempdict['set_vals_stmt'] = f'>>> {self.shapes} = {vals}' + else: + tempdict['set_vals_stmt'] = '' + + if self.shapes is None: + # remove shapes from call parameters if there are none + for item in ['default', 'before_notes']: + tempdict[item] = tempdict[item].replace( + "\n%(shapes)s : array_like\n shape parameters", "") + for i in range(2): + if self.shapes is None: + # necessary because we use %(shapes)s in two forms (w w/o ", ") + self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") + try: + self.__doc__ = doccer.docformat(self.__doc__, tempdict) + except TypeError as e: + raise Exception("Unable to construct docstring for " + f"distribution \"{self.name}\": {repr(e)}") from e + + # correct for empty shapes + self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')') + + def _construct_default_doc(self, longname=None, + docdict=None, discrete='continuous'): + """Construct instance docstring from the default template.""" + if longname is None: + longname = 'A' + self.__doc__ = ''.join([f'{longname} {discrete} random variable.', + '\n\n%(before_notes)s\n', docheaders['notes'], + '\n%(example)s']) + self._construct_doc(docdict) + + def freeze(self, *args, **kwds): + """Freeze the distribution for the given arguments. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution. Should include all + the non-optional arguments, may include ``loc`` and ``scale``. + + Returns + ------- + rv_frozen : rv_frozen instance + The frozen distribution. + + """ + if isinstance(self, rv_continuous): + return rv_continuous_frozen(self, *args, **kwds) + else: + return rv_discrete_frozen(self, *args, **kwds) + + def __call__(self, *args, **kwds): + return self.freeze(*args, **kwds) + __call__.__doc__ = freeze.__doc__ + + # The actual calculation functions (no basic checking need be done) + # If these are defined, the others won't be looked at. + # Otherwise, the other set can be defined. 

    def _stats(self, *args, **kwds):
        # Default: no closed-form moments known; callers fall back on _munp.
        return None, None, None, None

    # Noncentral moments (also known as the moment about the origin).
    # Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
    # The primed mu is a widely used notation for the noncentral moment.
    def _munp(self, n, *args):
        # Silence floating point warnings from integration.
        with np.errstate(all='ignore'):
            vals = self.generic_moment(n, *args)
        return vals

    def _argcheck_rvs(self, *args, **kwargs):
        # Handle broadcasting and size validation of the rvs method.
        # Subclasses should not have to override this method.
        # The rule is that if `size` is not None, then `size` gives the
        # shape of the result (integer values of `size` are treated as
        # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
        #
        # `args` is expected to contain the shape parameters (if any), the
        # location and the scale in a flat tuple (e.g. if there are two
        # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
        # The only keyword argument expected is 'size'.
        size = kwargs.get('size', None)
        all_bcast = np.broadcast_arrays(*args)

        def squeeze_left(a):
            while a.ndim > 0 and a.shape[0] == 1:
                a = a[0]
            return a

        # Eliminate trivial leading dimensions. In the convention
        # used by numpy's random variate generators, trivial leading
        # dimensions are effectively ignored. In other words, when `size`
        # is given, trivial leading dimensions of the broadcast parameters
        # in excess of the number of dimensions in size are ignored, e.g.
        # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
        # array([ 1.00104267, 3.00422496, 4.99799278])
        # If `size` is not given, the exact broadcast shape is preserved:
        # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
        # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
        #
        all_bcast = [squeeze_left(a) for a in all_bcast]
        bcast_shape = all_bcast[0].shape
        bcast_ndim = all_bcast[0].ndim

        if size is None:
            size_ = bcast_shape
        else:
            size_ = tuple(np.atleast_1d(size))

        # Check compatibility of size_ with the broadcast shape of all
        # the parameters. This check is intended to be consistent with
        # how the numpy random variate generators (e.g. np.random.normal,
        # np.random.beta) handle their arguments. The rule is that, if size
        # is given, it determines the shape of the output. Broadcasting
        # can't change the output size.

        # This is the standard broadcasting convention of extending the
        # shape with fewer dimensions with enough dimensions of length 1
        # so that the two shapes have the same number of dimensions.
        ndiff = bcast_ndim - len(size_)
        if ndiff < 0:
            bcast_shape = (1,)*(-ndiff) + bcast_shape
        elif ndiff > 0:
            size_ = (1,)*ndiff + size_

        # This compatibility test is not standard. In "regular" broadcasting,
        # two shapes are compatible if for each dimension, the lengths are the
        # same or one of the lengths is 1. Here, the length of a dimension in
        # size_ must not be less than the corresponding length in bcast_shape.
        ok = all([bcdim == 1 or bcdim == szdim
                  for (bcdim, szdim) in zip(bcast_shape, size_)])
        if not ok:
            raise ValueError("size does not match the broadcast shape of "
                             f"the parameters. {size}, {size_}, {bcast_shape}")

        param_bcast = all_bcast[:-2]
        loc_bcast = all_bcast[-2]
        scale_bcast = all_bcast[-1]

        return param_bcast, loc_bcast, scale_bcast, size_

    # These are the methods you must define (standard form functions)
    # NB: generic _pdf, _logpdf, _cdf are different for
    # rv_continuous and rv_discrete hence are defined in there
    def _argcheck(self, *args):
        """Default check for correct values on args and keywords.

        Returns condition array of 1's where arguments are correct and
        0's where they are not.

        """
        cond = 1
        for arg in args:
            cond = logical_and(cond, (asarray(arg) > 0))
        return cond

    def _get_support(self, *args, **kwargs):
        """Return the support of the (unscaled, unshifted) distribution.

        *Must* be overridden by distributions which have support dependent
        upon the shape parameters of the distribution. Any such override
        *must not* set or change any of the class members, as these members
        are shared amongst all instances of the distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        a, b : numeric (float, or int or +/-np.inf)
            end-points of the distribution's support for the specified
            shape parameters.
        """
        return self.a, self.b

    def _support_mask(self, x, *args):
        # Closed-interval membership test; invalid-compare warnings from
        # nan endpoints are deliberately suppressed.
        a, b = self._get_support(*args)
        with np.errstate(invalid='ignore'):
            return (a <= x) & (x <= b)

    def _open_support_mask(self, x, *args):
        # Open-interval variant of _support_mask.
        a, b = self._get_support(*args)
        with np.errstate(invalid='ignore'):
            return (a < x) & (x < b)

    def _rvs(self, *args, size=None, random_state=None):
        # This method must handle size being a tuple, and it must
        # properly broadcast *args and size. size might be
        # an empty tuple, which means a scalar random variate is to be
        # generated.

        # Use basic inverse cdf algorithm for RV generation as default.
        U = random_state.uniform(size=size)
        Y = self._ppf(U, *args)
        return Y

    def _logcdf(self, x, *args):
        with np.errstate(divide='ignore'):
            return log(self._cdf(x, *args))

    def _sf(self, x, *args):
        return 1.0-self._cdf(x, *args)

    def _logsf(self, x, *args):
        with np.errstate(divide='ignore'):
            return log(self._sf(x, *args))

    def _ppf(self, q, *args):
        return self._ppfvec(q, *args)

    def _isf(self, q, *args):
        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses

    # These are actually called, and should not be overwritten if you
    # want to keep error checking.
    def rvs(self, *args, **kwds):
        """Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default is 1).
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional

            If `random_state` is None (or `np.random`), the
            `numpy.random.RandomState` singleton is used.
            If `random_state` is an int, a new ``RandomState`` instance is
            used, seeded with `random_state`.
            If `random_state` is already a ``Generator`` or ``RandomState``
            instance, that instance is used.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.

        """
        discrete = kwds.pop('discrete', None)
        rndm = kwds.pop('random_state', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            message = ("Domain error in arguments. The `scale` parameter must "
                       "be positive for all distributions, and many "
                       "distributions have restrictions on shape parameters. "
                       f"Please see the `scipy.stats.{self.name}` "
                       "documentation for details.")
            raise ValueError(message)

        if np.all(scale == 0):
            # degenerate distribution: all mass at loc
            return loc*ones(size, 'd')

        # extra gymnastics needed for a custom random_state
        if rndm is not None:
            random_state_saved = self._random_state
            random_state = check_random_state(rndm)
        else:
            random_state = self._random_state

        vals = self._rvs(*args, size=size, random_state=random_state)

        vals = vals * scale + loc

        # do not forget to restore the _random_state
        if rndm is not None:
            self._random_state = random_state_saved

        # Cast to int if discrete
        if discrete and not isinstance(self, rv_sample):
            if size == ():
                vals = int(vals)
            else:
                vals = vals.astype(np.int64)

        return vals

    def stats(self, *args, **kwds):
        """Some statistics of the given RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (continuous RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default is 'mv')

        Returns
        -------
        stats : sequence
            of requested moments.
+ + """ + args, loc, scale, moments = self._parse_args_stats(*args, **kwds) + # scale = 1 by construction for discrete RVs + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = [] + default = np.full(shape(cond), fill_value=self.badvalue) + + # Use only entries that are valid in calculation + if np.any(cond): + goodargs = argsreduce(cond, *(args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + + if self._stats_has_moments: + mu, mu2, g1, g2 = self._stats(*goodargs, + **{'moments': moments}) + else: + mu, mu2, g1, g2 = self._stats(*goodargs) + + if 'm' in moments: + if mu is None: + mu = self._munp(1, *goodargs) + out0 = default.copy() + place(out0, cond, mu * scale + loc) + output.append(out0) + + if 'v' in moments: + if mu2 is None: + mu2p = self._munp(2, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + # if mean is inf then var is also inf + with np.errstate(invalid='ignore'): + mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf) + out0 = default.copy() + place(out0, cond, mu2 * scale * scale) + output.append(out0) + + if 's' in moments: + if g1 is None: + mu3p = self._munp(3, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + with np.errstate(invalid='ignore'): + mu3 = (-mu*mu - 3*mu2)*mu + mu3p + g1 = mu3 / np.power(mu2, 1.5) + out0 = default.copy() + place(out0, cond, g1) + output.append(out0) + + if 'k' in moments: + if g2 is None: + mu4p = self._munp(4, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + if g1 is None: + mu3 = None + else: + # (mu2**1.5) breaks down for nan and inf + mu3 = g1 * np.power(mu2, 1.5) + if mu3 is None: + mu3p = self._munp(3, *goodargs) + with 
np.errstate(invalid='ignore'): + mu3 = (-mu * mu - 3 * mu2) * mu + mu3p + with np.errstate(invalid='ignore'): + mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p + g2 = mu4 / mu2**2.0 - 3.0 + out0 = default.copy() + place(out0, cond, g2) + output.append(out0) + else: # no valid args + output = [default.copy() for _ in moments] + + output = [out[()] for out in output] + if len(output) == 1: + return output[0] + else: + return tuple(output) + + def entropy(self, *args, **kwds): + """Differential entropy of the RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional (continuous distributions only). + Scale parameter (default=1). + + Notes + ----- + Entropy is defined base `e`: + + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import rv_discrete + >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) + >>> np.allclose(drv.entropy(), np.log(2.0)) + True + + """ + args, loc, scale = self._parse_args(*args, **kwds) + # NB: for discrete distributions scale=1 by construction in _parse_args + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = zeros(shape(cond0), 'd') + place(output, (1-cond0), self.badvalue) + goodargs = argsreduce(cond0, scale, *args) + goodscale = goodargs[0] + goodargs = goodargs[1:] + place(output, cond0, self.vecentropy(*goodargs) + log(goodscale)) + return output[()] + + def moment(self, order, *args, **kwds): + """non-central moment of distribution of specified order. + + Parameters + ---------- + order : int, order >= 1 + Order of moment. + arg1, arg2, arg3,... : float + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). 
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        """
        n = order
        shapes, loc, scale = self._parse_args(*args, **kwds)
        args = np.broadcast_arrays(*(*shapes, loc, scale))
        *shapes, loc, scale = args

        i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
        i1 = np.logical_and(i0, loc == 0)
        i2 = np.logical_and(i0, loc != 0)

        args = argsreduce(i0, *shapes, loc, scale)
        *shapes, loc, scale = args

        if (floor(n) != n):
            raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be positive.")
        mu, mu2, g1, g2 = None, None, None, None
        if (n > 0) and (n < 5):
            if self._stats_has_moments:
                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'mvsk'}[n]}
            else:
                mdict = {}
            mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
        val = np.empty(loc.shape)  # val needs to be indexed by loc
        val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)

        # Convert to transformed X = L + S*Y
        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
        result = zeros(i0.shape)
        place(result, ~i0, self.badvalue)

        if i1.any():
            # loc == 0: E[X^n] = scale^n * E[Y^n]
            res1 = scale[loc == 0]**n * val[loc == 0]
            place(result, i1, res1)

        if i2.any():
            mom = [mu, mu2, g1, g2]
            arrs = [i for i in mom if i is not None]
            idx = [i for i in range(4) if mom[i] is not None]
            # NOTE(review): `any(idx)` is False when idx == [0] (only the
            # first moment is available), so `arrs` would skip the
            # argsreduce below in that case; `if idx:` may have been the
            # intent — verify against upstream scipy.
            if any(idx):
                arrs = argsreduce(loc != 0, *arrs)
                j = 0
                for i in idx:
                    mom[i] = arrs[j]
                    j += 1
            mu, mu2, g1, g2 = mom
            args = argsreduce(loc != 0, *shapes, loc, scale, val)
            *shapes, loc, scale, val = args

            res2 = zeros(loc.shape, dtype='d')
            fac = scale / loc
            for k in range(n):
                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
                                          shapes)
                res2 += comb(n, k, exact=True)*fac**k * valk
            res2 += fac**n * val
            res2 *= loc**n
            place(result, i2, res2)

        return result[()]

    def median(self, *args, **kwds):
        """Median of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter, Default is 0.
        scale : array_like, optional
            Scale parameter, Default is 1.

        Returns
        -------
        median : float
            The median of the distribution.

        See Also
        --------
        rv_discrete.ppf
            Inverse of the CDF

        """
        return self.ppf(0.5, *args, **kwds)

    def mean(self, *args, **kwds):
        """Mean of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        mean : float
            the mean of the distribution

        """
        kwds['moments'] = 'm'
        res = self.stats(*args, **kwds)
        if isinstance(res, ndarray) and res.ndim == 0:
            return res[()]
        return res

    def var(self, *args, **kwds):
        """Variance of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        var : float
            the variance of the distribution

        """
        kwds['moments'] = 'v'
        res = self.stats(*args, **kwds)
        if isinstance(res, ndarray) and res.ndim == 0:
            return res[()]
        return res

    def std(self, *args, **kwds):
        """Standard deviation of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        std : float
            standard deviation of the distribution

        """
        kwds['moments'] = 'v'
        res = sqrt(self.stats(*args, **kwds))
        return res

    def interval(self, confidence, *args, **kwds):
        """Confidence interval with equal areas around the median.

        Parameters
        ----------
        confidence : array_like of float
            Probability that an rv will be drawn from the returned range.
            Each value should be in the range [0, 1].
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : ndarray of float
            end-points of range that contain ``100 * alpha %`` of the rv's
            possible values.

        Notes
        -----
        This is implemented as ``ppf([p_tail, 1-p_tail])``, where
        ``ppf`` is the inverse cumulative distribution function and
        ``p_tail = (1-confidence)/2``. Suppose ``[c, d]`` is the support of a
        discrete distribution; then ``ppf([0, 1]) == (c-1, d)``. Therefore,
        when ``confidence=1`` and the distribution is discrete, the left end
        of the interval will be beyond the support of the distribution.
        For discrete distributions, the interval will limit the probability
        in each tail to be less than or equal to ``p_tail`` (usually
        strictly less).

        """
        alpha = confidence

        alpha = asarray(alpha)
        if np.any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b

    def support(self, *args, **kwargs):
        """Support of the distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : array_like
            end-points of the distribution's support.

        """
        args, loc, scale = self._parse_args(*args, **kwargs)
        arrs = np.broadcast_arrays(*args, loc, scale)
        args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
        cond = self._argcheck(*args) & (scale > 0)
        _a, _b = self._get_support(*args)
        if cond.all():
            return _a * scale + loc, _b * scale + loc
        elif cond.ndim == 0:
            return self.badvalue, self.badvalue
        # promote bounds to at least float to fill in the badvalue
        _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
        out_a, out_b = _a * scale + loc, _b * scale + loc
        place(out_a, 1-cond, self.badvalue)
        place(out_b, 1-cond, self.badvalue)
        return out_a, out_b

    def nnlf(self, theta, x):
        """Negative loglikelihood function.
        Notes
        -----
        This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
        parameters (including loc and scale).
+ """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (asarray(x)-loc) / scale + n_log_scale = len(x) * log(scale) + if np.any(~self._support_mask(x, *args)): + return inf + return self._nnlf(x, *args) + n_log_scale + + def _nnlf(self, x, *args): + return -np.sum(self._logpxf(x, *args), axis=0) + + def _nlff_and_penalty(self, x, args, log_fitfun): + # negative log fit function + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0, axis=0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + logff = log_fitfun(x, *args) + finite_logff = np.isfinite(logff) + n_bad += np.sum(~finite_logff, axis=0) + if n_bad > 0: + penalty = n_bad * log(_XMAX) * 100 + return -np.sum(logff[finite_logff], axis=0) + penalty + return -np.sum(logff, axis=0) + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = asarray((x-loc) / scale) + n_log_scale = len(x) * log(scale) + return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale + + def _penalized_nlpsf(self, theta, x): + """Penalized negative log product spacing function. 
+ i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty + where theta are the parameters (including loc and scale) + Follows reference [1] of scipy.stats.fit + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (np.sort(x) - loc)/scale + + def log_psf(x, *args): + x, lj = np.unique(x, return_counts=True) # fast for sorted x + cdf_data = self._cdf(x, *args) if x.size else [] + if not (x.size and 1 - cdf_data[-1] <= 0): + cdf = np.concatenate(([0], cdf_data, [1])) + lj = np.concatenate((lj, [1])) + else: + cdf = np.concatenate(([0], cdf_data)) + # here we could use logcdf w/ logsumexp trick to take differences, + # but in the context of the method, it seems unlikely to matter + return lj * np.log(np.diff(cdf) / lj) + + return self._nlff_and_penalty(x, args, log_psf) + + +class _ShapeInfo: + def __init__(self, name, integrality=False, domain=(-np.inf, np.inf), + inclusive=(True, True)): + self.name = name + self.integrality = integrality + self.endpoints = domain + self.inclusive = inclusive + + domain = list(domain) + if np.isfinite(domain[0]) and not inclusive[0]: + domain[0] = np.nextafter(domain[0], np.inf) + if np.isfinite(domain[1]) and not inclusive[1]: + domain[1] = np.nextafter(domain[1], -np.inf) + self.domain = domain + + +def _get_fixed_fit_value(kwds, names): + """ + Given names such as ``['f0', 'fa', 'fix_a']``, check that there is + at most one non-None value in `kwds` associated with those names. + Return that value, or None if none of the names occur in `kwds`. + As a side effect, all occurrences of those names in `kwds` are + removed. 
    """
    # Pop every alias that is actually present; popping is the documented
    # side effect that cleans `kwds` for later parsing.
    vals = [(name, kwds.pop(name)) for name in names if name in kwds]
    if len(vals) > 1:
        repeated = [name for name, val in vals]
        raise ValueError("fit method got multiple keyword arguments to "
                         "specify the same fixed parameter: " +
                         ', '.join(repeated))
    return vals[0][1] if vals else None


# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.


class rv_continuous(rv_generic):
    """A generic continuous random variable class meant for subclassing.

    `rv_continuous` is a base class to construct specific distribution classes
    and instances for continuous random variables. It cannot be used
    directly as a distribution.

    Parameters
    ----------
    momtype : int, optional
        The type of generic moment calculation to use: 0 for pdf, 1 (default)
        for ppf.
    a : float, optional
        Lower bound of the support of the distribution, default is minus
        infinity.
    b : float, optional
        Upper bound of the support of the distribution, default is plus
        infinity.
    xtol : float, optional
        The tolerance for fixed point calculation for generic ppf.
    badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated, default is np.nan.
    name : str, optional
        The name of the instance. This string is used to construct the default
        example for distributions.
    longname : str, optional
        This string is used as part of the first line of the docstring returned
        when a subclass has no docstring of its own. Note: `longname` exists
        for backwards compatibility, do not use for new subclasses.
    shapes : str, optional
        The shape of the distribution.
For example ``"m, n"`` for a + distribution that takes two integers as the two shape arguments for all + its methods. If not provided, shape parameters will be inferred from + the signature of the private methods, ``_pdf`` and ``_cdf`` of the + instance. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pdf + logpdf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + fit + fit_loc_scale + nnlf + support + + Notes + ----- + Public methods of an instance of a distribution class (e.g., ``pdf``, + ``cdf``) check their arguments and pass valid arguments to private, + computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid + if it is within the support of the distribution. + Whether a shape parameter is valid is decided by an ``_argcheck`` method + (which defaults to checking that its arguments are strictly positive.) + + **Subclassing** + + New random variables can be defined by subclassing the `rv_continuous` class + and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized + to location 0 and scale 1). + + If positive argument checking is not correct for your RV + then you will also need to re-define the ``_argcheck`` method. + + For most of the scipy.stats distributions, the support interval doesn't + depend on the shape parameters. ``x`` being in the support interval is + equivalent to ``self.a <= x <= self.b``. 
If either of the endpoints of
    the support do depend on the shape parameters, then
    i) the distribution must implement the ``_get_support`` method; and
    ii) those dependent endpoints must be omitted from the distribution's
    call to the ``rv_continuous`` initializer.

    Correct, but potentially slow defaults exist for the remaining
    methods but for speed and/or accuracy you can over-ride::

      _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf

    The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
    applied to a uniform random variate. In order to generate random variates
    efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
    if the inverse cdf can be expressed in an explicit form) or a sampling
    method needs to be implemented in a custom ``_rvs`` method.

    If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
    The main reason would be to improve numerical accuracy: for example,
    the survival function ``_sf`` is computed as ``1 - _cdf`` which can
    result in loss of precision if ``_cdf(x)`` is close to one.

    **Methods that can be overwritten by subclasses**
    ::

      _rvs
      _pdf
      _cdf
      _sf
      _ppf
      _isf
      _stats
      _munp
      _entropy
      _argcheck
      _get_support

    There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.

    A note on ``shapes``: subclasses need not specify them explicitly. In this
    case, `shapes` will be automatically deduced from the signatures of the
    overridden methods (`pdf`, `cdf` etc).
    If, for some reason, you prefer to avoid relying on introspection, you can
    specify ``shapes`` explicitly as an argument to the instance constructor.


    **Frozen Distributions**

    Normally, you must provide shape parameters (and, optionally, location and
    scale parameters) to each call of a method of a distribution.
    Alternatively, the object may be called (as a function) to fix the shape,
    location, and scale parameters returning a "frozen" continuous RV object:

    rv = generic(<shape(s)>, loc=0, scale=1)
        `rv_frozen` object with the same methods but holding the given shape,
        location, and scale fixed

    **Statistics**

    Statistics are computed using numerical integration by default.
    For speed you can redefine this using ``_stats``:

     - take shape parameters and return mu, mu2, g1, g2
     - If you can't compute one of these, return it as None
     - Can also be defined with a keyword argument ``moments``, which is a
       string composed of "m", "v", "s", and/or "k".
       Only the components appearing in string should be computed and
       returned in the order "m", "v", "s", or "k" with missing values
       returned as None.

    Alternatively, you can override ``_munp``, which takes ``n`` and shape
    parameters and returns the n-th non-central moment of the distribution.

    **Deepcopying / Pickling**

    If a distribution or frozen distribution is deepcopied (pickled/unpickled,
    etc.), any underlying random number generator is deepcopied with it. An
    implication is that if a distribution relies on the singleton RandomState
    before copying, it will rely on a copy of that random state after copying,
    and ``np.random.seed`` will no longer control the state.

    Examples
    --------
    To create a new Gaussian distribution, we would do the following:

    >>> from scipy.stats import rv_continuous
    >>> class gaussian_gen(rv_continuous):
    ...     "Gaussian distribution"
    ...     def _pdf(self, x):
    ...         return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
    >>> gaussian = gaussian_gen(name='gaussian')

    ``scipy.stats`` distributions are *instances*, so here we subclass
    `rv_continuous` and create an instance. With this, we now have
    a fully functional distribution with all relevant methods automagically
    generated by the framework.
+ + Note that above we defined a standard normal distribution, with zero mean + and unit variance. Shifting and scaling of the distribution can be done + by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)`` + essentially computes ``y = (x - loc) / scale`` and + ``gaussian._pdf(y) / scale``. + + """ + + def __init__(self, momtype=1, a=None, b=None, xtol=1e-14, + badvalue=None, name=None, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # save the ctor parameters, cf generic freeze + self._ctor_param = dict( + momtype=momtype, a=a, b=b, xtol=xtol, + badvalue=badvalue, name=name, longname=longname, + shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + if name is None: + name = 'Distribution' + self.badvalue = badvalue + self.name = name + self.a = a + self.b = b + if a is None: + self.a = -inf + if b is None: + self.b = inf + self.xtol = xtol + self.moment_type = momtype + self.shapes = shapes + + self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf], + locscale_in='loc=0, scale=1', + locscale_out='loc, scale') + self._attach_methods() + + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict, + discrete='continuous') + else: + dct = dict(distcont) + self._construct_doc(docdict, dct.get(self.name)) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in __setstate__ + # _random_state attribute is taken care of by rv_generic + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "vecentropy", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """ + Attaches dynamically created methods to the rv_continuous instance. 
        """
        # _attach_methods is responsible for calling _attach_argparser_methods
        self._attach_argparser_methods()

        # nin correction
        # `vectorize` cannot infer the argument count from *args signatures,
        # so `nin` is set explicitly to 1 (x or q) + numargs shape params.
        self._ppfvec = vectorize(self._ppf_single, otypes='d')
        self._ppfvec.nin = self.numargs + 1
        self.vecentropy = vectorize(self._entropy, otypes='d')
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self._cdfvec.nin = self.numargs + 1

        # momtype 0: moments by integrating x**m * pdf; momtype 1: by
        # integrating ppf(q)**m over q in [0, 1].
        if self.moment_type == 0:
            self.generic_moment = vectorize(self._mom0_sc, otypes='d')
        else:
            self.generic_moment = vectorize(self._mom1_sc, otypes='d')
        # Because of the *args argument of _mom0_sc, vectorize cannot count the
        # number of arguments correctly.
        self.generic_moment.nin = self.numargs + 1

    def _updated_ctor_param(self):
        """Return the current version of _ctor_param, possibly updated by user.

        Used by freezing.
        Keep this in sync with the signature of __init__.
        """
        dct = self._ctor_param.copy()
        dct['a'] = self.a
        dct['b'] = self.b
        dct['xtol'] = self.xtol
        dct['badvalue'] = self.badvalue
        dct['name'] = self.name
        dct['shapes'] = self.shapes
        return dct

    def _ppf_to_solve(self, x, q, *args):
        # Root in x of cdf(x) - q == 0 is the q-quantile.
        return self.cdf(*(x, )+args)-q

    def _ppf_single(self, q, *args):
        # Geometric growth factor for the bracket-expansion search below.
        factor = 10.
        left, right = self._get_support(*args)

        # For an unbounded support, expand the bracket geometrically until
        # it is guaranteed to contain the root, then hand off to brentq.
        if np.isinf(left):
            left = min(-factor, right)
            while self._ppf_to_solve(left, q, *args) > 0.:
                left, right = left * factor, left
            # left is now such that cdf(left) <= q
            # if right has changed, then cdf(right) > q

        if np.isinf(right):
            right = max(factor, left)
            while self._ppf_to_solve(right, q, *args) < 0.:
                left, right = right, right * factor
            # right is now such that cdf(right) >= q

        return optimize.brentq(self._ppf_to_solve,
                               left, right, args=(q,)+args, xtol=self.xtol)

    # moment from definition
    def _mom_integ0(self, x, m, *args):
        # Integrand x**m * pdf(x) for the momtype-0 moment.
        return x**m * self.pdf(x, *args)

    def _mom0_sc(self, m, *args):
        _a, _b = self._get_support(*args)
        return integrate.quad(self._mom_integ0, _a, _b,
                              args=(m,)+args)[0]

    # moment calculated using ppf
    def _mom_integ1(self, q, m, *args):
        # Integrand ppf(q)**m for the momtype-1 moment (q over [0, 1]).
        return (self.ppf(q, *args))**m

    def _mom1_sc(self, m, *args):
        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]

    def _pdf(self, x, *args):
        # Default pdf: numerical derivative of the cdf.
        return _derivative(self._cdf, x, dx=1e-5, args=args, order=5)

    # Could also define any of these
    def _logpdf(self, x, *args):
        p = self._pdf(x, *args)
        # pdf == 0 legitimately yields log(0) == -inf; silence the warning.
        with np.errstate(divide='ignore'):
            return log(p)

    def _logpxf(self, x, *args):
        # continuous distributions have PDF, discrete have PMF, but sometimes
        # the distinction doesn't matter. This lets us use `_logpxf` for both
        # discrete and continuous distributions.
        return self._logpdf(x, *args)

    def _cdf_single(self, x, *args):
        # Default scalar cdf: integrate the pdf from the lower support bound.
        _a, _b = self._get_support(*args)
        return integrate.quad(self._pdf, _a, x, args=args)[0]

    def _cdf(self, x, *args):
        return self._cdfvec(x, *args)

    def _logcdf(self, x, *args):
        # For accuracy, use log(cdf) below the median and log1p(-sf) above it
        # (where cdf is close to 1 and would lose precision).
        median = self._ppf(0.5, *args)
        with np.errstate(divide='ignore'):
            return _lazywhere(x < median, (x,) + args,
                              f=lambda x, *args: np.log(self._cdf(x, *args)),
                              f2=lambda x, *args: np.log1p(-self._sf(x, *args)))

    def _logsf(self, x, *args):
        # Mirror image of _logcdf: log(sf) above the median, log1p(-cdf)
        # below it.
        median = self._ppf(0.5, *args)
        with np.errstate(divide='ignore'):
            return _lazywhere(x > median, (x,) + args,
                              f=lambda x, *args: np.log(self._sf(x, *args)),
                              f2=lambda x, *args: np.log1p(-self._cdf(x, *args)))

    # generic _argcheck, _sf, _ppf, _isf, _rvs are defined
    # in rv_generic

    def pdf(self, x, *args, **kwds):
        """Probability density function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # Promote to at least float64 and standardize.
        dtyp = np.promote_types(x.dtype, np.float64)
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        # cond0: valid shapes/scale; cond1: x inside the support.
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._support_mask(x, *args) & (scale > 0)
        cond = cond0 & cond1
        output = zeros(shape(cond), dtyp)
        # Invalid parameters or NaN inputs get `badvalue`.
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # Divide by scale: Jacobian of the standardization.
            place(output, cond, self._pdf(*goodargs) / scale)
        if output.ndim == 0:
            return output[()]
        return output

    def
logpdf(self, x, *args, **kwds): + """Log of the probability density function at x of the given RV. + + This uses a more numerically accurate calculation if available. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logpdf : array_like + Log of the probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x, *args) & (scale > 0) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._logpdf(*goodargs) - log(scale)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, x, *args, **kwds): + """ + Cumulative distribution function of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `x` + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= np.asarray(_b)) & cond0 + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._cdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, x, *args, **kwds): + """Log of the cumulative distribution function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= _b) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, x, *args, **kwds): + """Survival function (1 - `cdf`) at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + sf : array_like + Survival function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._sf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, x, *args, **kwds): + """Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as (1 - `cdf`), + evaluated at `x`. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `x`. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + lower tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : array_like + quantile corresponding to the lower tail probability q. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 0) + cond3 = cond0 & (q == 1) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._ppf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + upper tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : ndarray or scalar + Quantile corresponding to the upper tail probability q. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 1) + cond3 = cond0 & (q == 0) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._isf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-2] + scale = theta[-1] + args = tuple(theta[:-2]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _nnlf_and_penalty(self, x, args): + """ + Compute the penalized negative log-likelihood for the + "standardized" data (i.e. already shifted by loc and + scaled by scale) for the shape parameters in `args`. + + `x` can be a 1D numpy array or a CensoredData instance. + """ + if isinstance(x, CensoredData): + # Filter out the data that is not in the support. + xs = x._supported(*self._get_support(*args)) + n_bad = len(x) - len(xs) + i1, i2 = xs._interval.T + terms = [ + # logpdf of the noncensored data. + self._logpdf(xs._uncensored, *args), + # logcdf of the left-censored data. + self._logcdf(xs._left, *args), + # logsf of the right-censored data. + self._logsf(xs._right, *args), + # log of probability of the interval-censored data. 
+ np.log(self._delta_cdf(i1, i2, *args)), + ] + else: + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + terms = [self._logpdf(x, *args)] + + totals, bad_counts = zip(*[_sum_finite(term) for term in terms]) + total = sum(totals) + n_bad += sum(bad_counts) + + return -total + n_bad * _LOGXMAX * 100 + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + if isinstance(x, CensoredData): + x = (x - loc) / scale + n_log_scale = (len(x) - x.num_censored()) * log(scale) + else: + x = (x - loc) / scale + n_log_scale = len(x) * log(scale) + + return self._nnlf_and_penalty(x, args) + n_log_scale + + def _fitstart(self, data, args=None): + """Starting point for fit (shape arguments + loc + scale).""" + if args is None: + args = (1.0,)*self.numargs + loc, scale = self._fit_loc_scale_support(data, *args) + return args + (loc, scale) + + def _reduce_func(self, args, kwds, data=None): + """ + Return the (possibly reduced) function to optimize in order to find MLE + estimates for the .fit method. + """ + # Convert fixed shape parameters to the standard numeric form: e.g. for + # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value + # for `f0`, `fa` or 'fix_a'. The following converts the latter two + # into the first (numeric) form. 
+ shapes = [] + if self.shapes: + shapes = self.shapes.replace(',', ' ').split() + for j, s in enumerate(shapes): + key = 'f' + str(j) + names = [key, 'f' + s, 'fix_' + s] + val = _get_fixed_fit_value(kwds, names) + if val is not None: + kwds[key] = val + + args = list(args) + Nargs = len(args) + fixedn = [] + names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] + x0 = [] + for n, key in enumerate(names): + if key in kwds: + fixedn.append(n) + args[n] = kwds.pop(key) + else: + x0.append(args[n]) + + methods = {"mle", "mm"} + method = kwds.pop('method', "mle").lower() + if method == "mm": + n_params = len(shapes) + 2 - len(fixedn) + exponents = (np.arange(1, n_params+1))[:, np.newaxis] + data_moments = np.sum(data[None, :]**exponents/len(data), axis=1) + + def objective(theta, x): + return self._moment_error(theta, x, data_moments) + + elif method == "mle": + objective = self._penalized_nnlf + else: + raise ValueError(f"Method '{method}' not available; " + f"must be one of {methods}") + + if len(fixedn) == 0: + func = objective + restore = None + else: + if len(fixedn) == Nargs: + raise ValueError( + "All parameters fixed. There is nothing to optimize.") + + def restore(args, theta): + # Replace with theta for all numbers not in fixedn + # This allows the non-fixed values to vary, but + # we still call self.nnlf with all parameters. 
+ i = 0 + for n in range(Nargs): + if n not in fixedn: + args[n] = theta[i] + i += 1 + return args + + def func(theta, x): + newtheta = restore(args[:], theta) + return objective(newtheta, x) + + return x0, func, restore, args + + def _moment_error(self, theta, x, data_moments): + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + + dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale) + for i in range(len(data_moments))]) + if np.any(np.isnan(dist_moments)): + raise ValueError("Method of moments encountered a non-finite " + "distribution moment and cannot continue. " + "Consider trying method='MLE'.") + + return (((data_moments - dist_moments) / + np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + def fit(self, data, *args, **kwds): + r""" + Return estimates of shape (if applicable), location, and scale + parameters from data. The default estimation method is Maximum + Likelihood Estimation (MLE), but Method of Moments (MM) + is also available. + + Starting estimates for the fit are given by input arguments; + for any arguments not provided with starting estimates, + ``self._fitstart(data)`` is called to generate such. + + One can hold some parameters fixed to specific values by passing in + keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) + and ``floc`` and ``fscale`` (for location and scale parameters, + respectively). + + Parameters + ---------- + data : array_like or `CensoredData` instance + Data to use in estimating the distribution parameters. + arg1, arg2, arg3,... : floats, optional + Starting value(s) for any shape-characterizing arguments (those not + provided will be determined by a call to ``_fitstart(data)``). + No default value. + **kwds : floats, optional + - `loc`: initial guess of the distribution's location parameter. + - `scale`: initial guess of the distribution's scale parameter. 
+ + Special keyword arguments are recognized as holding certain + parameters fixed: + + - f0...fn : hold respective shape parameters fixed. + Alternatively, shape parameters to fix can be specified by name. + For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a`` + are equivalent to ``f0``, and ``fb`` and ``fix_b`` are + equivalent to ``f1``. + + - floc : hold location parameter fixed to specified value. + + - fscale : hold scale parameter fixed to specified value. + + - optimizer : The optimizer to use. The optimizer must take + ``func`` and starting position as the first two arguments, + plus ``args`` (for extra arguments to pass to the + function to be optimized) and ``disp``. + The ``fit`` method calls the optimizer with ``disp=0`` to suppress output. + The optimizer must return the estimated parameters. + + - method : The method to use. The default is "MLE" (Maximum + Likelihood Estimate); "MM" (Method of Moments) + is also available. + + Raises + ------ + TypeError, ValueError + If an input is invalid + `~scipy.stats.FitError` + If fitting fails or the fit produced would be invalid + + Returns + ------- + parameter_tuple : tuple of floats + Estimates for any shape parameters (if applicable), followed by + those for location and scale. For most random variables, shape + statistics will be returned, but there are exceptions (e.g. + ``norm``). + + Notes + ----- + With ``method="MLE"`` (default), the fit is computed by minimizing + the negative log-likelihood function. A large, finite penalty + (rather than infinite negative log-likelihood) is applied for + observations beyond the support of the distribution. + + With ``method="MM"``, the fit is computed by minimizing the L2 norm + of the relative errors between the first *k* raw (about zero) data + moments and the corresponding distribution moments, where *k* is the + number of non-fixed parameters. 
+ More precisely, the objective function is:: + + (((data_moments - dist_moments) + / np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + where the constant ``1e-8`` avoids division by zero in case of + vanishing data moments. Typically, this error norm can be reduced to + zero. + Note that the standard method of moments can produce parameters for + which some data are outside the support of the fitted distribution; + this implementation does nothing to prevent this. + + For either method, + the returned answer is not guaranteed to be globally optimal; it + may only be locally optimal, or the optimization may fail altogether. + If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``, + the `fit` method will raise a ``RuntimeError``. + + When passing a ``CensoredData`` instance to ``data``, the log-likelihood + function is defined as: + + .. math:: + + l(\pmb{\theta}; k) & = \sum + \log(f(k_u; \pmb{\theta})) + + \sum + \log(F(k_l; \pmb{\theta})) \\ + & + \sum + \log(1 - F(k_r; \pmb{\theta})) \\ + & + \sum + \log(F(k_{\text{high}, i}; \pmb{\theta}) + - F(k_{\text{low}, i}; \pmb{\theta})) + + where :math:`f` and :math:`F` are the pdf and cdf, respectively, of the + function being fitted, :math:`\pmb{\theta}` is the parameter vector, + :math:`u` are the indices of uncensored observations, + :math:`l` are the indices of left-censored observations, + :math:`r` are the indices of right-censored observations, + subscripts "low"/"high" denote endpoints of interval-censored observations, and + :math:`i` are the indices of interval-censored observations. + + Examples + -------- + + Generate some data to fit: draw random variates from the `beta` + distribution + + >>> import numpy as np + >>> from scipy.stats import beta + >>> a, b = 1., 2. 
+ >>> rng = np.random.default_rng(172786373191770012695001057628748821561) + >>> x = beta.rvs(a, b, size=1000, random_state=rng) + + Now we can fit all four parameters (``a``, ``b``, ``loc`` and + ``scale``): + + >>> a1, b1, loc1, scale1 = beta.fit(x) + >>> a1, b1, loc1, scale1 + (1.0198945204435628, 1.9484708982737828, 4.372241314917588e-05, 0.9979078845964814) + + The fit can be done also using a custom optimizer: + + >>> from scipy.optimize import minimize + >>> def custom_optimizer(func, x0, args=(), disp=0): + ... res = minimize(func, x0, args, method="slsqp", options={"disp": disp}) + ... if res.success: + ... return res.x + ... raise RuntimeError('optimization routine failed') + >>> a1, b1, loc1, scale1 = beta.fit(x, method="MLE", optimizer=custom_optimizer) + >>> a1, b1, loc1, scale1 + (1.0198821087258905, 1.948484145914738, 4.3705304486881485e-05, 0.9979104663953395) + + We can also use some prior knowledge about the dataset: let's keep + ``loc`` and ``scale`` fixed: + + >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1) + >>> loc1, scale1 + (0, 1) + + We can also keep shape parameters fixed by using ``f``-keywords. To + keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or, + equivalently, ``fa=1``: + + >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1) + >>> a1 + 1 + + Not all distributions return estimates for the shape parameters. 
+ ``norm`` for example just returns estimates for location and scale: + + >>> from scipy.stats import norm + >>> x = norm.rvs(a, b, size=1000, random_state=123) + >>> loc1, scale1 = norm.fit(x) + >>> loc1, scale1 + (0.92087172783841631, 2.0015750750324668) + """ # noqa: E501 + method = kwds.get('method', "mle").lower() + + censored = isinstance(data, CensoredData) + if censored: + if method != 'mle': + raise ValueError('For censored data, the method must' + ' be "MLE".') + if data.num_censored() == 0: + # There are no censored values in data, so replace the + # CensoredData instance with a regular array. + data = data._uncensored + censored = False + + Narg = len(args) + if Narg > self.numargs: + raise TypeError("Too many input arguments.") + + # Check the finiteness of data only if data is not an instance of + # CensoredData. The arrays in a CensoredData instance have already + # been validated. + if not censored: + # Note: `ravel()` is called for backwards compatibility. + data = np.asarray(data).ravel() + if not np.isfinite(data).all(): + raise ValueError("The data contains non-finite values.") + + start = [None]*2 + if (Narg < self.numargs) or not ('loc' in kwds and + 'scale' in kwds): + # get distribution specific starting locations + start = self._fitstart(data) + args += start[Narg:-2] + loc = kwds.pop('loc', start[-2]) + scale = kwds.pop('scale', start[-1]) + args += (loc, scale) + x0, func, restore, args = self._reduce_func(args, kwds, data=data) + optimizer = kwds.pop('optimizer', optimize.fmin) + # convert string to function in scipy.optimize + optimizer = _fit_determine_optimizer(optimizer) + # by now kwds must be empty, since everybody took what they needed + if kwds: + raise TypeError(f"Unknown arguments: {kwds}.") + + # In some cases, method of moments can be done with fsolve/root + # instead of an optimizer, but sometimes no solution exists, + # especially when the user fixes parameters. 
    def _fit_loc_scale_support(self, data, *args):
        """Estimate loc and scale parameters from data accounting for support.

        A moment-based estimate is computed first; if it would place observed
        data outside the distribution's support, the estimate is shifted and
        rescaled so that all data lie strictly inside the support (with a
        small relative margin).

        Parameters
        ----------
        data : array_like
            Data to fit.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        Lhat : float
            Estimated location parameter for the data.
        Shat : float
            Estimated scale parameter for the data.

        """
        if isinstance(data, CensoredData):
            # For this estimate, "uncensor" the data by taking the
            # given endpoints as the data for the left- or right-censored
            # data, and the mean for the interval-censored data.
            data = data._uncensor()
        else:
            data = np.asarray(data)

        # Estimate location and scale according to the method of moments.
        loc_hat, scale_hat = self.fit_loc_scale(data, *args)

        # Compute the support according to the shape parameters.
        # NOTE(review): the result of _argcheck is discarded here; it appears
        # to be called only by convention before _get_support -- confirm.
        self._argcheck(*args)
        _a, _b = self._get_support(*args)
        a, b = _a, _b
        support_width = b - a

        # If the support is empty then return the moment-based estimates.
        if support_width <= 0:
            return loc_hat, scale_hat

        # Compute the proposed support according to the loc and scale
        # estimates.
        a_hat = loc_hat + a * scale_hat
        b_hat = loc_hat + b * scale_hat

        # Use the moment-based estimates if they are compatible with the data.
        data_a = np.min(data)
        data_b = np.max(data)
        if a_hat < data_a and data_b < b_hat:
            return loc_hat, scale_hat

        # Otherwise find other estimates that are compatible with the data.
        data_width = data_b - data_a
        rel_margin = 0.1
        margin = data_width * rel_margin

        # For a finite interval, both the location and scale
        # should have interesting values.
        if support_width < np.inf:
            loc_hat = (data_a - a) - margin
            scale_hat = (data_width + 2 * margin) / support_width
            return loc_hat, scale_hat

        # For a one-sided interval, use only an interesting location parameter.
        if a > -np.inf:
            return (data_a - a) - margin, 1
        elif b < np.inf:
            return (data_b - b) + margin, 1
        else:
            # Doubly infinite support.  NOTE(review): this branch looks
            # unreachable, because with a = -inf and b = inf the compatibility
            # check above (a_hat < data_a and data_b < b_hat) always returns
            # early -- confirm before relying on this RuntimeError.
            raise RuntimeError
    def fit_loc_scale(self, data, *args):
        """
        Estimate loc and scale parameters from data using 1st and 2nd moments.

        Parameters
        ----------
        data : array_like
            Data to fit.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        Lhat : float
            Estimated location parameter for the data.
        Shat : float
            Estimated scale parameter for the data.

        """
        # Distribution mean and variance for the given shapes (loc=0, scale=1).
        mu, mu2 = self.stats(*args, **{'moments': 'mv'})
        tmp = asarray(data)
        muhat = tmp.mean()
        mu2hat = tmp.var()
        # Match second moments: scale is the ratio of standard deviations.
        Shat = sqrt(mu2hat / mu2)
        with np.errstate(invalid='ignore'):
            # Match first moments given the scale estimate.
            Lhat = muhat - Shat*mu
        # Fall back to neutral values (loc=0, scale=1) when the distribution
        # moments are not finite (e.g. heavy-tailed distributions).
        if not np.isfinite(Lhat):
            Lhat = 0
        if not (np.isfinite(Shat) and (0 < Shat)):
            Shat = 1
        return Lhat, Shat

    def _entropy(self, *args):
        # Differential entropy by numerical integration of entr(pdf).
        def integ(x):
            val = self._pdf(x, *args)
            return entr(val)

        # upper limit is often inf, so suppress warnings when integrating
        _a, _b = self._get_support(*args)
        with np.errstate(over='ignore'):
            h = integrate.quad(integ, _a, _b)[0]

        if not np.isnan(h):
            return h
        else:
            # try with different limits if integration problems: replace an
            # infinite endpoint with the corresponding extreme quantile.
            low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
            if np.isinf(_b):
                upper = upp
            else:
                upper = _b
            if np.isinf(_a):
                lower = low
            else:
                lower = _a
            return integrate.quad(integ, lower, upper)[0]

    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
               conditional=False, **kwds):
        """Calculate expected value of a function with respect to the
        distribution by numerical integration.

        The expected value of a function ``f(x)`` with respect to a
        distribution ``dist`` is defined as::

                    ub
            E[f(x)] = Integral(f(x) * dist.pdf(x)),
                    lb

        where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
        distribution.  If the bounds ``lb`` and ``ub`` correspond to the
        support of the distribution, e.g. ``[-inf, inf]`` in the default
        case, then the integral is the unrestricted expectation of ``f(x)``.

        Parameters
        ----------
        func : callable, optional
            Function for which integral is calculated. Takes only one argument.
            The default is the identity mapping f(x) = x.
        args : tuple, optional
            Shape parameters of the distribution.
        loc : float, optional
            Location parameter (default=0).
        scale : float, optional
            Scale parameter (default=1).
        lb, ub : scalar, optional
            Lower and upper bound for integration. Default is set to the
            support of the distribution.
        conditional : bool, optional
            If True, the integral is corrected by the conditional probability
            of the integration interval.  The return value is the expectation
            of the function, conditional on being in the given interval.
            Default is False.

        Additional keyword arguments are passed to `scipy.integrate.quad`
        (e.g. ``complex_func=True`` to integrate a complex-valued integrand).

        Returns
        -------
        expect : float
            The calculated expected value.

        Notes
        -----
        The integration behavior of this function is inherited from
        `scipy.integrate.quad`.  Neither this function nor
        `scipy.integrate.quad` can verify whether the integral exists or is
        finite, nor is the accuracy of the result verified.  For critical
        applications, check results against other integration methods.

        The function is not vectorized.

        Examples
        --------
        To understand the effect of the bounds of integration consider

        >>> from scipy.stats import expon
        >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
        0.6321205588285578

        This is close to

        >>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
        0.6321205588285577

        If ``conditional=True``

        >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
        1.0000000000000002

        The slight deviation from 1 is due to numerical integration.

        """
        lockwds = {'loc': loc,
                   'scale': scale}
        self._argcheck(*args)
        _a, _b = self._get_support(*args)
        if func is None:
            # Default integrand: the identity, i.e. compute the mean.
            def fun(x, *args):
                return x * self.pdf(x, *args, **lockwds)
        else:
            def fun(x, *args):
                return func(x) * self.pdf(x, *args, **lockwds)
        if lb is None:
            lb = loc + _a * scale
        if ub is None:
            ub = loc + _b * scale

        # Probability mass inside [lb, ub]; used for conditional correction.
        cdf_bounds = self.cdf([lb, ub], *args, **lockwds)
        invfac = cdf_bounds[1] - cdf_bounds[0]

        kwds['args'] = args

        # split interval to help integrator w/ infinite support; see gh-8928
        alpha = 0.05  # split body from tails at probability mass `alpha`
        inner_bounds = np.array([alpha, 1-alpha])
        cdf_inner_bounds = cdf_bounds[0] + invfac * inner_bounds
        c, d = loc + self._ppf(cdf_inner_bounds, *args) * scale

        # Do not silence warnings from integration.
        lbc = integrate.quad(fun, lb, c, **kwds)[0]
        cd = integrate.quad(fun, c, d, **kwds)[0]
        dub = integrate.quad(fun, d, ub, **kwds)[0]
        vals = (lbc + cd + dub)

        if conditional:
            vals /= invfac
        return np.array(vals)[()]  # make it a numpy scalar like other methods

    def _param_info(self):
        # Shape parameters (distribution-specific) followed by loc and scale.
        shape_info = self._shape_info()
        loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False))
        scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False))
        param_info = shape_info + [loc_info, scale_info]
        return param_info

    # For now, _delta_cdf is a private method.
    def _delta_cdf(self, x1, x2, *args, loc=0, scale=1):
        """
        Compute CDF(x2) - CDF(x1).

        Where x1 is greater than the median, compute SF(x1) - SF(x2),
        otherwise compute CDF(x2) - CDF(x1).

        This function is only useful if `dist.sf(x, ...)` has an implementation
        that is numerically more accurate than `1 - dist.cdf(x, ...)`.
        """
        cdf1 = self.cdf(x1, *args, loc=loc, scale=scale)
        # Possible optimizations (needs investigation-these might not be
        # better):
        # * Use _lazywhere instead of np.where
        # * Instead of cdf1 > 0.5, compare x1 to the median.
        result = np.where(cdf1 > 0.5,
                          (self.sf(x1, *args, loc=loc, scale=scale)
                           - self.sf(x2, *args, loc=loc, scale=scale)),
                          self.cdf(x2, *args, loc=loc, scale=scale) - cdf1)
        # Return a scalar (not a 0-d array) for scalar input.
        if result.ndim == 0:
            result = result[()]
        return result
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
    """Non-central moment of discrete distribution."""
    def fun(x):
        return np.power(x, n) * self._pmf(x, *args)

    # Sum pmf-weighted powers over the support, starting near the median.
    _a, _b = self._get_support(*args)
    return _expect(fun, _a, _b, self._ppf(0.5, *args), self.inc)


def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    # Scalar inverse-CDF for a discrete distribution: first bracket the
    # quantile q between finite points a < b, then bisect on the integers.
    _a, _b = self._get_support(*args)
    b = _b
    a = _a

    step = 10
    if isinf(b):  # Be sure ending point is > q
        b = float(max(100*q, 10))
        while 1:
            if b >= _b:
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                # Expand the bracket geometrically until cdf(b) >= q.
                b += step
                step *= 2
            else:
                break
    else:
        qb = 1.0

    step = 10
    if isinf(a):  # be sure starting point < q
        a = float(min(-100*q, -10))
        while 1:
            if a <= _a:
                # NOTE(review): this assigns ``qb``, not ``qa`` -- it looks
                # like a typo, but the branch appears unreachable: this loop
                # only runs when _a is -inf, and a finite ``a`` can never be
                # <= -inf.  Left byte-identical.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                # Expand the bracket geometrically until cdf(a) <= q.
                a -= step
                step *= 2
            else:
                break
    else:
        qa = self._cdf(a, *args)

    if np.isinf(a) or np.isinf(b):
        message = "Arguments that bracket the requested quantile could not be found."
        raise RuntimeError(message)

    # maximum number of bisections within the normal float64s
    # maxiter = int(np.log2(finfo.max) - np.log2(finfo.smallest_normal))
    maxiter = 2046
    for i in range(maxiter):
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # Bracket has collapsed to adjacent integers; pick the endpoint
            # whose cdf first reaches q.
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. For example "m, n" for a distribution + that takes two integers as the two shape arguments for all its methods + If not provided, shape parameters will be inferred from + the signatures of the private methods, ``_pmf`` and ``_cdf`` of + the instance. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pmf + logpmf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + support + + Notes + ----- + This class is similar to `rv_continuous`. Whether a shape parameter is + valid is decided by an ``_argcheck`` method (which defaults to checking + that its arguments are strictly positive.) + The main differences are as follows. + + - The support of the distribution is a set of integers. + - Instead of the probability density function, ``pdf`` (and the + corresponding private ``_pdf``), this class defines the + *probability mass function*, `pmf` (and the corresponding + private ``_pmf``.) + - There is no ``scale`` parameter. + - The default implementations of methods (e.g. ``_cdf``) are not designed + for distributions with support that is unbounded below (i.e. + ``a=-np.inf``), so they must be overridden. 
+ + To create a new discrete distribution, we would do the following: + + >>> from scipy.stats import rv_discrete + >>> class poisson_gen(rv_discrete): + ... "Poisson distribution" + ... def _pmf(self, k, mu): + ... return exp(-mu) * mu**k / factorial(k) + + and create an instance:: + + >>> poisson = poisson_gen(name="poisson") + + Note that above we defined the Poisson distribution in the standard form. + Shifting the distribution can be done by providing the ``loc`` parameter + to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)`` + delegates the work to ``poisson._pmf(x-loc, mu)``. + + **Discrete distributions from a list of probabilities** + + Alternatively, you can construct an arbitrary discrete rv defined + on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the + ``values`` keyword argument to the `rv_discrete` constructor. + + **Deepcopying / Pickling** + + If a distribution or frozen distribution is deepcopied (pickled/unpickled, + etc.), any underlying random number generator is deepcopied with it. An + implication is that if a distribution relies on the singleton RandomState + before copying, it will rely on a copy of that random state after copying, + and ``np.random.seed`` will no longer control the state. 
+ + Examples + -------- + Custom made discrete distribution: + + >>> import numpy as np + >>> from scipy import stats + >>> xk = np.arange(7) + >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2) + >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) + >>> + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r') + >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4) + >>> plt.show() + + Random number generation: + + >>> R = custm.rvs(size=100) + + """ + def __new__(cls, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + if values is not None: + # dispatch to a subclass + return super().__new__(rv_sample) + else: + # business as usual + return super().__new__(cls) + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.a = a + self.b = b + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + + if values is not None: + raise ValueError("rv_discrete.__init__(..., values != None, ...)") + + self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + self._attach_methods() + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + # these methods will be remade in __setstate__ + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """Attaches dynamically created methods to 
the rv_discrete instance.""" + self._cdfvec = vectorize(self._cdf_single, otypes='d') + self.vecentropy = vectorize(self._entropy) + + # _attach_methods is responsible for calling _attach_argparser_methods + self._attach_argparser_methods() + + # nin correction needs to be after we know numargs + # correct nin for generic moment vectorization + _vec_generic_moment = vectorize(_drv2_moment, otypes='d') + _vec_generic_moment.nin = self.numargs + 2 + self.generic_moment = types.MethodType(_vec_generic_moment, self) + + # correct nin for ppf vectorization + _vppf = vectorize(_drv2_ppfsingle, otypes='d') + _vppf.nin = self.numargs + 2 + self._ppfvec = types.MethodType(_vppf, self) + + # now that self.numargs is defined, we can adjust nin + self._cdfvec.nin = self.numargs + 1 + + def _construct_docstrings(self, name, longname): + if name is None: + name = 'Distribution' + self.name = name + + # generate docstring for subclass instances + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict_discrete, + discrete='discrete') + else: + dct = dict(distdiscrete) + self._construct_doc(docdict_discrete, dct.get(self.name)) + + # discrete RV do not have the scale parameter, remove it + self.__doc__ = self.__doc__.replace( + '\n scale : array_like, ' + 'optional\n scale parameter (default=1)', '') + + def _updated_ctor_param(self): + """Return the current version of _ctor_param, possibly updated by user. + + Used by freezing. + Keep this in sync with the signature of __init__. 
+ """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['badvalue'] = self.badvalue + dct['moment_tol'] = self.moment_tol + dct['inc'] = self.inc + dct['name'] = self.name + dct['shapes'] = self.shapes + return dct + + def _nonzero(self, k, *args): + return floor(k) == k + + def _pmf(self, k, *args): + return self._cdf(k, *args) - self._cdf(k-1, *args) + + def _logpmf(self, k, *args): + with np.errstate(divide='ignore'): + return log(self._pmf(k, *args)) + + def _logpxf(self, k, *args): + # continuous distributions have PDF, discrete have PMF, but sometimes + # the distinction doesn't matter. This lets us use `_logpxf` for both + # discrete and continuous distributions. + return self._logpmf(k, *args) + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-1] + scale = 1 + args = tuple(theta[:-1]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _cdf_single(self, k, *args): + _a, _b = self._get_support(*args) + m = arange(int(_a), k+1) + return np.sum(self._pmf(m, *args), axis=0) + + def _cdf(self, x, *args): + k = floor(x).astype(np.float64) + return self._cdfvec(k, *args) + + # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic + + def rvs(self, *args, **kwargs): + """Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + size : int or tuple of ints, optional + Defining number of random variates (Default is 1). Note that `size` + has to be given as keyword, not as positional argument. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. 
    def pmf(self, k, *args, **kwds):
        """Probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        pmf : array_like
            Probability mass function evaluated at k

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Shift to the standardized variable.
        k = asarray(k-loc)
        # cond0: valid shape parameters; cond1: k within support and (for
        # distributions not defined by explicit values) on the lattice.
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b)
        if not isinstance(self, rv_sample):
            cond1 = cond1 & self._nonzero(k, *args)
        cond = cond0 & cond1
        # Default output 0; invalid shapes or NaN quantiles get badvalue.
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            # Clip to [0, 1] to guard against small numerical excursions.
            place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logpmf(self, k, *args, **kwds):
        """Log of the probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter. Default is 0.

        Returns
        -------
        logpmf : array_like
            Log of the probability mass function evaluated at k.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b)
        if not isinstance(self, rv_sample):
            cond1 = cond1 & self._nonzero(k, *args)
        cond = cond0 & cond1
        # Default output -inf (log of zero probability).
        output = empty(shape(cond), 'd')
        output.fill(-inf)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logpmf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def cdf(self, k, *args, **kwds):
        """Cumulative distribution function of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `k`.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        # cond1: inside the support; cond2: at/above the upper end (cdf=1);
        # cond3: -inf (cdf=0).
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)
        cond3 = np.isneginf(k)
        cond = cond0 & cond1 & np.isfinite(k)

        output = zeros(shape(cond), 'd')
        # ``cond0 == cond0`` is an all-True mask of the broadcast shape.
        place(output, cond2*(cond0 == cond0), 1.0)
        place(output, cond3*(cond0 == cond0), 0.0)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)

        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            # Clip to [0, 1] to guard against small numerical excursions.
            place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logcdf(self, k, *args, **kwds):
        """Log of the cumulative distribution function at k of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at k.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)
        cond = cond0 & cond1
        # Default output -inf (log of cdf == 0 below the support).
        output = empty(shape(cond), 'd')
        output.fill(-inf)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # At/above the upper support end the cdf is 1, so log(cdf) is 0.
        place(output, cond2*(cond0 == cond0), 0.0)

        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, k, *args, **kwds):
        """Survival function (1 - `cdf`) at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        sf : array_like
            Survival function evaluated at k.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        # cond1: inside the support; cond2: below the support (sf == 1).
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = ((k < _a) | np.isneginf(k)) & cond0
        cond = cond0 & cond1 & np.isfinite(k)
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            # Clip to [0, 1] to guard against small numerical excursions.
            place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logsf(self, k, *args, **kwds):
        """Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as 1 - `cdf`,
        evaluated at `k`.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `k`.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k < _a) & cond0
        cond = cond0 & cond1
        # Default output -inf (log of sf == 0 above the support).
        output = empty(shape(cond), 'd')
        output.fill(-inf)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # Below the support sf is 1, so log(sf) is 0.
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def ppf(self, q, *args, **kwds):
        """Percent point function (inverse of `cdf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Lower tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : array_like
            Quantile corresponding to the lower tail probability, q.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # ``loc == loc`` filters out NaN locations.
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
        # q == 0 maps to one below the lower support end (cdf there is 0);
        # ``cond == cond`` is an all-True mask of the broadcast shape.
        place(output, (q == 0)*(cond == cond), _a-1 + loc)
        place(output, cond2, _b + loc)
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._ppf(*goodargs) + loc)

        if output.ndim == 0:
            return output[()]
        return output

    def isf(self, q, *args, **kwds):
        """Inverse survival function (inverse of `sf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Upper tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : ndarray or scalar
            Quantile corresponding to the upper tail probability, q.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond3 = (q == 0) & cond0
        cond = cond0 & cond1

        # same problem as with ppf; copied from ppf and changed
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
        # For the inverse survival function, q == 1 maps to the lower end of
        # the support and q == 0 to the upper end.
        lower_bound = _a - 1 + loc
        upper_bound = _b + loc
        place(output, cond2*(cond == cond), lower_bound)
        place(output, cond3*(cond == cond), upper_bound)

        # call place only if at least 1 valid argument
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            # PB same as ticket 766
            place(output, cond, self._isf(*goodargs) + loc)

        if output.ndim == 0:
            return output[()]
        return output

    def _entropy(self, *args):
        # Distributions built from explicit (xk, pk) values carry their
        # probabilities in `pk`; use the exact entropy in that case.
        if hasattr(self, 'pk'):
            return stats.entropy(self.pk)
        else:
            # Otherwise sum entr(pmf) over the support, starting at the
            # median and expanding outward.
            _a, _b = self._get_support(*args)
            return _expect(lambda x: entr(self._pmf(x, *args)),
                           _a, _b, self._ppf(0.5, *args), self.inc)
+ conditional : bool, optional + If true then the expectation is corrected by the conditional + probability of the summation interval. The return value is the + expectation of the function, `func`, conditional on being in + the given interval (k such that ``lb <= k <= ub``). + Default is False. + maxcount : int, optional + Maximal number of terms to evaluate (to avoid an endless loop for + an infinite sum). Default is 1000. + tolerance : float, optional + Absolute tolerance for the summation. Default is 1e-10. + chunksize : int, optional + Iterate over the support of a distributions in chunks of this size. + Default is 32. + + Returns + ------- + expect : float + Expected value. + + Notes + ----- + For heavy-tailed distributions, the expected value may or + may not exist, + depending on the function, `func`. If it does exist, but the + sum converges + slowly, the accuracy of the result may be rather low. For instance, for + ``zipf(4)``, accuracy for mean, variance in example is only 1e-5. + increasing `maxcount` and/or `chunksize` may improve the result, + but may also make zipf very slow. + + The function is not vectorized. + + """ + # Although `args` is just the shape parameters, `poisson_binom` needs this + # to split the vector-valued shape into a tuple of separate shapes + args, _, _ = self._parse_args(*args) + + if func is None: + def fun(x): + # loc and args from outer scope + return (x+loc)*self._pmf(x, *args) + else: + def fun(x): + # loc and args from outer scope + return func(x+loc)*self._pmf(x, *args) + # used pmf because _pmf does not check support in randint and there + # might be problems(?) 
with correct self.a, self.b at this stage maybe + # not anymore, seems to work now with _pmf + + _a, _b = self._get_support(*args) + if lb is None: + lb = _a + else: + lb = lb - loc # convert bound for standardized distribution + if ub is None: + ub = _b + else: + ub = ub - loc # convert bound for standardized distribution + if conditional: + invfac = self.sf(lb-1, *args) - self.sf(ub, *args) + else: + invfac = 1.0 + + if isinstance(self, rv_sample): + res = self._expect(fun, lb, ub) + return res / invfac + + # iterate over the support, starting from the median + x0 = self._ppf(0.5, *args) + res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize) + return res / invfac + + def _param_info(self): + shape_info = self._shape_info() + loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False)) + param_info = shape_info + [loc_info] + return param_info + + +def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, + chunksize=32): + """Helper for computing the expectation value of `fun`.""" + # short-circuit if the support size is small enough + if (ub - lb) <= chunksize: + supp = np.arange(lb, ub+1, inc) + vals = fun(supp) + return np.sum(vals) + + # otherwise, iterate starting from x0 + if x0 < lb: + x0 = lb + if x0 > ub: + x0 = ub + + count, tot = 0, 0. 
+ # iterate over [x0, ub] inclusive + for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc): + count += x.size + delta = np.sum(fun(x)) + tot += delta + if abs(delta) < tolerance * x.size: + break + if count > maxcount: + warnings.warn('expect(): sum did not converge', + RuntimeWarning, stacklevel=3) + return tot + + # iterate over [lb, x0) + for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc): + count += x.size + delta = np.sum(fun(x)) + tot += delta + if abs(delta) < tolerance * x.size: + break + if count > maxcount: + warnings.warn('expect(): sum did not converge', + RuntimeWarning, stacklevel=3) + break + + return tot + + +def _iter_chunked(x0, x1, chunksize=4, inc=1): + """Iterate from x0 to x1 in chunks of chunksize and steps inc. + + x0 must be finite, x1 need not be. In the latter case, the iterator is + infinite. + Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards + (make sure to set inc < 0.) + + >>> from scipy.stats._distn_infrastructure import _iter_chunked + >>> [x for x in _iter_chunked(2, 5, inc=2)] + [array([2, 4])] + >>> [x for x in _iter_chunked(2, 11, inc=2)] + [array([2, 4, 6, 8]), array([10])] + >>> [x for x in _iter_chunked(2, -5, inc=-2)] + [array([ 2, 0, -2, -4])] + >>> [x for x in _iter_chunked(2, -9, inc=-2)] + [array([ 2, 0, -2, -4]), array([-6, -8])] + + """ + if inc == 0: + raise ValueError('Cannot increment by zero.') + if chunksize <= 0: + raise ValueError(f'Chunk size must be positive; got {chunksize}.') + + s = 1 if inc > 0 else -1 + stepsize = abs(chunksize * inc) + + x = np.copy(x0) + while (x - x1) * inc < 0: + delta = min(stepsize, abs(x - x1)) + step = delta * s + supp = np.arange(x, x + step, inc) + x += step + yield supp + + +class rv_sample(rv_discrete): + """A 'sample' discrete distribution defined by the support and values. + + The ctor ignores most of the arguments, only needs the `values` argument. 
+ """ + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super(rv_discrete, self).__init__(seed) + + if values is None: + raise ValueError("rv_sample.__init__(..., values=None,...)") + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + self.vecentropy = self._entropy + + xk, pk = values + + if np.shape(xk) != np.shape(pk): + raise ValueError("xk and pk must have the same shape.") + if np.less(pk, 0.0).any(): + raise ValueError("All elements of pk must be non-negative.") + if not np.allclose(np.sum(pk), 1): + raise ValueError("The sum of provided pk is not 1.") + if not len(set(np.ravel(xk))) == np.size(xk): + raise ValueError("xk may not contain duplicate values.") + + indx = np.argsort(np.ravel(xk)) + self.xk = np.take(np.ravel(xk), indx, 0) + self.pk = np.take(np.ravel(pk), indx, 0) + self.a = self.xk[0] + self.b = self.xk[-1] + + self.qvals = np.cumsum(self.pk, axis=0) + + self.shapes = ' ' # bypass inspection + + self._construct_argparser(meths_to_inspect=[self._pmf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + + self._attach_methods() + + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in rv_generic.__setstate__, + # which calls rv_generic._attach_methods + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"] + [dct.pop(attr, None) for attr in attrs] + + return dct + + def _attach_methods(self): + """Attaches dynamically created argparser methods.""" + self._attach_argparser_methods() + + def _get_support(self, *args): + """Return the support of the (unscaled, 
unshifted) distribution. + + Parameters + ---------- + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + a, b : numeric (float, or int or +/-np.inf) + end-points of the distribution's support. + """ + return self.a, self.b + + def _pmf(self, x): + return np.select([x == k for k in self.xk], + [np.broadcast_arrays(p, x)[0] for p in self.pk], 0) + + def _cdf(self, x): + xx, xxk = np.broadcast_arrays(x[:, None], self.xk) + indx = np.argmax(xxk > xx, axis=-1) - 1 + return self.qvals[indx] + + def _ppf(self, q): + qq, sqq = np.broadcast_arrays(q[..., None], self.qvals) + indx = argmax(sqq >= qq, axis=-1) + return self.xk[indx] + + def _rvs(self, size=None, random_state=None): + # Need to define it explicitly, otherwise .rvs() with size=None + # fails due to explicit broadcasting in _ppf + U = random_state.uniform(size=size) + if size is None: + U = np.array(U, ndmin=1) + Y = self._ppf(U)[0] + else: + Y = self._ppf(U) + return Y + + def _entropy(self): + return stats.entropy(self.pk) + + def generic_moment(self, n): + n = asarray(n) + return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0) + + def _expect(self, fun, lb, ub, *args, **kwds): + # ignore all args, just do a brute force summation + supp = self.xk[(lb <= self.xk) & (self.xk <= ub)] + vals = fun(supp) + return np.sum(vals) + + +def _check_shape(argshape, size): + """ + This is a utility function used by `_rvs()` in the class geninvgauss_gen. + It compares the tuple argshape to the tuple size. + + Parameters + ---------- + argshape : tuple of integers + Shape of the arguments. + size : tuple of integers or integer + Size argument of rvs(). + + Returns + ------- + The function returns two tuples, scalar_shape and bc. + + scalar_shape : tuple + Shape to which the 1-d array of random variates returned by + _rvs_scalar() is converted when it is copied into the + output array of _rvs(). 
+ + bc : tuple of booleans + bc is an tuple the same length as size. bc[j] is True if the data + associated with that index is generated in one call of _rvs_scalar(). + + """ + scalar_shape = [] + bc = [] + for argdim, sizedim in zip_longest(argshape[::-1], size[::-1], + fillvalue=1): + if sizedim > argdim or (argdim == sizedim == 1): + scalar_shape.append(sizedim) + bc.append(True) + else: + bc.append(False) + return tuple(scalar_shape[::-1]), tuple(bc[::-1]) + + +def get_distribution_names(namespace_pairs, rv_base_class): + """Collect names of statistical distributions and their generators. + + Parameters + ---------- + namespace_pairs : sequence + A snapshot of (name, value) pairs in the namespace of a module. + rv_base_class : class + The base class of random variable generator classes in a module. + + Returns + ------- + distn_names : list of strings + Names of the statistical distributions. + distn_gen_names : list of strings + Names of the generators of the statistical distributions. + Note that these are not simply the names of the statistical + distributions, with a _gen suffix added. 
+ + """ + distn_names = [] + distn_gen_names = [] + for name, value in namespace_pairs: + if name.startswith('_'): + continue + if name.endswith('_gen') and issubclass(value, rv_base_class): + distn_gen_names.append(name) + if isinstance(value, rv_base_class): + distn_names.append(name) + return distn_names, distn_gen_names diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_multivariate.py b/phi4/lib/python3.10/site-packages/scipy/stats/_multivariate.py new file mode 100644 index 0000000000000000000000000000000000000000..be303fe087bd45eb63ee0c9c1b4244fafc1adcdc --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_multivariate.py @@ -0,0 +1,7305 @@ +# +# Author: Joris Vankerschaver 2013 +# +import math +import threading +import numpy as np +import scipy.linalg +from scipy._lib import doccer +from scipy.special import (gammaln, psi, multigammaln, xlogy, entr, betaln, + ive, loggamma) +from scipy import special +from scipy._lib._util import check_random_state, _lazywhere +from scipy.linalg.blas import drot, get_blas_funcs +from ._continuous_distns import norm, invgamma +from ._discrete_distns import binom +from . import _mvn, _covariance, _rcont +from ._qmvnt import _qmvt +from ._morestats import directional_stats +from scipy.optimize import root_scalar + +__all__ = ['multivariate_normal', + 'matrix_normal', + 'dirichlet', + 'dirichlet_multinomial', + 'wishart', + 'invwishart', + 'multinomial', + 'special_ortho_group', + 'ortho_group', + 'random_correlation', + 'unitary_group', + 'multivariate_t', + 'multivariate_hypergeom', + 'random_table', + 'uniform_direction', + 'vonmises_fisher', + 'normal_inverse_gamma'] + +_LOG_2PI = np.log(2 * np.pi) +_LOG_2 = np.log(2) +_LOG_PI = np.log(np.pi) +MVN_LOCK = threading.Lock() + + +_doc_random_state = """\ +seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. 
+ If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. +""" + + +def _squeeze_output(out): + """ + Remove single-dimensional entries from array and convert to scalar, + if necessary. + """ + out = out.squeeze() + if out.ndim == 0: + out = out[()] + return out + + +def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): + """Determine which eigenvalues are "small" given the spectrum. + + This is for compatibility across various linear algebra functions + that should agree about whether or not a Hermitian matrix is numerically + singular and what is its numerical matrix rank. + This is designed to be compatible with scipy.linalg.pinvh. + + Parameters + ---------- + spectrum : 1d ndarray + Array of eigenvalues of a Hermitian matrix. + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + + Returns + ------- + eps : float + Magnitude cutoff for numerical negligibility. + + """ + if rcond is not None: + cond = rcond + if cond in [None, -1]: + t = spectrum.dtype.char.lower() + factor = {'f': 1E3, 'd': 1E6} + cond = factor[t] * np.finfo(t).eps + eps = cond * np.max(abs(spectrum)) + return eps + + +def _pinv_1d(v, eps=1e-5): + """A helper function for computing the pseudoinverse. + + Parameters + ---------- + v : iterable of numbers + This may be thought of as a vector of eigenvalues or singular values. + eps : float + Values with magnitude no greater than eps are considered negligible. + + Returns + ------- + v_pinv : 1d float ndarray + A vector of pseudo-inverted numbers. + + """ + return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) + + +class _PSD: + """ + Compute coordinated functions of a symmetric positive semidefinite matrix. + + This class addresses two issues. 
Firstly it allows the pseudoinverse, + the logarithm of the pseudo-determinant, and the rank of the matrix + to be computed using one call to eigh instead of three. + Secondly it allows these functions to be computed in a way + that gives mutually compatible results. + All of the functions are computed with a common understanding as to + which of the eigenvalues are to be considered negligibly small. + The functions are designed to coordinate with scipy.linalg.pinvh() + but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). + + Parameters + ---------- + M : array_like + Symmetric positive semidefinite matrix (2-D). + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + lower : bool, optional + Whether the pertinent array data is taken from the lower + or upper triangle of M. (Default: lower) + check_finite : bool, optional + Whether to check that the input matrices contain only finite + numbers. Disabling may give a performance gain, but may result + in problems (crashes, non-termination) if the inputs do contain + infinities or NaNs. + allow_singular : bool, optional + Whether to allow a singular matrix. (Default: True) + + Notes + ----- + The arguments are similar to those of scipy.linalg.pinvh(). + + """ + + def __init__(self, M, cond=None, rcond=None, lower=True, + check_finite=True, allow_singular=True): + self._M = np.asarray(M) + + # Compute the symmetric eigendecomposition. + # Note that eigh takes care of array conversion, chkfinite, + # and assertion that the matrix is square. + s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) + + eps = _eigvalsh_to_eps(s, cond, rcond) + if np.min(s) < -eps: + msg = "The input matrix must be symmetric positive semidefinite." 
+ raise ValueError(msg) + d = s[s > eps] + if len(d) < len(s) and not allow_singular: + msg = ("When `allow_singular is False`, the input matrix must be " + "symmetric positive definite.") + raise np.linalg.LinAlgError(msg) + s_pinv = _pinv_1d(s, eps) + U = np.multiply(u, np.sqrt(s_pinv)) + + # Save the eigenvector basis, and tolerance for testing support + self.eps = 1e3*eps + self.V = u[:, s <= eps] + + # Initialize the eagerly precomputed attributes. + self.rank = len(d) + self.U = U + self.log_pdet = np.sum(np.log(d)) + + # Initialize attributes to be lazily computed. + self._pinv = None + + def _support_mask(self, x): + """ + Check whether x lies in the support of the distribution. + """ + residual = np.linalg.norm(x @ self.V, axis=-1) + in_support = residual < self.eps + return in_support + + @property + def pinv(self): + if self._pinv is None: + self._pinv = np.dot(self.U, self.U.T) + return self._pinv + + +class multi_rv_generic: + """ + Class which encapsulates common functionality between all multivariate + distributions. + """ + def __init__(self, seed=None): + super().__init__() + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """ Get or set the Generator object for generating random variates. + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def _get_random_state(self, random_state): + if random_state is not None: + return check_random_state(random_state) + else: + return self._random_state + + +class multi_rv_frozen: + """ + Class which encapsulates common functionality between all frozen + multivariate distributions. 
+ """ + @property + def random_state(self): + return self._dist._random_state + + @random_state.setter + def random_state(self, seed): + self._dist._random_state = check_random_state(seed) + + +_mvn_doc_default_callparams = """\ +mean : array_like, default: ``[0]`` + Mean of the distribution. +cov : array_like or `Covariance`, default: ``[1]`` + Symmetric positive (semi)definite covariance matrix of the distribution. +allow_singular : bool, default: ``False`` + Whether to allow a singular covariance matrix. This is ignored if `cov` is + a `Covariance` object. +""" + +_mvn_doc_callparams_note = """\ +Setting the parameter `mean` to `None` is equivalent to having `mean` +be the zero-vector. The parameter `cov` can be a scalar, in which case +the covariance matrix is the identity times that value, a vector of +diagonal entries for the covariance matrix, a two-dimensional array_like, +or a `Covariance` object. +""" + +_mvn_doc_frozen_callparams = "" + +_mvn_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mvn_docdict_params = { + '_mvn_doc_default_callparams': _mvn_doc_default_callparams, + '_mvn_doc_callparams_note': _mvn_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mvn_docdict_noparams = { + '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams, + '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_normal_gen(multi_rv_generic): + r"""A multivariate normal random variable. + + The `mean` keyword specifies the mean. The `cov` keyword specifies the + covariance matrix. + + Methods + ------- + pdf(x, mean=None, cov=1, allow_singular=False) + Probability density function. + logpdf(x, mean=None, cov=1, allow_singular=False) + Log of the probability density function. + cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5, lower_limit=None) + Cumulative distribution function. 
+ logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5) + Log of the cumulative distribution function. + rvs(mean=None, cov=1, size=1, random_state=None) + Draw random samples from a multivariate normal distribution. + entropy(mean=None, cov=1) + Compute the differential entropy of the multivariate normal. + fit(x, fix_mean=None, fix_cov=None) + Fit a multivariate normal distribution to data. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_mvn_doc_callparams_note)s + + The covariance matrix `cov` may be an instance of a subclass of + `Covariance`, e.g. `scipy.stats.CovViaPrecision`. If so, `allow_singular` + is ignored. + + Otherwise, `cov` must be a symmetric positive semidefinite + matrix when `allow_singular` is True; it must be (strictly) positive + definite when `allow_singular` is False. + Symmetry is not checked; only the lower triangular portion is used. + The determinant and inverse of `cov` are computed + as the pseudo-determinant and pseudo-inverse, respectively, so + that `cov` does not need to have full rank. + + The probability density function for `multivariate_normal` is + + .. math:: + + f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} + \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), + + where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, + :math:`k` the rank of :math:`\Sigma`. In case of singular :math:`\Sigma`, + SciPy extends this definition according to [1]_. + + .. versionadded:: 0.14.0 + + References + ---------- + .. 
[1] Multivariate Normal Distribution - Degenerate Case, Wikipedia, + https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Degenerate_case + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import multivariate_normal + + >>> x = np.linspace(0, 5, 10, endpoint=False) + >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y + array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, + 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) + >>> fig1 = plt.figure() + >>> ax = fig1.add_subplot(111) + >>> ax.plot(x, y) + >>> plt.show() + + Alternatively, the object may be called (as a function) to fix the mean + and covariance parameters, returning a "frozen" multivariate normal + random variable: + + >>> rv = multivariate_normal(mean=None, cov=1, allow_singular=False) + >>> # Frozen object with the same methods but holding the given + >>> # mean and covariance fixed. + + The input quantiles can be any shape of array, as long as the last + axis labels the components. This allows us for instance to + display the frozen pdf for a non-isotropic random variable in 2D as + follows: + + >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] + >>> pos = np.dstack((x, y)) + >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) + >>> fig2 = plt.figure() + >>> ax2 = fig2.add_subplot(111) + >>> ax2.contourf(x, y, rv.pdf(pos)) + + """ # noqa: E501 + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params) + + def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): + """Create a frozen multivariate normal distribution. + + See `multivariate_normal_frozen` for more information. 
+ """ + return multivariate_normal_frozen(mean, cov, + allow_singular=allow_singular, + seed=seed) + + def _process_parameters(self, mean, cov, allow_singular=True): + """ + Infer dimensionality from mean or covariance matrix, ensure that + mean and covariance are full vector resp. matrix. + """ + if isinstance(cov, _covariance.Covariance): + return self._process_parameters_Covariance(mean, cov) + else: + # Before `Covariance` classes were introduced, + # `multivariate_normal` accepted plain arrays as `cov` and used the + # following input validation. To avoid disturbing the behavior of + # `multivariate_normal` when plain arrays are used, we use the + # original input validation here. + dim, mean, cov = self._process_parameters_psd(None, mean, cov) + # After input validation, some methods then processed the arrays + # with a `_PSD` object and used that to perform computation. + # To avoid branching statements in each method depending on whether + # `cov` is an array or `Covariance` object, we always process the + # array with `_PSD`, and then use wrapper that satisfies the + # `Covariance` interface, `CovViaPSD`. 
+ psd = _PSD(cov, allow_singular=allow_singular) + cov_object = _covariance.CovViaPSD(psd) + return dim, mean, cov_object + + def _process_parameters_Covariance(self, mean, cov): + dim = cov.shape[-1] + mean = np.array([0.]) if mean is None else mean + message = (f"`cov` represents a covariance matrix in {dim} dimensions," + f"and so `mean` must be broadcastable to shape {(dim,)}") + try: + mean = np.broadcast_to(mean, dim) + except ValueError as e: + raise ValueError(message) from e + return dim, mean, cov + + def _process_parameters_psd(self, dim, mean, cov): + # Try to infer dimensionality + if dim is None: + if mean is None: + if cov is None: + dim = 1 + else: + cov = np.asarray(cov, dtype=float) + if cov.ndim < 2: + dim = 1 + else: + dim = cov.shape[0] + else: + mean = np.asarray(mean, dtype=float) + dim = mean.size + else: + if not np.isscalar(dim): + raise ValueError("Dimension of random variable must be " + "a scalar.") + + # Check input sizes and return full arrays for mean and cov if + # necessary + if mean is None: + mean = np.zeros(dim) + mean = np.asarray(mean, dtype=float) + + if cov is None: + cov = 1.0 + cov = np.asarray(cov, dtype=float) + + if dim == 1: + mean = mean.reshape(1) + cov = cov.reshape(1, 1) + + if mean.ndim != 1 or mean.shape[0] != dim: + raise ValueError("Array 'mean' must be a vector of length %d." 
% + dim) + if cov.ndim == 0: + cov = cov * np.eye(dim) + elif cov.ndim == 1: + cov = np.diag(cov) + elif cov.ndim == 2 and cov.shape != (dim, dim): + rows, cols = cov.shape + if rows != cols: + msg = ("Array 'cov' must be square if it is two dimensional," + f" but cov.shape = {str(cov.shape)}.") + else: + msg = ("Dimension mismatch: array 'cov' is of shape %s," + " but 'mean' is a vector of length %d.") + msg = msg % (str(cov.shape), len(mean)) + raise ValueError(msg) + elif cov.ndim > 2: + raise ValueError("Array 'cov' must be at most two-dimensional," + " but cov.ndim = %d" % cov.ndim) + + return dim, mean, cov + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. + """ + x = np.asarray(x, dtype=float) + + if x.ndim == 0: + x = x[np.newaxis] + elif x.ndim == 1: + if dim == 1: + x = x[:, np.newaxis] + else: + x = x[np.newaxis, :] + + return x + + def _logpdf(self, x, mean, cov_object): + """Log of the multivariate normal probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + cov_object : Covariance + An object representing the Covariance matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + log_det_cov, rank = cov_object.log_pdet, cov_object.rank + dev = x - mean + if dev.ndim > 1: + log_det_cov = log_det_cov[..., np.newaxis] + rank = rank[..., np.newaxis] + maha = np.sum(np.square(cov_object.whiten(dev)), axis=-1) + return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) + + def logpdf(self, x, mean=None, cov=1, allow_singular=False): + """Log of the multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + x = self._process_quantiles(x, dim) + out = self._logpdf(x, mean, cov_object) + if np.any(cov_object.rank < dim): + out_of_bounds = ~cov_object._support_mask(x-mean) + out[out_of_bounds] = -np.inf + return _squeeze_output(out) + + def pdf(self, x, mean=None, cov=1, allow_singular=False): + """Multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + x = self._process_quantiles(x, dim) + out = np.exp(self._logpdf(x, mean, cov_object)) + if np.any(cov_object.rank < dim): + out_of_bounds = ~cov_object._support_mask(x-mean) + out[out_of_bounds] = 0.0 + return _squeeze_output(out) + + def _cdf(self, x, mean, cov, maxpts, abseps, releps, lower_limit): + """Multivariate normal cumulative distribution function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the cumulative distribution function. + mean : ndarray + Mean of the distribution + cov : array_like + Covariance matrix of the distribution + maxpts : integer + The maximum number of points to use for integration + abseps : float + Absolute error tolerance + releps : float + Relative error tolerance + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. 
+ + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'cdf' instead. + + + .. versionadded:: 1.0.0 + + """ + lower = (np.full(mean.shape, -np.inf) + if lower_limit is None else lower_limit) + # In 2d, _mvn.mvnun accepts input in which `lower` bound elements + # are greater than `x`. Not so in other dimensions. Fix this by + # ensuring that lower bounds are indeed lower when passed, then + # set signs of resulting CDF manually. + b, a = np.broadcast_arrays(x, lower) + i_swap = b < a + signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a, b = a.copy(), b.copy() + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + n = x.shape[-1] + limits = np.concatenate((a, b), axis=-1) + + # mvnun expects 1-d arguments, so process points sequentially + def func1d(limits): + with MVN_LOCK: + return _mvn.mvnun(limits[:n], limits[n:], mean, cov, + maxpts, abseps, releps)[0] + + out = np.apply_along_axis(func1d, -1, limits) * signs + return _squeeze_output(out) + + def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5, *, lower_limit=None): + """Log of the multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts : integer, optional + The maximum number of points to use for integration + (default ``1000000*dim``) + abseps : float, optional + Absolute error tolerance (default 1e-5) + releps : float, optional + Relative error tolerance (default 1e-5) + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + + Returns + ------- + cdf : ndarray or scalar + Log of the cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. 
versionadded:: 1.0.0 + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + cov = cov_object.covariance + x = self._process_quantiles(x, dim) + if not maxpts: + maxpts = 1000000 * dim + cdf = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit) + # the log of a negative real is complex, and cdf can be negative + # if lower limit is greater than upper limit + cdf = cdf + 0j if np.any(cdf < 0) else cdf + out = np.log(cdf) + return out + + def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5, *, lower_limit=None): + """Multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts : integer, optional + The maximum number of points to use for integration + (default ``1000000*dim``) + abseps : float, optional + Absolute error tolerance (default 1e-5) + releps : float, optional + Relative error tolerance (default 1e-5) + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + + Returns + ------- + cdf : ndarray or scalar + Cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. versionadded:: 1.0.0 + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + cov = cov_object.covariance + x = self._process_quantiles(x, dim) + if not maxpts: + maxpts = 1000000 * dim + out = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit) + return out + + def rvs(self, mean=None, cov=1, size=1, random_state=None): + """Draw random samples from a multivariate normal distribution. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). 
+ %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the random variable. + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov_object = self._process_parameters(mean, cov) + random_state = self._get_random_state(random_state) + + if isinstance(cov_object, _covariance.CovViaPSD): + cov = cov_object.covariance + out = random_state.multivariate_normal(mean, cov, size) + out = _squeeze_output(out) + else: + size = size or tuple() + if not np.iterable(size): + size = (size,) + shape = tuple(size) + (cov_object.shape[-1],) + x = random_state.normal(size=shape) + out = mean + cov_object.colorize(x) + return out + + def entropy(self, mean=None, cov=1): + """Compute the differential entropy of the multivariate normal. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the multivariate normal distribution + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov_object = self._process_parameters(mean, cov) + return 0.5 * (cov_object.rank * (_LOG_2PI + 1) + cov_object.log_pdet) + + def fit(self, x, fix_mean=None, fix_cov=None): + """Fit a multivariate normal distribution to data. + + Parameters + ---------- + x : ndarray (m, n) + Data the distribution is fitted to. Must have two axes. + The first axis of length `m` represents the number of vectors + the distribution is fitted to. The second axis of length `n` + determines the dimensionality of the fitted distribution. + fix_mean : ndarray(n, ) + Fixed mean vector. Must have length `n`. + fix_cov: ndarray (n, n) + Fixed covariance matrix. Must have shape ``(n, n)``. 
+ + Returns + ------- + mean : ndarray (n, ) + Maximum likelihood estimate of the mean vector + cov : ndarray (n, n) + Maximum likelihood estimate of the covariance matrix + + """ + # input validation for data to be fitted + x = np.asarray(x) + if x.ndim != 2: + raise ValueError("`x` must be two-dimensional.") + + n_vectors, dim = x.shape + + # parameter estimation + # reference: https://home.ttic.edu/~shubhendu/Slides/Estimation.pdf + if fix_mean is not None: + # input validation for `fix_mean` + fix_mean = np.atleast_1d(fix_mean) + if fix_mean.shape != (dim, ): + msg = ("`fix_mean` must be a one-dimensional array the same " + "length as the dimensionality of the vectors `x`.") + raise ValueError(msg) + mean = fix_mean + else: + mean = x.mean(axis=0) + + if fix_cov is not None: + # input validation for `fix_cov` + fix_cov = np.atleast_2d(fix_cov) + # validate shape + if fix_cov.shape != (dim, dim): + msg = ("`fix_cov` must be a two-dimensional square array " + "of same side length as the dimensionality of the " + "vectors `x`.") + raise ValueError(msg) + # validate positive semidefiniteness + # a trimmed down copy from _PSD + s, u = scipy.linalg.eigh(fix_cov, lower=True, check_finite=True) + eps = _eigvalsh_to_eps(s) + if np.min(s) < -eps: + msg = "`fix_cov` must be symmetric positive semidefinite." + raise ValueError(msg) + cov = fix_cov + else: + centered_data = x - mean + cov = centered_data.T @ centered_data / n_vectors + return mean, cov + + +multivariate_normal = multivariate_normal_gen() + + +class multivariate_normal_frozen(multi_rv_frozen): + def __init__(self, mean=None, cov=1, allow_singular=False, seed=None, + maxpts=None, abseps=1e-5, releps=1e-5): + """Create a frozen multivariate normal distribution. + + Parameters + ---------- + mean : array_like, default: ``[0]`` + Mean of the distribution. + cov : array_like, default: ``[1]`` + Symmetric positive (semi)definite covariance matrix of the + distribution. 
+ allow_singular : bool, default: ``False`` + Whether to allow a singular covariance matrix. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + maxpts : integer, optional + The maximum number of points to use for integration of the + cumulative distribution function (default ``1000000*dim``) + abseps : float, optional + Absolute error tolerance for the cumulative distribution function + (default 1e-5) + releps : float, optional + Relative error tolerance for the cumulative distribution function + (default 1e-5) + + Examples + -------- + When called with the default parameters, this will create a 1D random + variable with mean 0 and covariance 1: + + >>> from scipy.stats import multivariate_normal + >>> r = multivariate_normal() + >>> r.mean + array([ 0.]) + >>> r.cov + array([[1.]]) + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = multivariate_normal_gen(seed) + self.dim, self.mean, self.cov_object = ( + self._dist._process_parameters(mean, cov, allow_singular)) + self.allow_singular = allow_singular or self.cov_object._allow_singular + if not maxpts: + maxpts = 1000000 * self.dim + self.maxpts = maxpts + self.abseps = abseps + self.releps = releps + + @property + def cov(self): + return self.cov_object.covariance + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._logpdf(x, self.mean, self.cov_object) + if np.any(self.cov_object.rank < self.dim): + out_of_bounds = ~self.cov_object._support_mask(x-self.mean) + out[out_of_bounds] = -np.inf + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def logcdf(self, x, *, lower_limit=None): + cdf = self.cdf(x, lower_limit=lower_limit) + # 
the log of a negative real is complex, and cdf can be negative + # if lower limit is greater than upper limit + cdf = cdf + 0j if np.any(cdf < 0) else cdf + out = np.log(cdf) + return out + + def cdf(self, x, *, lower_limit=None): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._cdf(x, self.mean, self.cov_object.covariance, + self.maxpts, self.abseps, self.releps, + lower_limit) + return _squeeze_output(out) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.mean, self.cov_object, size, random_state) + + def entropy(self): + """Computes the differential entropy of the multivariate normal. + + Returns + ------- + h : scalar + Entropy of the multivariate normal distribution + + """ + log_pdet = self.cov_object.log_pdet + rank = self.cov_object.rank + return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']: + method = multivariate_normal_gen.__dict__[name] + method_frozen = multivariate_normal_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + mvn_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params) + +_matnorm_doc_default_callparams = """\ +mean : array_like, optional + Mean of the distribution (default: `None`) +rowcov : array_like, optional + Among-row covariance matrix of the distribution (default: ``1``) +colcov : array_like, optional + Among-column covariance matrix of the distribution (default: ``1``) +""" + +_matnorm_doc_callparams_note = """\ +If `mean` is set to `None` then a matrix of zeros is used for the mean. +The dimensions of this matrix are inferred from the shape of `rowcov` and +`colcov`, if these are provided, or set to ``1`` if ambiguous. + +`rowcov` and `colcov` can be two-dimensional array_likes specifying the +covariance matrices directly. 
Alternatively, a one-dimensional array will +be be interpreted as the entries of a diagonal matrix, and a scalar or +zero-dimensional array will be interpreted as this value times the +identity matrix. +""" + +_matnorm_doc_frozen_callparams = "" + +_matnorm_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +matnorm_docdict_params = { + '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams, + '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +matnorm_docdict_noparams = { + '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams, + '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class matrix_normal_gen(multi_rv_generic): + r"""A matrix normal random variable. + + The `mean` keyword specifies the mean. The `rowcov` keyword specifies the + among-row covariance matrix. The 'colcov' keyword specifies the + among-column covariance matrix. + + Methods + ------- + pdf(X, mean=None, rowcov=1, colcov=1) + Probability density function. + logpdf(X, mean=None, rowcov=1, colcov=1) + Log of the probability density function. + rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None) + Draw random samples. + entropy(rowcol=1, colcov=1) + Differential entropy. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + The covariance matrices specified by `rowcov` and `colcov` must be + (symmetric) positive definite. If the samples in `X` are + :math:`m \times n`, then `rowcov` must be :math:`m \times m` and + `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`. + + The probability density function for `matrix_normal` is + + .. 
math:: + + f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}} + \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1} + (X-M)^T \right] \right), + + where :math:`M` is the mean, :math:`U` the among-row covariance matrix, + :math:`V` the among-column covariance matrix. + + The `allow_singular` behaviour of the `multivariate_normal` + distribution is not currently supported. Covariance matrices must be + full rank. + + The `matrix_normal` distribution is closely related to the + `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)` + (the vector formed by concatenating the columns of :math:`X`) has a + multivariate normal distribution with mean :math:`\mathrm{Vec}(M)` + and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker + product). Sampling and pdf evaluation are + :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but + :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal, + making this equivalent form algorithmically inefficient. + + .. versionadded:: 0.17.0 + + Examples + -------- + + >>> import numpy as np + >>> from scipy.stats import matrix_normal + + >>> M = np.arange(6).reshape(3,2); M + array([[0, 1], + [2, 3], + [4, 5]]) + >>> U = np.diag([1,2,3]); U + array([[1, 0, 0], + [0, 2, 0], + [0, 0, 3]]) + >>> V = 0.3*np.identity(2); V + array([[ 0.3, 0. ], + [ 0. 
, 0.3]]) + >>> X = M + 0.1; X + array([[ 0.1, 1.1], + [ 2.1, 3.1], + [ 4.1, 5.1]]) + >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V) + 0.023410202050005054 + + >>> # Equivalent multivariate normal + >>> from scipy.stats import multivariate_normal + >>> vectorised_X = X.T.flatten() + >>> equiv_mean = M.T.flatten() + >>> equiv_cov = np.kron(V,U) + >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov) + 0.023410202050005054 + + Alternatively, the object may be called (as a function) to fix the mean + and covariance parameters, returning a "frozen" matrix normal + random variable: + + >>> rv = matrix_normal(mean=None, rowcov=1, colcov=1) + >>> # Frozen object with the same methods but holding the given + >>> # mean and covariance fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params) + + def __call__(self, mean=None, rowcov=1, colcov=1, seed=None): + """Create a frozen matrix normal distribution. + + See `matrix_normal_frozen` for more information. + + """ + return matrix_normal_frozen(mean, rowcov, colcov, seed=seed) + + def _process_parameters(self, mean, rowcov, colcov): + """ + Infer dimensionality from mean or covariance matrices. Handle + defaults. Ensure compatible dimensions. 
+ """ + + # Process mean + if mean is not None: + mean = np.asarray(mean, dtype=float) + meanshape = mean.shape + if len(meanshape) != 2: + raise ValueError("Array `mean` must be two dimensional.") + if np.any(meanshape == 0): + raise ValueError("Array `mean` has invalid shape.") + + # Process among-row covariance + rowcov = np.asarray(rowcov, dtype=float) + if rowcov.ndim == 0: + if mean is not None: + rowcov = rowcov * np.identity(meanshape[0]) + else: + rowcov = rowcov * np.identity(1) + elif rowcov.ndim == 1: + rowcov = np.diag(rowcov) + rowshape = rowcov.shape + if len(rowshape) != 2: + raise ValueError("`rowcov` must be a scalar or a 2D array.") + if rowshape[0] != rowshape[1]: + raise ValueError("Array `rowcov` must be square.") + if rowshape[0] == 0: + raise ValueError("Array `rowcov` has invalid shape.") + numrows = rowshape[0] + + # Process among-column covariance + colcov = np.asarray(colcov, dtype=float) + if colcov.ndim == 0: + if mean is not None: + colcov = colcov * np.identity(meanshape[1]) + else: + colcov = colcov * np.identity(1) + elif colcov.ndim == 1: + colcov = np.diag(colcov) + colshape = colcov.shape + if len(colshape) != 2: + raise ValueError("`colcov` must be a scalar or a 2D array.") + if colshape[0] != colshape[1]: + raise ValueError("Array `colcov` must be square.") + if colshape[0] == 0: + raise ValueError("Array `colcov` has invalid shape.") + numcols = colshape[0] + + # Ensure mean and covariances compatible + if mean is not None: + if meanshape[0] != numrows: + raise ValueError("Arrays `mean` and `rowcov` must have the " + "same number of rows.") + if meanshape[1] != numcols: + raise ValueError("Arrays `mean` and `colcov` must have the " + "same number of columns.") + else: + mean = np.zeros((numrows, numcols)) + + dims = (numrows, numcols) + + return dims, mean, rowcov, colcov + + def _process_quantiles(self, X, dims): + """ + Adjust quantiles array so that last two axes labels the components of + each data point. 
+ """ + X = np.asarray(X, dtype=float) + if X.ndim == 2: + X = X[np.newaxis, :] + if X.shape[-2:] != dims: + raise ValueError("The shape of array `X` is not compatible " + "with the distribution parameters.") + return X + + def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov, + col_prec_rt, log_det_colcov): + """Log of the matrix normal probability density function. + + Parameters + ---------- + dims : tuple + Dimensions of the matrix variates + X : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + row_prec_rt : ndarray + A decomposition such that np.dot(row_prec_rt, row_prec_rt.T) + is the inverse of the among-row covariance matrix + log_det_rowcov : float + Logarithm of the determinant of the among-row covariance matrix + col_prec_rt : ndarray + A decomposition such that np.dot(col_prec_rt, col_prec_rt.T) + is the inverse of the among-column covariance matrix + log_det_colcov : float + Logarithm of the determinant of the among-column covariance matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + numrows, numcols = dims + roll_dev = np.moveaxis(X-mean, -1, 0) + scale_dev = np.tensordot(col_prec_rt.T, + np.dot(roll_dev, row_prec_rt), 1) + maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0) + return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov + + numrows*log_det_colcov + maha) + + def logpdf(self, X, mean=None, rowcov=1, colcov=1): + """Log of the matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. 
+ %(_matnorm_doc_default_callparams)s + + Returns + ------- + logpdf : ndarray + Log of the probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + X = self._process_quantiles(X, dims) + rowpsd = _PSD(rowcov, allow_singular=False) + colpsd = _PSD(colcov, allow_singular=False) + out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U, + colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X, mean=None, rowcov=1, colcov=1): + """Matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. + %(_matnorm_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + return np.exp(self.logpdf(X, mean, rowcov, colcov)) + + def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None): + """Draw random samples from a matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `dims`), where `dims` is the + dimension of the random matrices. + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + size = int(size) + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + rowchol = scipy.linalg.cholesky(rowcov, lower=True) + colchol = scipy.linalg.cholesky(colcov, lower=True) + random_state = self._get_random_state(random_state) + # We aren't generating standard normal variates with size=(size, + # dims[0], dims[1]) directly to ensure random variates remain backwards + # compatible. See https://github.com/scipy/scipy/pull/12312 for more + # details. 
+ std_norm = random_state.standard_normal( + size=(dims[1], size, dims[0]) + ).transpose(1, 2, 0) + out = mean + np.einsum('jp,ipq,kq->ijk', + rowchol, std_norm, colchol, + optimize=True) + if size == 1: + out = out.reshape(mean.shape) + return out + + def entropy(self, rowcov=1, colcov=1): + """Log of the matrix normal probability density function. + + Parameters + ---------- + rowcov : array_like, optional + Among-row covariance matrix of the distribution (default: ``1``) + colcov : array_like, optional + Among-column covariance matrix of the distribution (default: ``1``) + + Returns + ------- + entropy : float + Entropy of the distribution + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + dummy_mean = np.zeros((rowcov.shape[0], colcov.shape[0])) + dims, _, rowcov, colcov = self._process_parameters(dummy_mean, + rowcov, + colcov) + rowpsd = _PSD(rowcov, allow_singular=False) + colpsd = _PSD(colcov, allow_singular=False) + + return self._entropy(dims, rowpsd.log_pdet, colpsd.log_pdet) + + def _entropy(self, dims, row_cov_logdet, col_cov_logdet): + n, p = dims + return (0.5 * n * p * (1 + _LOG_2PI) + 0.5 * p * row_cov_logdet + + 0.5 * n * col_cov_logdet) + + +matrix_normal = matrix_normal_gen() + + +class matrix_normal_frozen(multi_rv_frozen): + """ + Create a frozen matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is `None` the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import matrix_normal + + >>> distn = matrix_normal(mean=np.zeros((3,3))) + >>> X = distn.rvs(); X + array([[-0.02976962, 0.93339138, -0.09663178], + [ 0.67405524, 0.28250467, -0.93308929], + [-0.31144782, 0.74535536, 1.30412916]]) + >>> distn.pdf(X) + 2.5160642368346784e-05 + >>> distn.logpdf(X) + -10.590229595124615 + """ + + def __init__(self, mean=None, rowcov=1, colcov=1, seed=None): + self._dist = matrix_normal_gen(seed) + self.dims, self.mean, self.rowcov, self.colcov = \ + self._dist._process_parameters(mean, rowcov, colcov) + self.rowpsd = _PSD(self.rowcov, allow_singular=False) + self.colpsd = _PSD(self.colcov, allow_singular=False) + + def logpdf(self, X): + X = self._dist._process_quantiles(X, self.dims) + out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U, + self.rowpsd.log_pdet, self.colpsd.U, + self.colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X): + return np.exp(self.logpdf(X)) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.mean, self.rowcov, self.colcov, size, + random_state) + + def entropy(self): + return self._dist._entropy(self.dims, self.rowpsd.log_pdet, + self.colpsd.log_pdet) + + +# Set frozen generator docstrings from corresponding docstrings in +# matrix_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'entropy']: + method = matrix_normal_gen.__dict__[name] + method_frozen = matrix_normal_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + matnorm_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params) + +_dirichlet_doc_default_callparams = """\ +alpha : array_like + The concentration parameters. The number of entries determines the + dimensionality of the distribution. 
+""" +_dirichlet_doc_frozen_callparams = "" + +_dirichlet_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +dirichlet_docdict_params = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, + '_doc_random_state': _doc_random_state +} + +dirichlet_docdict_noparams = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, + '_doc_random_state': _doc_random_state +} + + +def _dirichlet_check_parameters(alpha): + alpha = np.asarray(alpha) + if np.min(alpha) <= 0: + raise ValueError("All parameters must be greater than 0") + elif alpha.ndim != 1: + raise ValueError("Parameter vector 'a' must be one dimensional, " + f"but a.shape = {alpha.shape}.") + return alpha + + +def _dirichlet_check_input(alpha, x): + x = np.asarray(x) + + if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]: + raise ValueError("Vector 'x' must have either the same number " + "of entries as, or one entry fewer than, " + f"parameter vector 'a', but alpha.shape = {alpha.shape} " + f"and x.shape = {x.shape}.") + + if x.shape[0] != alpha.shape[0]: + xk = np.array([1 - np.sum(x, 0)]) + if xk.ndim == 1: + x = np.append(x, xk) + elif xk.ndim == 2: + x = np.vstack((x, xk)) + else: + raise ValueError("The input must be one dimensional or a two " + "dimensional matrix containing the entries.") + + if np.min(x) < 0: + raise ValueError("Each entry in 'x' must be greater than or equal " + "to zero.") + + if np.max(x) > 1: + raise ValueError("Each entry in 'x' must be smaller or equal one.") + + # Check x_i > 0 or alpha_i > 1 + xeq0 = (x == 0) + alphalt1 = (alpha < 1) + if x.shape != alpha.shape: + alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape) + chk = np.logical_and(xeq0, alphalt1) + + if np.sum(chk): + raise ValueError("Each entry in 'x' must be greater than zero if its " + "alpha is less than one.") + + if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any(): + raise ValueError("The 
input vector 'x' must lie within the normal " + f"simplex. but np.sum(x, 0) = {np.sum(x, 0)}.") + + return x + + +def _lnB(alpha): + r"""Internal helper function to compute the log of the useful quotient. + + .. math:: + + B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)} + {\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)} + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + B : scalar + Helper quotient, internal use only + + """ + return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha)) + + +class dirichlet_gen(multi_rv_generic): + r"""A Dirichlet random variable. + + The ``alpha`` keyword specifies the concentration parameters of the + distribution. + + .. versionadded:: 0.15.0 + + Methods + ------- + pdf(x, alpha) + Probability density function. + logpdf(x, alpha) + Log of the probability density function. + rvs(alpha, size=1, random_state=None) + Draw random samples from a Dirichlet distribution. + mean(alpha) + The mean of the Dirichlet distribution + var(alpha) + The variance of the Dirichlet distribution + cov(alpha) + The covariance of the Dirichlet distribution + entropy(alpha) + Compute the differential entropy of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + Each :math:`\alpha` entry must be positive. The distribution has only + support on the simplex defined by + + .. math:: + \sum_{i=1}^{K} x_i = 1 + + where :math:`0 < x_i < 1`. + + If the quantiles don't lie within the simplex, a ValueError is raised. + + The probability density function for `dirichlet` is + + .. math:: + + f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1} + + where + + .. 
math:: + + \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)} + {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)} + + and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the + concentration parameters and :math:`K` is the dimension of the space + where :math:`x` takes values. + + Note that the `dirichlet` interface is somewhat inconsistent. + The array returned by the rvs function is transposed + with respect to the format expected by the pdf and logpdf. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import dirichlet + + Generate a dirichlet random variable + + >>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles + >>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters + >>> dirichlet.pdf(quantiles, alpha) + 0.2843831684937255 + + The same PDF but following a log scale + + >>> dirichlet.logpdf(quantiles, alpha) + -1.2574327653159187 + + Once we specify the dirichlet distribution + we can then calculate quantities of interest + + >>> dirichlet.mean(alpha) # get the mean of the distribution + array([0.01960784, 0.24509804, 0.73529412]) + >>> dirichlet.var(alpha) # get variance + array([0.00089829, 0.00864603, 0.00909517]) + >>> dirichlet.entropy(alpha) # calculate the differential entropy + -4.3280162474082715 + + We can also return random samples from the distribution + + >>> dirichlet.rvs(alpha, size=1, random_state=1) + array([[0.00766178, 0.24670518, 0.74563305]]) + >>> dirichlet.rvs(alpha, size=2, random_state=2) + array([[0.01639427, 0.1292273 , 0.85437844], + [0.00156917, 0.19033695, 0.80809388]]) + + Alternatively, the object may be called (as a function) to fix + concentration parameters, returning a "frozen" Dirichlet + random variable: + + >>> rv = dirichlet(alpha) + >>> # Frozen object with the same methods but holding the given + >>> # concentration parameters fixed. 
+ + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) + + def __call__(self, alpha, seed=None): + return dirichlet_frozen(alpha, seed=seed) + + def _logpdf(self, x, alpha): + """Log of the Dirichlet probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + %(_dirichlet_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + lnB = _lnB(alpha) + return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0) + + def logpdf(self, x, alpha): + """Log of the Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = self._logpdf(x, alpha) + return _squeeze_output(out) + + def pdf(self, x, alpha): + """The Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + The probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = np.exp(self._logpdf(x, alpha)) + return _squeeze_output(out) + + def mean(self, alpha): + """Mean of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + mu : ndarray or scalar + Mean of the Dirichlet distribution. 
+ + """ + alpha = _dirichlet_check_parameters(alpha) + + out = alpha / (np.sum(alpha)) + return _squeeze_output(out) + + def var(self, alpha): + """Variance of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + v : ndarray or scalar + Variance of the Dirichlet distribution. + + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) + return _squeeze_output(out) + + def cov(self, alpha): + """Covariance matrix of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + cov : ndarray + The covariance matrix of the distribution. + """ + + alpha = _dirichlet_check_parameters(alpha) + alpha0 = np.sum(alpha) + a = alpha / alpha0 + + cov = (np.diag(a) - np.outer(a, a)) / (alpha0 + 1) + return _squeeze_output(cov) + + def entropy(self, alpha): + """ + Differential entropy of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Dirichlet distribution + + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + lnB = _lnB(alpha) + K = alpha.shape[0] + + out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( + (alpha - 1) * scipy.special.psi(alpha)) + return _squeeze_output(out) + + def rvs(self, alpha, size=1, random_state=None): + """ + Draw random samples from a Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + size : int, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the random variable. 
+ + """ + alpha = _dirichlet_check_parameters(alpha) + random_state = self._get_random_state(random_state) + return random_state.dirichlet(alpha, size=size) + + +dirichlet = dirichlet_gen() + + +class dirichlet_frozen(multi_rv_frozen): + def __init__(self, alpha, seed=None): + self.alpha = _dirichlet_check_parameters(alpha) + self._dist = dirichlet_gen(seed) + + def logpdf(self, x): + return self._dist.logpdf(x, self.alpha) + + def pdf(self, x): + return self._dist.pdf(x, self.alpha) + + def mean(self): + return self._dist.mean(self.alpha) + + def var(self): + return self._dist.var(self.alpha) + + def cov(self): + return self._dist.cov(self.alpha) + + def entropy(self): + return self._dist.entropy(self.alpha) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.alpha, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'cov', 'entropy']: + method = dirichlet_gen.__dict__[name] + method_frozen = dirichlet_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, dirichlet_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) + + +_wishart_doc_default_callparams = """\ +df : int + Degrees of freedom, must be greater than or equal to dimension of the + scale matrix +scale : array_like + Symmetric positive definite scale matrix of the distribution +""" + +_wishart_doc_callparams_note = "" + +_wishart_doc_frozen_callparams = "" + +_wishart_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +wishart_docdict_params = { + '_doc_default_callparams': _wishart_doc_default_callparams, + '_doc_callparams_note': _wishart_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +wishart_docdict_noparams = { + '_doc_default_callparams': 
_wishart_doc_frozen_callparams, + '_doc_callparams_note': _wishart_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class wishart_gen(multi_rv_generic): + r"""A Wishart random variable. + + The `df` keyword specifies the degrees of freedom. The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal precision matrix (the inverse of the covariance + matrix). These arguments must satisfy the relationship + ``df > scale.ndim - 1``, but see notes on using the `rvs` method with + ``df < scale.ndim``. + + Methods + ------- + pdf(x, df, scale) + Probability density function. + logpdf(x, df, scale) + Log of the probability density function. + rvs(df, scale, size=1, random_state=None) + Draw random samples from a Wishart distribution. + entropy() + Compute the differential entropy of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Raises + ------ + scipy.linalg.LinAlgError + If the scale matrix `scale` is not positive definite. + + See Also + -------- + invwishart, chi2 + + Notes + ----- + %(_doc_callparams_note)s + + The scale matrix `scale` must be a symmetric positive definite + matrix. Singular matrices, including the symmetric positive semi-definite + case, are not supported. Symmetry is not checked; only the lower triangular + portion is used. + + The Wishart distribution is often denoted + + .. math:: + + W_p(\nu, \Sigma) + + where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the + :math:`p \times p` scale matrix. + + The probability density function for `wishart` has support over positive + definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then + its PDF is given by: + + .. 
math:: + + f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} } + |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )} + \exp\left( -tr(\Sigma^{-1} S) / 2 \right) + + If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then + :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart). + + If the scale matrix is 1-dimensional and equal to one, then the Wishart + distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)` + distribution. + + The algorithm [2]_ implemented by the `rvs` method may + produce numerically singular matrices with :math:`p - 1 < \nu < p`; the + user may wish to check for this condition and generate replacement samples + as necessary. + + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", + Wiley, 1983. + .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate + Generator", Applied Statistics, vol. 21, pp. 341-345, 1972. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import wishart, chi2 + >>> x = np.linspace(1e-5, 8, 100) + >>> w = wishart.pdf(x, df=3, scale=1); w[:5] + array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) + >>> c = chi2.pdf(x, 3); c[:5] + array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) + >>> plt.plot(x, w) + >>> plt.show() + + The input quantiles can be any shape of array, as long as the last + axis labels the components. + + Alternatively, the object may be called (as a function) to fix the degrees + of freedom and scale parameters, returning a "frozen" Wishart random + variable: + + >>> rv = wishart(df=1, scale=1) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. 
+ + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) + + def __call__(self, df=None, scale=None, seed=None): + """Create a frozen Wishart distribution. + + See `wishart_frozen` for more information. + """ + return wishart_frozen(df, scale, seed) + + def _process_parameters(self, df, scale): + if scale is None: + scale = 1.0 + scale = np.asarray(scale, dtype=float) + + if scale.ndim == 0: + scale = scale[np.newaxis, np.newaxis] + elif scale.ndim == 1: + scale = np.diag(scale) + elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]: + raise ValueError("Array 'scale' must be square if it is two dimensional," + f" but scale.scale = {str(scale.shape)}.") + elif scale.ndim > 2: + raise ValueError("Array 'scale' must be at most two-dimensional," + " but scale.ndim = %d" % scale.ndim) + + dim = scale.shape[0] + + if df is None: + df = dim + elif not np.isscalar(df): + raise ValueError("Degrees of freedom must be a scalar.") + elif df <= dim - 1: + raise ValueError("Degrees of freedom must be greater than the " + "dimension of scale matrix minus 1.") + + return dim, df, scale + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. 
+ """ + x = np.asarray(x, dtype=float) + + if x.ndim == 0: + x = x * np.eye(dim)[:, :, np.newaxis] + if x.ndim == 1: + if dim == 1: + x = x[np.newaxis, np.newaxis, :] + else: + x = np.diag(x)[:, :, np.newaxis] + elif x.ndim == 2: + if not x.shape[0] == x.shape[1]: + raise ValueError( + "Quantiles must be square if they are two dimensional," + f" but x.shape = {str(x.shape)}.") + x = x[:, :, np.newaxis] + elif x.ndim == 3: + if not x.shape[0] == x.shape[1]: + raise ValueError( + "Quantiles must be square in the first two dimensions " + f"if they are three dimensional, but x.shape = {str(x.shape)}.") + elif x.ndim > 3: + raise ValueError("Quantiles must be at most two-dimensional with" + " an additional dimension for multiple" + "components, but x.ndim = %d" % x.ndim) + + # Now we have 3-dim array; should have shape [dim, dim, *] + if not x.shape[0:2] == (dim, dim): + raise ValueError('Quantiles have incompatible dimensions: should' + f' be {(dim, dim)}, got {x.shape[0:2]}.') + + return x + + def _process_size(self, size): + size = np.asarray(size) + + if size.ndim == 0: + size = size[np.newaxis] + elif size.ndim > 1: + raise ValueError('Size must be an integer or tuple of integers;' + ' thus must have dimension <= 1.' + f' Got size.ndim = {str(tuple(size))}') + n = size.prod() + shape = tuple(size) + + return n, shape + + def _logpdf(self, x, dim, df, scale, log_det_scale, C): + """Log of the Wishart probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + scale : ndarray + Scale matrix + log_det_scale : float + Logarithm of the determinant of the scale matrix + C : ndarray + Cholesky factorization of the scale matrix, lower triangular. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. 
+ + """ + # log determinant of x + # Note: x has components along the last axis, so that x.T has + # components alone the 0-th axis. Then since det(A) = det(A'), this + # gives us a 1-dim vector of determinants + + # Retrieve tr(scale^{-1} x) + log_det_x = np.empty(x.shape[-1]) + scale_inv_x = np.empty(x.shape) + tr_scale_inv_x = np.empty(x.shape[-1]) + for i in range(x.shape[-1]): + _, log_det_x[i] = self._cholesky_logdet(x[:, :, i]) + scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i]) + tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace() + + # Log PDF + out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) - + (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + + multigammaln(0.5*df, dim))) + + return out + + def logpdf(self, x, df, scale): + """Log of the Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + + # Cholesky decomposition of scale, get log(det(scale)) + C, log_det_scale = self._cholesky_logdet(scale) + + out = self._logpdf(x, dim, df, scale, log_det_scale, C) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. 
+ %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """Mean of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + return df * scale + + def mean(self, df, scale): + """Mean of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) + + def _mode(self, dim, df, scale): + """Mode of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. + + """ + if df >= dim + 1: + out = (df-dim-1) * scale + else: + out = None + return out + + def mode(self, df, scale): + """Mode of the Wishart distribution + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float or None + The Mode of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _var(self, dim, df, scale): + """Variance of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. 
+ + """ + var = scale**2 + diag = scale.diagonal() # 1 x dim array + var += np.outer(diag, diag) + var *= df + return var + + def var(self, df, scale): + """Variance of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) + + def _standard_rvs(self, n, shape, dim, df, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. 
+ + """ + # Random normal variates for off-diagonal elements + n_tril = dim * (dim-1) // 2 + covariances = random_state.normal( + size=n*n_tril).reshape(shape+(n_tril,)) + + # Random chi-square variates for diagonal elements + variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 + for i in range(dim)]].reshape((dim,) + + shape[::-1]).T) + + # Create the A matri(ces) - lower triangular + A = np.zeros(shape + (dim, dim)) + + # Input the covariances + size_idx = tuple([slice(None, None, None)]*len(shape)) + tril_idx = np.tril_indices(dim, k=-1) + A[size_idx + tril_idx] = covariances + + # Input the variances + diag_idx = np.diag_indices(dim) + A[size_idx + diag_idx] = variances + + return A + + def _rvs(self, n, shape, dim, df, C, random_state): + """Draw random samples from a Wishart distribution. + + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + C : ndarray + Cholesky factorization of the scale matrix, lower triangular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Calculate the matrices A, which are actually lower triangular + # Cholesky factorizations of a matrix B such that B ~ W(df, I) + A = self._standard_rvs(n, shape, dim, df, random_state) + + # Calculate SA = C A A' C', where SA ~ W(df, scale) + # Note: this is the product of a (lower) (lower) (lower)' (lower)' + # or, denoting B = AA', it is C B C' where C is the lower + # triangular Cholesky factorization of the scale matrix. + # this appears to conflict with the instructions in [1]_, which + # suggest that it should be D' B D where D is the lower + # triangular factorization of the scale matrix. 
However, it is + # meant to refer to the Bartlett (1933) representation of a + # Wishart random variate as L A A' L' where L is lower triangular + # so it appears that understanding D' to be upper triangular + # is either a typo in or misreading of [1]_. + for index in np.ndindex(shape): + CA = np.dot(C, A[index]) + A[index] = np.dot(CA, CA.T) + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """Draw random samples from a Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (``dim``, ``dim``), where + ``dim`` is the dimension of the scale matrix. + + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Cholesky decomposition of scale + C = scipy.linalg.cholesky(scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def _entropy(self, dim, df, log_det_scale): + """Compute the differential entropy of the Wishart. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + log_det_scale : float + Logarithm of the determinant of the scale matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'entropy' instead. + + """ + return ( + 0.5 * (dim+1) * log_det_scale + + 0.5 * dim * (dim+1) * _LOG_2 + + multigammaln(0.5*df, dim) - + 0.5 * (df - dim - 1) * np.sum( + [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] + ) + + 0.5 * df * dim + ) + + def entropy(self, df, scale): + """Compute the differential entropy of the Wishart. 
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Wishart distribution + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + _, log_det_scale = self._cholesky_logdet(scale) + return self._entropy(dim, df, log_det_scale) + + def _cholesky_logdet(self, scale): + """Compute Cholesky decomposition and determine (log(det(scale)). + + Parameters + ---------- + scale : ndarray + Scale matrix. + + Returns + ------- + c_decomp : ndarray + The Cholesky decomposition of `scale`. + logdet : scalar + The log of the determinant of `scale`. + + Notes + ----- + This computation of ``logdet`` is equivalent to + ``np.linalg.slogdet(scale)``. It is ~2x faster though. + + """ + c_decomp = scipy.linalg.cholesky(scale, lower=True) + logdet = 2 * np.sum(np.log(c_decomp.diagonal())) + return c_decomp, logdet + + +wishart = wishart_gen() + + +class wishart_frozen(multi_rv_frozen): + """Create a frozen Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. 
+ + """ + def __init__(self, df, scale, seed=None): + self._dist = wishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale) + self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + + out = self._dist._logpdf(x, self.dim, self.df, self.scale, + self.log_det_scale, self.C) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + return _squeeze_output(out) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.log_det_scale) + + +# Set frozen generator docstrings from corresponding docstrings in +# Wishart and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: + method = wishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + + +class invwishart_gen(wishart_gen): + r"""An inverse Wishart random variable. + + The `df` keyword specifies the degrees of freedom. The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal covariance matrix. + + Methods + ------- + pdf(x, df, scale) + Probability density function. 
+ logpdf(x, df, scale) + Log of the probability density function. + rvs(df, scale, size=1, random_state=None) + Draw random samples from an inverse Wishart distribution. + entropy(df, scale) + Differential entropy of the distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Raises + ------ + scipy.linalg.LinAlgError + If the scale matrix `scale` is not positive definite. + + See Also + -------- + wishart + + Notes + ----- + %(_doc_callparams_note)s + + The scale matrix `scale` must be a symmetric positive definite + matrix. Singular matrices, including the symmetric positive semi-definite + case, are not supported. Symmetry is not checked; only the lower triangular + portion is used. + + The inverse Wishart distribution is often denoted + + .. math:: + + W_p^{-1}(\nu, \Psi) + + where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the + :math:`p \times p` scale matrix. + + The probability density function for `invwishart` has support over positive + definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, + then its PDF is given by: + + .. math:: + + f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } + |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} + \exp\left( -tr(\Sigma S^{-1}) / 2 \right) + + If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then + :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). + + If the scale matrix is 1-dimensional and equal to one, then the inverse + Wishart distribution :math:`W_1(\nu, 1)` collapses to the + inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` + and scale = :math:`\frac{1}{2}`. + + Instead of inverting a randomly generated Wishart matrix as described in [2], + here the algorithm in [4] is used to directly generate a random inverse-Wishart + matrix without inversion. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] M.L. 
Eaton, "Multivariate Statistics: A Vector Space Approach", + Wiley, 1983. + .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications + in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, + 1985. + .. [3] Gupta, M. and Srivastava, S. "Parametric Bayesian Estimation of + Differential Entropy and Relative Entropy". Entropy 12, 818 - 843. + 2010. + .. [4] S.D. Axen, "Efficiently generating inverse-Wishart matrices and + their Cholesky factors", :arXiv:`2310.15884v1`. 2023. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import invwishart, invgamma + >>> x = np.linspace(0.01, 1, 100) + >>> iw = invwishart.pdf(x, df=6, scale=1) + >>> iw[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> ig = invgamma.pdf(x, 6/2., scale=1./2) + >>> ig[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> plt.plot(x, iw) + >>> plt.show() + + The input quantiles can be any shape of array, as long as the last + axis labels the components. + + Alternatively, the object may be called (as a function) to fix the degrees + of freedom and scale parameters, returning a "frozen" inverse Wishart + random variable: + + >>> rv = invwishart(df=1, scale=1) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) + + def __call__(self, df=None, scale=None, seed=None): + """Create a frozen inverse Wishart distribution. + + See `invwishart_frozen` for more information. + + """ + return invwishart_frozen(df, scale, seed) + + def _logpdf(self, x, dim, df, log_det_scale, C): + """Log of the inverse Wishart probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function. 
+ dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + log_det_scale : float + Logarithm of the determinant of the scale matrix + C : ndarray + Cholesky factorization of the scale matrix, lower triangular. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + # Retrieve tr(scale x^{-1}) + log_det_x = np.empty(x.shape[-1]) + tr_scale_x_inv = np.empty(x.shape[-1]) + trsm = get_blas_funcs(('trsm'), (x,)) + if dim > 1: + for i in range(x.shape[-1]): + Cx, log_det_x[i] = self._cholesky_logdet(x[:, :, i]) + A = trsm(1., Cx, C, side=0, lower=True) + tr_scale_x_inv[i] = np.linalg.norm(A)**2 + else: + log_det_x[:] = np.log(x[0, 0]) + tr_scale_x_inv[:] = C[0, 0]**2 / x[0, 0] + + # Log PDF + out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - + (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - + multigammaln(0.5*df, dim)) + + return out + + def logpdf(self, x, df, scale): + """Log of the inverse Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + C, log_det_scale = self._cholesky_logdet(scale) + out = self._logpdf(x, dim, df, log_det_scale, C) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """Inverse Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. 
+ %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """Mean of the inverse Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + if df > dim + 1: + out = scale / (df - dim - 1) + else: + out = None + return out + + def mean(self, df, scale): + """Mean of the inverse Wishart distribution. + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus one. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float or None + The mean of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _mode(self, dim, df, scale): + """Mode of the inverse Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. + + """ + return scale / (df + dim + 1) + + def mode(self, df, scale): + """Mode of the inverse Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float + The Mode of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) + + def _var(self, dim, df, scale): + """Variance of the inverse Wishart distribution. 
+ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. + + """ + if df > dim + 3: + var = (df - dim + 1) * scale**2 + diag = scale.diagonal() # 1 x dim array + var += (df - dim - 1) * np.outer(diag, diag) + var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) + else: + var = None + return var + + def var(self, df, scale): + """Variance of the inverse Wishart distribution. + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus three. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _inv_standard_rvs(self, n, shape, dim, df, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Returns + ------- + A : ndarray + Random variates of shape (`shape`) + (``dim``, ``dim``). + Each slice `A[..., :, :]` is lower-triangular, and its + inverse is the lower Cholesky factor of a draw from + `invwishart(df, np.eye(dim))`. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. 
+ + """ + A = np.zeros(shape + (dim, dim)) + + # Random normal variates for off-diagonal elements + tri_rows, tri_cols = np.tril_indices(dim, k=-1) + n_tril = dim * (dim-1) // 2 + A[..., tri_rows, tri_cols] = random_state.normal( + size=(*shape, n_tril), + ) + + # Random chi variates for diagonal elements + rows = np.arange(dim) + chi_dfs = (df - dim + 1) + rows + A[..., rows, rows] = random_state.chisquare( + df=chi_dfs, size=(*shape, dim), + )**0.5 + + return A + + def _rvs(self, n, shape, dim, df, C, random_state): + """Draw random samples from an inverse Wishart distribution. + + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + C : ndarray + Cholesky factorization of the scale matrix, lower triangular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Get random draws A such that inv(A) ~ iW(df, I) + A = self._inv_standard_rvs(n, shape, dim, df, random_state) + + # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) + trsm = get_blas_funcs(('trsm'), (A,)) + trmm = get_blas_funcs(('trmm'), (A,)) + + for index in np.ndindex(A.shape[:-2]): + if dim > 1: + # Calculate CA + # Get CA = C A^{-1} via triangular solver + CA = trsm(1., A[index], C, side=1, lower=True) + # get SA + A[index] = trmm(1., CA, CA, side=1, lower=True, trans_a=True) + else: + A[index][0, 0] = (C[0, 0] / A[index][0, 0])**2 + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """Draw random samples from an inverse Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). 
+ %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (``dim``, ``dim``), where + ``dim`` is the dimension of the scale matrix. + + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Cholesky decomposition of scale + C = scipy.linalg.cholesky(scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def _entropy(self, dim, df, log_det_scale): + # reference: eq. (17) from ref. 3 + psi_eval_points = [0.5 * (df - dim + i) for i in range(1, dim + 1)] + psi_eval_points = np.asarray(psi_eval_points) + return multigammaln(0.5 * df, dim) + 0.5 * dim * df + \ + 0.5 * (dim + 1) * (log_det_scale - _LOG_2) - \ + 0.5 * (df + dim + 1) * \ + psi(psi_eval_points, out=psi_eval_points).sum() + + def entropy(self, df, scale): + dim, df, scale = self._process_parameters(df, scale) + _, log_det_scale = self._cholesky_logdet(scale) + return self._entropy(dim, df, log_det_scale) + + +invwishart = invwishart_gen() + + +class invwishart_frozen(multi_rv_frozen): + def __init__(self, df, scale, seed=None): + """Create a frozen inverse Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is None the `numpy.random.Generator` singleton is used. + If `seed` is an int, a new ``Generator`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` instance then that instance is + used. 
+ + """ + self._dist = invwishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale + ) + + # Get the determinant via Cholesky factorization + self.C = scipy.linalg.cholesky(self.scale, lower=True) + self.log_det_scale = 2 * np.sum(np.log(self.C.diagonal())) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._logpdf(x, self.dim, self.df, + self.log_det_scale, self.C) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + + return _squeeze_output(out) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.log_det_scale) + + +# Set frozen generator docstrings from corresponding docstrings in +# inverse Wishart and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: + method = invwishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + +_multinomial_doc_default_callparams = """\ +n : int + Number of trials +p : array_like + Probability of a trial falling into each category; should sum to 1 +""" + +_multinomial_doc_callparams_note = """\ +`n` should be a nonnegative integer. Each element of `p` should be in the +interval :math:`[0,1]` and the elements should sum to 1. 
If they do not sum to +1, the last element of the `p` array is not used and is replaced with the +remaining probability left over from the earlier elements. +""" + +_multinomial_doc_frozen_callparams = "" + +_multinomial_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +multinomial_docdict_params = { + '_doc_default_callparams': _multinomial_doc_default_callparams, + '_doc_callparams_note': _multinomial_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +multinomial_docdict_noparams = { + '_doc_default_callparams': _multinomial_doc_frozen_callparams, + '_doc_callparams_note': _multinomial_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multinomial_gen(multi_rv_generic): + r"""A multinomial random variable. + + Methods + ------- + pmf(x, n, p) + Probability mass function. + logpmf(x, n, p) + Log of the probability mass function. + rvs(n, p, size=1, random_state=None) + Draw random samples from a multinomial distribution. + entropy(n, p) + Compute the entropy of the multinomial distribution. + cov(n, p) + Compute the covariance matrix of the multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_callparams_note)s + + The probability mass function for `multinomial` is + + .. math:: + + f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k}, + + supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a + nonnegative integer and their sum is :math:`n`. + + .. 
versionadded:: 0.19.0 + + Examples + -------- + + >>> from scipy.stats import multinomial + >>> rv = multinomial(8, [0.3, 0.2, 0.5]) + >>> rv.pmf([1, 3, 4]) + 0.042000000000000072 + + The multinomial distribution for :math:`k=2` is identical to the + corresponding binomial distribution (tiny numerical differences + notwithstanding): + + >>> from scipy.stats import binom + >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6]) + 0.29030399999999973 + >>> binom.pmf(3, 7, 0.4) + 0.29030400000000012 + + The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support + broadcasting, under the convention that the vector parameters (``x`` and + ``p``) are interpreted as if each row along the last axis is a single + object. For instance: + + >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7]) + array([0.2268945, 0.25412184]) + + Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``, + but following the rules mentioned above they behave as if the rows + ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single + object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and + ``p.shape = ()``. To obtain the individual elements without broadcasting, + we would do this: + + >>> multinomial.pmf([3, 4], n=7, p=[.3, .7]) + 0.2268945 + >>> multinomial.pmf([3, 5], 8, p=[.3, .7]) + 0.25412184 + + This broadcasting also works for ``cov``, where the output objects are + square matrices of size ``p.shape[-1]``. For example: + + >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) + array([[[ 0.84, -0.84], + [-0.84, 0.84]], + [[ 1.2 , -1.2 ], + [-1.2 , 1.2 ]]]) + + In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and + following the rules above, these broadcast as if ``p.shape == (2,)``. 
+ Thus the result should also be of shape ``(2,)``, but since each output is + a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``, + where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and + ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``. + + Alternatively, the object may be called (as a function) to fix the `n` and + `p` parameters, returning a "frozen" multinomial random variable: + + >>> rv = multinomial(n=7, p=[.3, .7]) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. + + See also + -------- + scipy.stats.binom : The binomial distribution. + numpy.random.Generator.multinomial : Sampling from the multinomial distribution. + scipy.stats.multivariate_hypergeom : + The multivariate hypergeometric distribution. + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = \ + doccer.docformat(self.__doc__, multinomial_docdict_params) + + def __call__(self, n, p, seed=None): + """Create a frozen multinomial distribution. + + See `multinomial_frozen` for more information. + """ + return multinomial_frozen(n, p, seed) + + def _process_parameters(self, n, p, eps=1e-15): + """Returns: n_, p_, npcond. + + n_ and p_ are arrays of the correct shape; npcond is a boolean array + flagging values out of the domain. + """ + p = np.array(p, dtype=np.float64, copy=True) + p_adjusted = 1. - p[..., :-1].sum(axis=-1) + i_adjusted = np.abs(p_adjusted) > eps + p[i_adjusted, -1] = p_adjusted[i_adjusted] + + # true for bad p + pcond = np.any(p < 0, axis=-1) + pcond |= np.any(p > 1, axis=-1) + + n = np.array(n, dtype=int, copy=True) + + # true for bad n + ncond = n < 0 + + return n, p, ncond | pcond + + def _process_quantiles(self, x, n, p): + """Returns: x_, xcond. + + x_ is an int array; xcond is a boolean array flagging values out of the + domain. 
+ """ + xx = np.asarray(x, dtype=int) + + if xx.ndim == 0: + raise ValueError("x must be an array.") + + if xx.size != 0 and not xx.shape[-1] == p.shape[-1]: + raise ValueError("Size of each quantile should be size of p: " + "received %d, but expected %d." % + (xx.shape[-1], p.shape[-1])) + + # true for x out of the domain + cond = np.any(xx != x, axis=-1) + cond |= np.any(xx < 0, axis=-1) + cond = cond | (np.sum(xx, axis=-1) != n) + + return xx, cond + + def _checkresult(self, result, cond, bad_value): + result = np.asarray(result) + + if cond.ndim != 0: + result[cond] = bad_value + elif cond: + if result.ndim == 0: + return bad_value + result[...] = bad_value + return result + + def _logpmf(self, x, n, p): + return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1) + + def logpmf(self, x, n, p): + """Log of the Multinomial probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + x, xcond = self._process_quantiles(x, n, p) + + result = self._logpmf(x, n, p) + + # replace values for which x was out of the domain; broadcast + # xcond to the right shape + xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_) + result = self._checkresult(result, xcond_, -np.inf) + + # replace values bad for n or p; broadcast npcond to the right shape + npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_) + return self._checkresult(result, npcond_, np.nan) + + def pmf(self, x, n, p): + """Multinomial probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_doc_default_callparams)s + + Returns + ------- + pmf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + return np.exp(self.logpmf(x, n, p)) + + def mean(self, n, p): + """Mean of the Multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + result = n[..., np.newaxis]*p + return self._checkresult(result, npcond, np.nan) + + def cov(self, n, p): + """Covariance matrix of the multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + cov : ndarray + The covariance matrix of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + + nn = n[..., np.newaxis, np.newaxis] + result = nn * np.einsum('...j,...k->...jk', -p, p) + + # change the diagonal + for i in range(p.shape[-1]): + result[..., i, i] += n*p[..., i] + + return self._checkresult(result, npcond, np.nan) + + def entropy(self, n, p): + r"""Compute the entropy of the multinomial distribution. + + The entropy is computed using this expression: + + .. math:: + + f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i + + \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x! 
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Multinomial distribution + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + + x = np.r_[1:np.max(n)+1] + + term1 = n*np.sum(entr(p), axis=-1) + term1 -= gammaln(n+1) + + n = n[..., np.newaxis] + new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1 + x.shape += (1,)*new_axes_needed + + term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1), + axis=(-1, -1-new_axes_needed)) + + return self._checkresult(term1 + term2, npcond, np.nan) + + def rvs(self, n, p, size=None, random_state=None): + """Draw random samples from a Multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of shape (`size`, `len(p)`) + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + random_state = self._get_random_state(random_state) + return random_state.multinomial(n, p, size) + + +multinomial = multinomial_gen() + + +class multinomial_frozen(multi_rv_frozen): + r"""Create a frozen Multinomial distribution. + + Parameters + ---------- + n : int + number of trials + p: array_like + probability of a trial falling into each category; should sum to 1 + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. 
+ """ + def __init__(self, n, p, seed=None): + self._dist = multinomial_gen(seed) + self.n, self.p, self.npcond = self._dist._process_parameters(n, p) + + # monkey patch self._dist + def _process_parameters(n, p): + return self.n, self.p, self.npcond + + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, self.n, self.p) + + def pmf(self, x): + return self._dist.pmf(x, self.n, self.p) + + def mean(self): + return self._dist.mean(self.n, self.p) + + def cov(self): + return self._dist.cov(self.n, self.p) + + def entropy(self): + return self._dist.entropy(self.n, self.p) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.n, self.p, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multinomial and fill in default strings in class docstrings +for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']: + method = multinomial_gen.__dict__[name] + method_frozen = multinomial_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, multinomial_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + multinomial_docdict_params) + + +class special_ortho_group_gen(multi_rv_generic): + r"""A Special Orthogonal matrix (SO(N)) random variable. + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(N)) with a determinant of +1. + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from SO(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. 
+ If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is wrapping the random_rot code from the MDP Toolkit, + https://github.com/mdp-toolkit/mdp-toolkit + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(N)). + The algorithm is described in the paper + Stewart, G.W., "The efficient generation of random orthogonal + matrices with an application to condition estimators", SIAM Journal + on Numerical Analysis, 17(3), pp. 403-409, 1980. + For more information see + https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization + + See also the similar `ortho_group`. For a random rotation in three + dimensions, see `scipy.spatial.transform.Rotation.random`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import special_ortho_group + >>> x = special_ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> scipy.linalg.det(x) + 1.0 + + This generates one random matrix from SO(3). It is orthogonal and + has a determinant of 1. + + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, returning a "frozen" special_ortho_group random variable: + + >>> rv = special_ortho_group(5) + >>> # Frozen object with the same methods but holding the + >>> # dimension parameter fixed. + + See Also + -------- + ortho_group, scipy.spatial.transform.Rotation.random + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen SO(N) distribution. + + See `special_ortho_group_frozen` for more information. 
+ """ + return special_ortho_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("""Dimension of rotation must be specified, + and must be a scalar greater than 1.""") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from SO(N). + + Parameters + ---------- + dim : integer + Dimension of rotation space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + size = (size,) if size > 1 else () + + dim = self._process_parameters(dim) + + # H represents a (dim, dim) matrix, while D represents the diagonal of + # a (dim, dim) diagonal matrix. The algorithm that follows is + # broadcasted on the leading shape in `size` to vectorize along + # samples. + H = np.empty(size + (dim, dim)) + H[..., :, :] = np.eye(dim) + D = np.empty(size + (dim,)) + + for n in range(dim-1): + + # x is a vector with length dim-n, xrow and xcol are views of it as + # a row vector and column vector respectively. It's important they + # are views and not copies because we are going to modify x + # in-place. 
+ x = random_state.normal(size=size + (dim-n,)) + xrow = x[..., None, :] + xcol = x[..., :, None] + + # This is the squared norm of x, without vectorization it would be + # dot(x, x), to have proper broadcasting we use matmul and squeeze + # out (convert to scalar) the resulting 1x1 matrix + norm2 = np.matmul(xrow, xcol).squeeze((-2, -1)) + + x0 = x[..., 0].copy() + D[..., n] = np.where(x0 != 0, np.sign(x0), 1) + x[..., 0] += D[..., n]*np.sqrt(norm2) + + # In renormalizing x we have to append an additional axis with + # [..., None] to broadcast the scalar against the vector x + x /= np.sqrt((norm2 - x0**2 + x[..., 0]**2) / 2.)[..., None] + + # Householder transformation, without vectorization the RHS can be + # written as outer(H @ x, x) (apart from the slicing) + H[..., :, n:] -= np.matmul(H[..., :, n:], xcol) * xrow + + D[..., -1] = (-1)**(dim-1)*D[..., :-1].prod(axis=-1) + + # Without vectorization this could be written as H = diag(D) @ H, + # left-multiplication by a diagonal matrix amounts to multiplying each + # row of H by an element of the diagonal, so we add a dummy axis for + # the column index + H *= D[..., :, None] + return H + + +special_ortho_group = special_ortho_group_gen() + + +class special_ortho_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen SO(N) distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. 
+ + Examples + -------- + >>> from scipy.stats import special_ortho_group + >>> g = special_ortho_group(5) + >>> x = g.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = special_ortho_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +class ortho_group_gen(multi_rv_generic): + r"""An Orthogonal matrix (O(N)) random variable. + + Return a random orthogonal matrix, drawn from the O(N) Haar + distribution (the only uniform distribution on O(N)). + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from O(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is closely related to `special_ortho_group`. + + Some care is taken to avoid numerical error, as per the paper by Mezzadri. + + References + ---------- + .. [1] F. Mezzadri, "How to generate random matrices from the classical + compact groups", :arXiv:`math-ph/0609050v2`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import ortho_group + >>> x = ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> np.fabs(scipy.linalg.det(x)) + 1.0 + + This generates one random matrix from O(3). It is orthogonal and + has a determinant of +1 or -1. 
+ + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, returning a "frozen" ortho_group random variable: + + >>> rv = ortho_group(5) + >>> # Frozen object with the same methods but holding the + >>> # dimension parameter fixed. + + See Also + -------- + special_ortho_group + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen O(N) distribution. + + See `ortho_group_frozen` for more information. + """ + return ortho_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from O(N). + + Parameters + ---------- + dim : integer + Dimension of rotation space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + + dim = self._process_parameters(dim) + + size = (size,) if size > 1 else () + z = random_state.normal(size=size + (dim, dim)) + q, r = np.linalg.qr(z) + # The last two dimensions are the rows and columns of R matrices. + # Extract the diagonals. Note that this eliminates a dimension. + d = r.diagonal(offset=0, axis1=-2, axis2=-1) + # Add back a dimension for proper broadcasting: we're dividing + # each row of each R matrix by the diagonal of the R matrix. 
+ q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly + return q + + +ortho_group = ortho_group_gen() + + +class ortho_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen O(N) distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import ortho_group + >>> g = ortho_group(5) + >>> x = g.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = ortho_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +class random_correlation_gen(multi_rv_generic): + r"""A random correlation matrix. + + Return a random correlation matrix, given a vector of eigenvalues. + + The `eigs` keyword specifies the eigenvalues of the correlation matrix, + and implies the dimension. + + Methods + ------- + rvs(eigs=None, random_state=None) + Draw random correlation matrices, all with eigenvalues eigs. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. 
Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + + Notes + ----- + + Generates a random correlation matrix following a numerically stable + algorithm spelled out by Davies & Higham. This algorithm uses a single O(N) + similarity transformation to construct a symmetric positive semi-definite + matrix, and applies a series of Givens rotations to scale it to have ones + on the diagonal. + + References + ---------- + + .. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation + of correlation matrices and their factors", BIT 2000, Vol. 40, + No. 4, pp. 640 651 + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import random_correlation + >>> rng = np.random.default_rng() + >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng) + >>> x + array([[ 1. , -0.02423399, 0.03130519, 0.4946965 ], + [-0.02423399, 1. , 0.20334736, 0.04039817], + [ 0.03130519, 0.20334736, 1. , 0.02694275], + [ 0.4946965 , 0.04039817, 0.02694275, 1. ]]) + >>> import scipy.linalg + >>> e, v = scipy.linalg.eigh(x) + >>> e + array([ 0.5, 0.8, 1.2, 1.5]) + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7): + """Create a frozen random correlation matrix. + + See `random_correlation_frozen` for more information. 
+ """ + return random_correlation_frozen(eigs, seed=seed, tol=tol, + diag_tol=diag_tol) + + def _process_parameters(self, eigs, tol): + eigs = np.asarray(eigs, dtype=float) + dim = eigs.size + + if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1: + raise ValueError("Array 'eigs' must be a vector of length " + "greater than 1.") + + if np.fabs(np.sum(eigs) - dim) > tol: + raise ValueError("Sum of eigenvalues must equal dimensionality.") + + for x in eigs: + if x < -tol: + raise ValueError("All eigenvalues must be non-negative.") + + return dim, eigs + + def _givens_to_1(self, aii, ajj, aij): + """Computes a 2x2 Givens matrix to put 1's on the diagonal. + + The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ]. + + The output matrix g is a 2x2 anti-symmetric matrix of the form + [ c s ; -s c ]; the elements c and s are returned. + + Applying the output matrix to the input matrix (as b=g.T M g) + results in a matrix with bii=1, provided tr(M) - det(M) >= 1 + and floating point issues do not occur. Otherwise, some other + valid rotation is returned. When tr(M)==2, also bjj=1. + + """ + aiid = aii - 1. + ajjd = ajj - 1. + + if ajjd == 0: + # ajj==1, so swap aii and ajj to avoid division by zero + return 0., 1. + + dd = math.sqrt(max(aij**2 - aiid*ajjd, 0)) + + # The choice of t should be chosen to avoid cancellation [1] + t = (aij + math.copysign(dd, aij)) / ajjd + c = 1. / math.sqrt(1. + t*t) + if c == 0: + # Underflow + s = 1.0 + else: + s = c*t + return c, s + + def _to_corr(self, m): + """ + Given a psd matrix m, rotate to put one's on the diagonal, turning it + into a correlation matrix. This also requires the trace equal the + dimensionality. 
Note: modifies input matrix
        """
        # Check requirements for in-place Givens
        if not (m.flags.c_contiguous and m.dtype == np.float64 and
                m.shape[0] == m.shape[1]):
            raise ValueError()

        d = m.shape[0]
        for i in range(d-1):
            if m[i, i] == 1:
                continue
            elif m[i, i] > 1:
                # Find a partner column j whose diagonal entry is on the
                # opposite side of 1, so a Givens rotation of the (i, j)
                # plane can set m[i, i] exactly to 1.  Such a j exists
                # because the trace equals d (see docstring).
                for j in range(i+1, d):
                    if m[j, j] < 1:
                        break
            else:
                for j in range(i+1, d):
                    if m[j, j] > 1:
                        break

            c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])

            # Use BLAS to apply Givens rotations in-place. Equivalent to:
            # g = np.eye(d)
            # g[i, i] = g[j,j] = c
            # g[j, i] = -s; g[i, j] = s
            # m = np.dot(g.T, np.dot(m, g))
            # First drot rotates rows i and j; second rotates columns i
            # and j (stride d walks down a column of the C-contiguous m).
            mv = m.ravel()
            drot(mv, mv, c, -s, n=d,
                 offx=i*d, incx=1, offy=j*d, incy=1,
                 overwrite_x=True, overwrite_y=True)
            drot(mv, mv, c, -s, n=d,
                 offx=i, incx=d, offy=j, incy=d,
                 overwrite_x=True, overwrite_y=True)

        return m

    def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
        """Draw random correlation matrices.

        Parameters
        ----------
        eigs : 1d ndarray
            Eigenvalues of correlation matrix
        tol : float, optional
            Tolerance for input parameter checks
        diag_tol : float, optional
            Tolerance for deviation of the diagonal of the resulting
            matrix. Default: 1e-7

        Raises
        ------
        RuntimeError
            Floating point error prevented generating a valid correlation
            matrix.

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim),
            each having eigenvalues eigs.
+ + """ + dim, eigs = self._process_parameters(eigs, tol=tol) + + random_state = self._get_random_state(random_state) + + m = ortho_group.rvs(dim, random_state=random_state) + m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m + m = self._to_corr(m) # Carefully rotate to unit diagonal + + # Check diagonal + if abs(m.diagonal() - 1).max() > diag_tol: + raise RuntimeError("Failed to generate a valid correlation matrix") + + return m + + +random_correlation = random_correlation_gen() + + +class random_correlation_frozen(multi_rv_frozen): + def __init__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7): + """Create a frozen random correlation matrix distribution. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + """ # numpy/numpydoc#87 # noqa: E501 + + self._dist = random_correlation_gen(seed) + self.tol = tol + self.diag_tol = diag_tol + _, self.eigs = self._dist._process_parameters(eigs, tol=self.tol) + + def rvs(self, random_state=None): + return self._dist.rvs(self.eigs, random_state=random_state, + tol=self.tol, diag_tol=self.diag_tol) + + +class unitary_group_gen(multi_rv_generic): + r"""A matrix-valued U(N) random variable. 
+ + Return a random unitary matrix. + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from U(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices, must be greater than 1. + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is similar to `ortho_group`. + + References + ---------- + .. [1] F. Mezzadri, "How to generate random matrices from the classical + compact groups", :arXiv:`math-ph/0609050v2`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import unitary_group + >>> x = unitary_group.rvs(3) + + >>> np.dot(x, x.conj().T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) # may vary + + This generates one random matrix from U(3). The dot product confirms that + it is unitary up to machine precision. + + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, return a "frozen" unitary_group random variable: + + >>> rv = unitary_group(5) + + See Also + -------- + ortho_group + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen (U(N)) n-dimensional unitary matrix distribution. + + See `unitary_group_frozen` for more information. 
+ """ + return unitary_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from U(N). + + Parameters + ---------- + dim : integer + Dimension of space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + + dim = self._process_parameters(dim) + + size = (size,) if size > 1 else () + z = 1/math.sqrt(2)*(random_state.normal(size=size + (dim, dim)) + + 1j*random_state.normal(size=size + (dim, dim))) + q, r = np.linalg.qr(z) + # The last two dimensions are the rows and columns of R matrices. + # Extract the diagonals. Note that this eliminates a dimension. + d = r.diagonal(offset=0, axis1=-2, axis2=-1) + # Add back a dimension for proper broadcasting: we're dividing + # each row of each R matrix by the diagonal of the R matrix. + q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly + return q + + +unitary_group = unitary_group_gen() + + +class unitary_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen (U(N)) n-dimensional unitary matrix distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. 
+ + Examples + -------- + >>> from scipy.stats import unitary_group + >>> x = unitary_group(3) + >>> x.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = unitary_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +_mvt_doc_default_callparams = """\ +loc : array_like, optional + Location of the distribution. (default ``0``) +shape : array_like, optional + Positive semidefinite matrix of the distribution. (default ``1``) +df : float, optional + Degrees of freedom of the distribution; must be greater than zero. + If ``np.inf`` then results are multivariate normal. The default is ``1``. +allow_singular : bool, optional + Whether to allow a singular matrix. (default ``False``) +""" + +_mvt_doc_callparams_note = """\ +Setting the parameter `loc` to ``None`` is equivalent to having `loc` +be the zero-vector. The parameter `shape` can be a scalar, in which case +the shape matrix is the identity times that value, a vector of +diagonal entries for the shape matrix, or a two-dimensional array_like. +""" + +_mvt_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mvt_docdict_params = { + '_mvt_doc_default_callparams': _mvt_doc_default_callparams, + '_mvt_doc_callparams_note': _mvt_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mvt_docdict_noparams = { + '_mvt_doc_default_callparams': "", + '_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_t_gen(multi_rv_generic): + r"""A multivariate t-distributed random variable. + + The `loc` parameter specifies the location. The `shape` parameter specifies + the positive semidefinite shape matrix. The `df` parameter specifies the + degrees of freedom. 
+ + In addition to calling the methods below, the object itself may be called + as a function to fix the location, shape matrix, and degrees of freedom + parameters, returning a "frozen" multivariate t-distribution random. + + Methods + ------- + pdf(x, loc=None, shape=1, df=1, allow_singular=False) + Probability density function. + logpdf(x, loc=None, shape=1, df=1, allow_singular=False) + Log of the probability density function. + cdf(x, loc=None, shape=1, df=1, allow_singular=False, *, + maxpts=None, lower_limit=None, random_state=None) + Cumulative distribution function. + rvs(loc=None, shape=1, df=1, size=1, random_state=None) + Draw random samples from a multivariate t-distribution. + entropy(loc=None, shape=1, df=1) + Differential entropy of a multivariate t-distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_mvt_doc_callparams_note)s + The matrix `shape` must be a (symmetric) positive semidefinite matrix. The + determinant and inverse of `shape` are computed as the pseudo-determinant + and pseudo-inverse, respectively, so that `shape` does not need to have + full rank. + + The probability density function for `multivariate_t` is + + .. math:: + + f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}} + \left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top} + \boldsymbol{\Sigma}^{-1} + (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2}, + + where :math:`p` is the dimension of :math:`\mathbf{x}`, + :math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location, + :math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape + matrix, and :math:`\nu` is the degrees of freedom. + + .. versionadded:: 1.6.0 + + References + ---------- + .. [1] Arellano-Valle et al. "Shannon Entropy and Mutual Information for + Multivariate Skew-Elliptical Distributions". Scandinavian Journal + of Statistics. Vol. 40, issue 1. 
+ + Examples + -------- + The object may be called (as a function) to fix the `loc`, `shape`, + `df`, and `allow_singular` parameters, returning a "frozen" + multivariate_t random variable: + + >>> import numpy as np + >>> from scipy.stats import multivariate_t + >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2) + >>> # Frozen object with the same methods but holding the given location, + >>> # scale, and degrees of freedom fixed. + + Create a contour plot of the PDF. + + >>> import matplotlib.pyplot as plt + >>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01] + >>> pos = np.dstack((x, y)) + >>> fig, ax = plt.subplots(1, 1) + >>> ax.set_aspect('equal') + >>> plt.contourf(x, y, rv.pdf(pos)) + + """ + + def __init__(self, seed=None): + """Initialize a multivariate t-distributed random variable. + + Parameters + ---------- + seed : Random state. + + """ + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params) + self._random_state = check_random_state(seed) + + def __call__(self, loc=None, shape=1, df=1, allow_singular=False, + seed=None): + """Create a frozen multivariate t-distribution. + + See `multivariate_t_frozen` for parameters. + """ + if df == np.inf: + return multivariate_normal_frozen(mean=loc, cov=shape, + allow_singular=allow_singular, + seed=seed) + return multivariate_t_frozen(loc=loc, shape=shape, df=df, + allow_singular=allow_singular, seed=seed) + + def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False): + """Multivariate t-distribution probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the probability density function. + %(_mvt_doc_default_callparams)s + + Returns + ------- + pdf : Probability density function evaluated at `x`. 
+ + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.pdf(x, loc, shape, df) + 0.00075713 + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + x = self._process_quantiles(x, dim) + shape_info = _PSD(shape, allow_singular=allow_singular) + logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, + dim, shape_info.rank) + return np.exp(logpdf) + + def logpdf(self, x, loc=None, shape=1, df=1): + """Log of the multivariate t-distribution probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability density + function. + %(_mvt_doc_default_callparams)s + + Returns + ------- + logpdf : Log of the probability density function evaluated at `x`. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.logpdf(x, loc, shape, df) + -7.1859802 + + See Also + -------- + pdf : Probability density function. + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + x = self._process_quantiles(x, dim) + shape_info = _PSD(shape) + return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim, + shape_info.rank) + + def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank): + """Utility method `pdf`, `logpdf` for parameters. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability density + function. + loc : ndarray + Location of the distribution. + prec_U : ndarray + A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse + of the shape matrix. + log_pdet : float + Logarithm of the determinant of the shape matrix. + df : float + Degrees of freedom of the distribution. + dim : int + Dimension of the quantiles x. + rank : int + Rank of the shape matrix. 
+ + Notes + ----- + As this function does no argument checking, it should not be called + directly; use 'logpdf' instead. + + """ + if df == np.inf: + return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank) + + dev = x - loc + maha = np.square(np.dot(dev, prec_U)).sum(axis=-1) + + t = 0.5 * (df + dim) + A = gammaln(t) + B = gammaln(0.5 * df) + C = dim/2. * np.log(df * np.pi) + D = 0.5 * log_pdet + E = -t * np.log(1 + (1./df) * maha) + + return _squeeze_output(A - B - C - D + E) + + def _cdf(self, x, loc, shape, df, dim, maxpts=None, lower_limit=None, + random_state=None): + + # All of this - random state validation, maxpts, apply_along_axis, + # etc. needs to go in this private method unless we want + # frozen distribution's `cdf` method to duplicate it or call `cdf`, + # which would require re-processing parameters + if random_state is not None: + rng = check_random_state(random_state) + else: + rng = self._random_state + + if not maxpts: + maxpts = 1000 * dim + + x = self._process_quantiles(x, dim) + lower_limit = (np.full(loc.shape, -np.inf) + if lower_limit is None else lower_limit) + + # remove the mean + x, lower_limit = x - loc, lower_limit - loc + + b, a = np.broadcast_arrays(x, lower_limit) + i_swap = b < a + signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a, b = a.copy(), b.copy() + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + n = x.shape[-1] + limits = np.concatenate((a, b), axis=-1) + + def func1d(limits): + a, b = limits[:n], limits[n:] + return _qmvt(maxpts, df, shape, a, b, rng)[0] + + res = np.apply_along_axis(func1d, -1, limits) * signs + # Fixing the output shape for existing distributions is a separate + # issue. For now, let's keep this consistent with pdf. + return _squeeze_output(res) + + def cdf(self, x, loc=None, shape=1, df=1, allow_singular=False, *, + maxpts=None, lower_limit=None, random_state=None): + """Multivariate t-distribution cumulative distribution function. 
+ + Parameters + ---------- + x : array_like + Points at which to evaluate the cumulative distribution function. + %(_mvt_doc_default_callparams)s + maxpts : int, optional + Maximum number of points to use for integration. The default is + 1000 times the number of dimensions. + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + %(_doc_random_state)s + + Returns + ------- + cdf : ndarray or scalar + Cumulative distribution function evaluated at `x`. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.cdf(x, loc, shape, df) + 0.64798491 + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + shape = _PSD(shape, allow_singular=allow_singular)._M + + return self._cdf(x, loc, shape, df, dim, maxpts, + lower_limit, random_state) + + def _entropy(self, dim, df=1, shape=1): + if df == np.inf: + return multivariate_normal(None, cov=shape).entropy() + + shape_info = _PSD(shape) + shape_term = 0.5 * shape_info.log_pdet + + def regular(dim, df): + halfsum = 0.5 * (dim + df) + half_df = 0.5 * df + return ( + -gammaln(halfsum) + gammaln(half_df) + + 0.5 * dim * np.log(df * np.pi) + halfsum + * (psi(halfsum) - psi(half_df)) + + shape_term + ) + + def asymptotic(dim, df): + # Formula from Wolfram Alpha: + # "asymptotic expansion -gammaln((m+d)/2) + gammaln(d/2) + (m*log(d*pi))/2 + # + ((m+d)/2) * (digamma((m+d)/2) - digamma(d/2))" + return ( + dim * norm._entropy() + dim / df + - dim * (dim - 2) * df**-2.0 / 4 + + dim**2 * (dim - 2) * df**-3.0 / 6 + + dim * (-3 * dim**3 + 8 * dim**2 - 8) * df**-4.0 / 24 + + dim**2 * (3 * dim**3 - 10 * dim**2 + 16) * df**-5.0 / 30 + + shape_term + )[()] + + # preserves ~12 digits accuracy up to at least `dim=1e5`. See gh-18465. 
+ threshold = dim * 100 * 4 / (np.log(dim) + 1) + return _lazywhere(df >= threshold, (dim, df), f=asymptotic, f2=regular) + + def entropy(self, loc=None, shape=1, df=1): + """Calculate the differential entropy of a multivariate + t-distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + + Returns + ------- + h : float + Differential entropy + + """ + dim, loc, shape, df = self._process_parameters(None, shape, df) + return self._entropy(dim, df, shape) + + def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None): + """Draw random samples from a multivariate t-distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `P`), where `P` is the + dimension of the random variable. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.rvs(loc, shape, df) + array([[0.93477495, 3.00408716]]) + + """ + # For implementation details, see equation (3): + # + # Hofert, "On Sampling from the Multivariatet Distribution", 2013 + # http://rjournal.github.io/archive/2013-2/hofert.pdf + # + dim, loc, shape, df = self._process_parameters(loc, shape, df) + if random_state is not None: + rng = check_random_state(random_state) + else: + rng = self._random_state + + if np.isinf(df): + x = np.ones(size) + else: + x = rng.chisquare(df, size=size) / df + + z = rng.multivariate_normal(np.zeros(dim), shape, size=size) + samples = loc + z / np.sqrt(x)[..., None] + return _squeeze_output(samples) + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. 
+ """ + x = np.asarray(x, dtype=float) + if x.ndim == 0: + x = x[np.newaxis] + elif x.ndim == 1: + if dim == 1: + x = x[:, np.newaxis] + else: + x = x[np.newaxis, :] + return x + + def _process_parameters(self, loc, shape, df): + """ + Infer dimensionality from location array and shape matrix, handle + defaults, and ensure compatible dimensions. + """ + if loc is None and shape is None: + loc = np.asarray(0, dtype=float) + shape = np.asarray(1, dtype=float) + dim = 1 + elif loc is None: + shape = np.asarray(shape, dtype=float) + if shape.ndim < 2: + dim = 1 + else: + dim = shape.shape[0] + loc = np.zeros(dim) + elif shape is None: + loc = np.asarray(loc, dtype=float) + dim = loc.size + shape = np.eye(dim) + else: + shape = np.asarray(shape, dtype=float) + loc = np.asarray(loc, dtype=float) + dim = loc.size + + if dim == 1: + loc = loc.reshape(1) + shape = shape.reshape(1, 1) + + if loc.ndim != 1 or loc.shape[0] != dim: + raise ValueError("Array 'loc' must be a vector of length %d." % + dim) + if shape.ndim == 0: + shape = shape * np.eye(dim) + elif shape.ndim == 1: + shape = np.diag(shape) + elif shape.ndim == 2 and shape.shape != (dim, dim): + rows, cols = shape.shape + if rows != cols: + msg = ("Array 'cov' must be square if it is two dimensional," + f" but cov.shape = {str(shape.shape)}.") + else: + msg = ("Dimension mismatch: array 'cov' is of shape %s," + " but 'loc' is a vector of length %d.") + msg = msg % (str(shape.shape), len(loc)) + raise ValueError(msg) + elif shape.ndim > 2: + raise ValueError("Array 'cov' must be at most two-dimensional," + " but cov.ndim = %d" % shape.ndim) + + # Process degrees of freedom. 
+ if df is None: + df = 1 + elif df <= 0: + raise ValueError("'df' must be greater than zero.") + elif np.isnan(df): + raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.") + + return dim, loc, shape, df + + +class multivariate_t_frozen(multi_rv_frozen): + + def __init__(self, loc=None, shape=1, df=1, allow_singular=False, + seed=None): + """Create a frozen multivariate t distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import multivariate_t + >>> loc = np.zeros(3) + >>> shape = np.eye(3) + >>> df = 10 + >>> dist = multivariate_t(loc, shape, df) + >>> dist.rvs() + array([[ 0.81412036, -1.53612361, 0.42199647]]) + >>> dist.pdf([1, 1, 1]) + array([0.01237803]) + + """ + self._dist = multivariate_t_gen(seed) + dim, loc, shape, df = self._dist._process_parameters(loc, shape, df) + self.dim, self.loc, self.shape, self.df = dim, loc, shape, df + self.shape_info = _PSD(shape, allow_singular=allow_singular) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + U = self.shape_info.U + log_pdet = self.shape_info.log_pdet + return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim, + self.shape_info.rank) + + def cdf(self, x, *, maxpts=None, lower_limit=None, random_state=None): + x = self._dist._process_quantiles(x, self.dim) + return self._dist._cdf(x, self.loc, self.shape, self.df, self.dim, + maxpts, lower_limit, random_state) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(loc=self.loc, + shape=self.shape, + df=self.df, + size=size, + random_state=random_state) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.shape) + + +multivariate_t = multivariate_t_gen() + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_t_gen and fill in default strings in class docstrings +for name in 
['logpdf', 'pdf', 'rvs', 'cdf', 'entropy']: + method = multivariate_t_gen.__dict__[name] + method_frozen = multivariate_t_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + mvt_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params) + + +_mhg_doc_default_callparams = """\ +m : array_like + The number of each type of object in the population. + That is, :math:`m[i]` is the number of objects of + type :math:`i`. +n : array_like + The number of samples taken from the population. +""" + +_mhg_doc_callparams_note = """\ +`m` must be an array of positive integers. If the quantile +:math:`i` contains values out of the range :math:`[0, m_i]` +where :math:`m_i` is the number of objects of type :math:`i` +in the population or if the parameters are inconsistent with one +another (e.g. ``x.sum() != n``), methods return the appropriate +value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative +values, the result will contain ``nan`` there. +""" + +_mhg_doc_frozen_callparams = "" + +_mhg_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mhg_docdict_params = { + '_doc_default_callparams': _mhg_doc_default_callparams, + '_doc_callparams_note': _mhg_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mhg_docdict_noparams = { + '_doc_default_callparams': _mhg_doc_frozen_callparams, + '_doc_callparams_note': _mhg_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_hypergeom_gen(multi_rv_generic): + r"""A multivariate hypergeometric random variable. + + Methods + ------- + pmf(x, m, n) + Probability mass function. + logpmf(x, m, n) + Log of the probability mass function. + rvs(m, n, size=1, random_state=None) + Draw random samples from a multivariate hypergeometric + distribution. + mean(m, n) + Mean of the multivariate hypergeometric distribution. 
+ var(m, n) + Variance of the multivariate hypergeometric distribution. + cov(m, n) + Compute the covariance matrix of the multivariate + hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_callparams_note)s + + The probability mass function for `multivariate_hypergeom` is + + .. math:: + + P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1} + \binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad + (x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with } + \sum_{i=1}^k x_i = n + + where :math:`m_i` are the number of objects of type :math:`i`, :math:`M` + is the total number of objects in the population (sum of all the + :math:`m_i`), and :math:`n` is the size of the sample to be taken + from the population. + + .. versionadded:: 1.6.0 + + Examples + -------- + To evaluate the probability mass function of the multivariate + hypergeometric distribution, with a dichotomous population of size + :math:`10` and :math:`20`, at a sample of size :math:`12` with + :math:`8` objects of the first type and :math:`4` objects of the + second type, use: + + >>> from scipy.stats import multivariate_hypergeom + >>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12) + 0.0025207176631464523 + + The `multivariate_hypergeom` distribution is identical to the + corresponding `hypergeom` distribution (tiny numerical differences + notwithstanding) when only two types (good and bad) of objects + are present in the population as in the example above. 
Consider + another example for a comparison with the hypergeometric distribution: + + >>> from scipy.stats import hypergeom + >>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4) + 0.4395604395604395 + >>> hypergeom.pmf(k=3, M=15, n=4, N=10) + 0.43956043956044005 + + The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs`` + support broadcasting, under the convention that the vector parameters + (``x``, ``m``, and ``n``) are interpreted as if each row along the last + axis is a single object. For instance, we can combine the previous two + calls to `multivariate_hypergeom` as + + >>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]], + ... n=[12, 4]) + array([0.00252072, 0.43956044]) + + This broadcasting also works for ``cov``, where the output objects are + square matrices of size ``m.shape[-1]``. For example: + + >>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12]) + array([[[ 1.05, -1.05], + [-1.05, 1.05]], + [[ 1.56, -1.56], + [-1.56, 1.56]]]) + + That is, ``result[0]`` is equal to + ``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal + to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``. + + Alternatively, the object may be called (as a function) to fix the `m` + and `n` parameters, returning a "frozen" multivariate hypergeometric + random variable. + + >>> rv = multivariate_hypergeom(m=[10, 20], n=12) + >>> rv.pmf(x=[8, 4]) + 0.0025207176631464523 + + See Also + -------- + scipy.stats.hypergeom : The hypergeometric distribution. + scipy.stats.multinomial : The multinomial distribution. + + References + ---------- + .. [1] The Multivariate Hypergeometric Distribution, + http://www.randomservices.org/random/urn/MultiHypergeometric.html + .. [2] Thomas J. 
Sargent and John Stachurski, 2020, + Multivariate Hypergeometric Distribution + https://python.quantecon.org/multi_hyper.html + """ + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params) + + def __call__(self, m, n, seed=None): + """Create a frozen multivariate_hypergeom distribution. + + See `multivariate_hypergeom_frozen` for more information. + """ + return multivariate_hypergeom_frozen(m, n, seed=seed) + + def _process_parameters(self, m, n): + m = np.asarray(m) + n = np.asarray(n) + if m.size == 0: + m = m.astype(int) + if n.size == 0: + n = n.astype(int) + if not np.issubdtype(m.dtype, np.integer): + raise TypeError("'m' must an array of integers.") + if not np.issubdtype(n.dtype, np.integer): + raise TypeError("'n' must an array of integers.") + if m.ndim == 0: + raise ValueError("'m' must be an array with" + " at least one dimension.") + + # check for empty arrays + if m.size != 0: + n = n[..., np.newaxis] + + m, n = np.broadcast_arrays(m, n) + + # check for empty arrays + if m.size != 0: + n = n[..., 0] + + mcond = m < 0 + + M = m.sum(axis=-1) + + ncond = (n < 0) | (n > M) + return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond + + def _process_quantiles(self, x, M, m, n): + x = np.asarray(x) + if not np.issubdtype(x.dtype, np.integer): + raise TypeError("'x' must an array of integers.") + if x.ndim == 0: + raise ValueError("'x' must be an array with" + " at least one dimension.") + if not x.shape[-1] == m.shape[-1]: + raise ValueError(f"Size of each quantile must be size of 'm': " + f"received {x.shape[-1]}, " + f"but expected {m.shape[-1]}.") + + # check for empty arrays + if m.size != 0: + n = n[..., np.newaxis] + M = M[..., np.newaxis] + + x, m, n, M = np.broadcast_arrays(x, m, n, M) + + # check for empty arrays + if m.size != 0: + n, M = n[..., 0], M[..., 0] + + xcond = (x < 0) | (x > m) + return (x, M, m, n, xcond, + np.any(xcond, axis=-1) | (x.sum(axis=-1) != n)) + + def 
_checkresult(self, result, cond, bad_value): + result = np.asarray(result) + if cond.ndim != 0: + result[cond] = bad_value + elif cond: + return bad_value + if result.ndim == 0: + return result[()] + return result + + def _logpmf(self, x, M, m, n, mxcond, ncond): + # This equation of the pmf comes from the relation, + # n combine r = beta(n+1, 1) / beta(r+1, n-r+1) + num = np.zeros_like(m, dtype=np.float64) + den = np.zeros_like(n, dtype=np.float64) + m, x = m[~mxcond], x[~mxcond] + M, n = M[~ncond], n[~ncond] + num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1)) + den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1)) + num[mxcond] = np.nan + den[ncond] = np.nan + num = num.sum(axis=-1) + return num - den + + def logpmf(self, x, m, n): + """Log of the multivariate hypergeometric probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + M, m, n, mcond, ncond, mncond = self._process_parameters(m, n) + (x, M, m, n, xcond, + xcond_reduced) = self._process_quantiles(x, M, m, n) + mxcond = mcond | xcond + ncond = ncond | np.zeros(n.shape, dtype=np.bool_) + + result = self._logpmf(x, M, m, n, mxcond, ncond) + + # replace values for which x was out of the domain; broadcast + # xcond to the right shape + xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_) + result = self._checkresult(result, xcond_, -np.inf) + + # replace values bad for n or m; broadcast + # mncond to the right shape + mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_) + return self._checkresult(result, mncond_, np.nan) + + def pmf(self, x, m, n): + """Multivariate hypergeometric probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_doc_default_callparams)s + + Returns + ------- + pmf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + out = np.exp(self.logpmf(x, m, n)) + return out + + def mean(self, m, n): + """Mean of the multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : array_like or scalar + The mean of the distribution + """ + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M, n = M[..., np.newaxis], n[..., np.newaxis] + cond = (M == 0) + M = np.ma.masked_array(M, mask=cond) + mu = n*(m/M) + if m.size != 0: + mncond = (mncond[..., np.newaxis] | + np.zeros(mu.shape, dtype=np.bool_)) + return self._checkresult(mu, mncond, np.nan) + + def var(self, m, n): + """Variance of the multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + array_like + The variances of the components of the distribution. This is + the diagonal of the covariance matrix of the distribution + """ + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M, n = M[..., np.newaxis], n[..., np.newaxis] + cond = (M == 0) & (M-1 == 0) + M = np.ma.masked_array(M, mask=cond) + output = n * m/M * (M-m)/M * (M-n)/(M-1) + if m.size != 0: + mncond = (mncond[..., np.newaxis] | + np.zeros(output.shape, dtype=np.bool_)) + return self._checkresult(output, mncond, np.nan) + + def cov(self, m, n): + """Covariance matrix of the multivariate hypergeometric distribution. 
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + cov : array_like + The covariance matrix of the distribution + """ + # see [1]_ for the formula and [2]_ for implementation + # cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2) + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M = M[..., np.newaxis, np.newaxis] + n = n[..., np.newaxis, np.newaxis] + cond = (M == 0) & (M-1 == 0) + M = np.ma.masked_array(M, mask=cond) + output = (-n * (M-n)/(M-1) * + np.einsum("...i,...j->...ij", m, m) / (M**2)) + # check for empty arrays + if m.size != 0: + M, n = M[..., 0, 0], n[..., 0, 0] + cond = cond[..., 0, 0] + dim = m.shape[-1] + # diagonal entries need to be computed differently + for i in range(dim): + output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i])) + output[..., i, i] = output[..., i, i] / (M-1) + output[..., i, i] = output[..., i, i] / (M**2) + if m.size != 0: + mncond = (mncond[..., np.newaxis, np.newaxis] | + np.zeros(output.shape, dtype=np.bool_)) + return self._checkresult(output, mncond, np.nan) + + def rvs(self, m, n, size=None, random_state=None): + """Draw random samples from a multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw. Default is ``None``, in which case a + single variate is returned as an array with shape ``m.shape``. + %(_doc_random_state)s + + Returns + ------- + rvs : array_like + Random variates of shape ``size`` or ``m.shape`` + (if ``size=None``). + + Notes + ----- + %(_doc_callparams_note)s + + Also note that NumPy's `multivariate_hypergeometric` sampler is not + used as it doesn't support broadcasting. 
+ """ + M, m, n, _, _, _ = self._process_parameters(m, n) + + random_state = self._get_random_state(random_state) + + if size is not None and isinstance(size, int): + size = (size, ) + + if size is None: + rvs = np.empty(m.shape, dtype=m.dtype) + else: + rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype) + rem = M + + # This sampler has been taken from numpy gh-13794 + # https://github.com/numpy/numpy/pull/13794 + for c in range(m.shape[-1] - 1): + rem = rem - m[..., c] + n0mask = n == 0 + rvs[..., c] = (~n0mask * + random_state.hypergeometric(m[..., c], + rem + n0mask, + n + n0mask, + size=size)) + n = n - rvs[..., c] + rvs[..., m.shape[-1] - 1] = n + + return rvs + + +multivariate_hypergeom = multivariate_hypergeom_gen() + + +class multivariate_hypergeom_frozen(multi_rv_frozen): + def __init__(self, m, n, seed=None): + self._dist = multivariate_hypergeom_gen(seed) + (self.M, self.m, self.n, + self.mcond, self.ncond, + self.mncond) = self._dist._process_parameters(m, n) + + # monkey patch self._dist + def _process_parameters(m, n): + return (self.M, self.m, self.n, + self.mcond, self.ncond, + self.mncond) + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, self.m, self.n) + + def pmf(self, x): + return self._dist.pmf(x, self.m, self.n) + + def mean(self): + return self._dist.mean(self.m, self.n) + + def var(self): + return self._dist.var(self.m, self.n) + + def cov(self): + return self._dist.cov(self.m, self.n) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.m, self.n, + size=size, + random_state=random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_hypergeom and fill in default strings in class docstrings +for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']: + method = multivariate_hypergeom_gen.__dict__[name] + method_frozen = multivariate_hypergeom_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + 
method.__doc__, mhg_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + mhg_docdict_params) + + +class random_table_gen(multi_rv_generic): + r"""Contingency tables from independent samples with fixed marginal sums. + + This is the distribution of random tables with given row and column vector + sums. This distribution represents the set of random tables under the null + hypothesis that rows and columns are independent. It is used in hypothesis + tests of independence. + + Because of assumed independence, the expected frequency of each table + element can be computed from the row and column sums, so that the + distribution is completely determined by these two vectors. + + Methods + ------- + logpmf(x) + Log-probability of table `x` to occur in the distribution. + pmf(x) + Probability of table `x` to occur in the distribution. + mean(row, col) + Mean table. + rvs(row, col, size=None, method=None, random_state=None) + Draw random tables with given row and column vector sums. + + Parameters + ---------- + %(_doc_row_col)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_row_col_note)s + + Random elements from the distribution are generated either with Boyett's + [1]_ or Patefield's algorithm [2]_. Boyett's algorithm has + O(N) time and space complexity, where N is the total sum of entries in the + table. Patefield's algorithm has O(K x log(N)) time complexity, where K is + the number of cells in the table and requires only a small constant work + space. By default, the `rvs` method selects the fastest algorithm based on + the input, but you can specify the algorithm with the keyword `method`. + Allowed values are "boyett" and "patefield". + + .. 
versionadded:: 1.10.0 + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.mean(row, col) + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> dist = random_table(row, col) + >>> dist.rvs(random_state=123) + array([[1, 0, 0], + [1, 3, 1]]) + + References + ---------- + .. [1] J. Boyett, AS 144 Appl. Statist. 28 (1979) 329-332 + .. [2] W.M. Patefield, AS 159 Appl. Statist. 30 (1981) 91-97 + """ + + def __init__(self, seed=None): + super().__init__(seed) + + def __call__(self, row, col, *, seed=None): + """Create a frozen distribution of tables with given marginals. + + See `random_table_frozen` for more information. + """ + return random_table_frozen(row, col, seed=seed) + + def logpmf(self, x, row, col): + """Log-probability of table to occur in the distribution. + + Parameters + ---------- + %(_doc_x)s + %(_doc_row_col)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x`. + + Notes + ----- + %(_doc_row_col_note)s + + If row and column marginals of `x` do not match `row` and `col`, + negative infinity is returned. + + Examples + -------- + >>> from scipy.stats import random_table + >>> import numpy as np + + >>> x = [[1, 5, 1], [2, 3, 1]] + >>> row = np.sum(x, axis=1) + >>> col = np.sum(x, axis=0) + >>> random_table.logpmf(x, row, col) + -1.6306401200847027 + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. 
+ + >>> d = random_table(row, col) + >>> d.logpmf(x) + -1.6306401200847027 + """ + r, c, n = self._process_parameters(row, col) + x = np.asarray(x) + + if x.ndim < 2: + raise ValueError("`x` must be at least two-dimensional") + + dtype_is_int = np.issubdtype(x.dtype, np.integer) + with np.errstate(invalid='ignore'): + if not dtype_is_int and not np.all(x.astype(int) == x): + raise ValueError("`x` must contain only integral values") + + # x does not contain NaN if we arrive here + if np.any(x < 0): + raise ValueError("`x` must contain only non-negative values") + + r2 = np.sum(x, axis=-1) + c2 = np.sum(x, axis=-2) + + if r2.shape[-1] != len(r): + raise ValueError("shape of `x` must agree with `row`") + + if c2.shape[-1] != len(c): + raise ValueError("shape of `x` must agree with `col`") + + res = np.empty(x.shape[:-2]) + + mask = np.all(r2 == r, axis=-1) & np.all(c2 == c, axis=-1) + + def lnfac(x): + return gammaln(x + 1) + + res[mask] = (np.sum(lnfac(r), axis=-1) + np.sum(lnfac(c), axis=-1) + - lnfac(n) - np.sum(lnfac(x[mask]), axis=(-1, -2))) + res[~mask] = -np.inf + + return res[()] + + def pmf(self, x, row, col): + """Probability of table to occur in the distribution. + + Parameters + ---------- + %(_doc_x)s + %(_doc_row_col)s + + Returns + ------- + pmf : ndarray or scalar + Probability mass function evaluated at `x`. + + Notes + ----- + %(_doc_row_col_note)s + + If row and column marginals of `x` do not match `row` and `col`, + zero is returned. + + Examples + -------- + >>> from scipy.stats import random_table + >>> import numpy as np + + >>> x = [[1, 5, 1], [2, 3, 1]] + >>> row = np.sum(x, axis=1) + >>> col = np.sum(x, axis=0) + >>> random_table.pmf(x, row, col) + 0.19580419580419592 + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. 
+ + >>> d = random_table(row, col) + >>> d.pmf(x) + 0.19580419580419592 + """ + return np.exp(self.logpmf(x, row, col)) + + def mean(self, row, col): + """Mean of distribution of conditional tables. + %(_doc_mean_params)s + + Returns + ------- + mean: ndarray + Mean of the distribution. + + Notes + ----- + %(_doc_row_col_note)s + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.mean(row, col) + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> d = random_table(row, col) + >>> d.mean() + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + """ + r, c, n = self._process_parameters(row, col) + return np.outer(r, c) / n + + def rvs(self, row, col, *, size=None, method=None, random_state=None): + """Draw random tables with fixed column and row marginals. + + Parameters + ---------- + %(_doc_row_col)s + size : integer, optional + Number of samples to draw (default 1). + method : str, optional + Which method to use, "boyett" or "patefield". If None (default), + selects the fastest method for this input. + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random 2D tables of shape (`size`, `len(row)`, `len(col)`). + + Notes + ----- + %(_doc_row_col_note)s + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.rvs(row, col, random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. 
+ + >>> d = random_table(row, col) + >>> d.rvs(random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + """ + r, c, n = self._process_parameters(row, col) + size, shape = self._process_size_shape(size, r, c) + + random_state = self._get_random_state(random_state) + meth = self._process_rvs_method(method, r, c, n) + + return meth(r, c, n, size, random_state).reshape(shape) + + @staticmethod + def _process_parameters(row, col): + """ + Check that row and column vectors are one-dimensional, that they do + not contain negative or non-integer entries, and that the sums over + both vectors are equal. + """ + r = np.array(row, dtype=np.int64, copy=True) + c = np.array(col, dtype=np.int64, copy=True) + + if np.ndim(r) != 1: + raise ValueError("`row` must be one-dimensional") + if np.ndim(c) != 1: + raise ValueError("`col` must be one-dimensional") + + if np.any(r < 0): + raise ValueError("each element of `row` must be non-negative") + if np.any(c < 0): + raise ValueError("each element of `col` must be non-negative") + + n = np.sum(r) + if n != np.sum(c): + raise ValueError("sums over `row` and `col` must be equal") + + if not np.all(r == np.asarray(row)): + raise ValueError("each element of `row` must be an integer") + if not np.all(c == np.asarray(col)): + raise ValueError("each element of `col` must be an integer") + + return r, c, n + + @staticmethod + def _process_size_shape(size, r, c): + """ + Compute the number of samples to be drawn and the shape of the output + """ + shape = (len(r), len(c)) + + if size is None: + return 1, shape + + size = np.atleast_1d(size) + if not np.issubdtype(size.dtype, np.integer) or np.any(size < 0): + raise ValueError("`size` must be a non-negative integer or `None`") + + return np.prod(size), tuple(size) + shape + + @classmethod + def _process_rvs_method(cls, method, r, c, n): + known_methods = { + None: cls._rvs_select(r, c, n), + "boyett": cls._rvs_boyett, + "patefield": cls._rvs_patefield, + } + try: + return 
known_methods[method] + except KeyError: + raise ValueError(f"'{method}' not recognized, " + f"must be one of {set(known_methods)}") + + @classmethod + def _rvs_select(cls, r, c, n): + fac = 1.0 # benchmarks show that this value is about 1 + k = len(r) * len(c) # number of cells + # n + 1 guards against failure if n == 0 + if n > fac * np.log(n + 1) * k: + return cls._rvs_patefield + return cls._rvs_boyett + + @staticmethod + def _rvs_boyett(row, col, ntot, size, random_state): + return _rcont.rvs_rcont1(row, col, ntot, size, random_state) + + @staticmethod + def _rvs_patefield(row, col, ntot, size, random_state): + return _rcont.rvs_rcont2(row, col, ntot, size, random_state) + + +random_table = random_table_gen() + + +class random_table_frozen(multi_rv_frozen): + def __init__(self, row, col, *, seed=None): + self._dist = random_table_gen(seed) + self._params = self._dist._process_parameters(row, col) + + # monkey patch self._dist + def _process_parameters(r, c): + return self._params + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, None, None) + + def pmf(self, x): + return self._dist.pmf(x, None, None) + + def mean(self): + return self._dist.mean(None, None) + + def rvs(self, size=None, method=None, random_state=None): + # optimisations are possible here + return self._dist.rvs(None, None, size=size, method=method, + random_state=random_state) + + +_ctab_doc_row_col = """\ +row : array_like + Sum of table entries in each row. +col : array_like + Sum of table entries in each column.""" + +_ctab_doc_x = """\ +x : array-like + Two-dimensional table of non-negative integers, or a + multi-dimensional array with the last two dimensions + corresponding with the tables.""" + +_ctab_doc_row_col_note = """\ +The row and column vectors must be one-dimensional, not empty, +and each sum up to the same value. 
They cannot contain negative +or noninteger entries.""" + +_ctab_doc_mean_params = f""" +Parameters +---------- +{_ctab_doc_row_col}""" + +_ctab_doc_row_col_note_frozen = """\ +See class definition for a detailed description of parameters.""" + +_ctab_docdict = { + "_doc_random_state": _doc_random_state, + "_doc_row_col": _ctab_doc_row_col, + "_doc_x": _ctab_doc_x, + "_doc_mean_params": _ctab_doc_mean_params, + "_doc_row_col_note": _ctab_doc_row_col_note, +} + +_ctab_docdict_frozen = _ctab_docdict.copy() +_ctab_docdict_frozen.update({ + "_doc_row_col": "", + "_doc_mean_params": "", + "_doc_row_col_note": _ctab_doc_row_col_note_frozen, +}) + + +def _docfill(obj, docdict, template=None): + obj.__doc__ = doccer.docformat(template or obj.__doc__, docdict) + + +# Set frozen generator docstrings from corresponding docstrings in +# random_table and fill in default strings in class docstrings +_docfill(random_table_gen, _ctab_docdict) +for name in ['logpmf', 'pmf', 'mean', 'rvs']: + method = random_table_gen.__dict__[name] + method_frozen = random_table_frozen.__dict__[name] + _docfill(method_frozen, _ctab_docdict_frozen, method.__doc__) + _docfill(method, _ctab_docdict) + + +class uniform_direction_gen(multi_rv_generic): + r"""A vector-valued uniform direction. + + Return a random direction (unit vector). The `dim` keyword specifies + the dimensionality of the space. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random directions. + + Parameters + ---------- + dim : scalar + Dimension of directions. + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. 
+ + Notes + ----- + This distribution generates unit vectors uniformly distributed on + the surface of a hypersphere. These can be interpreted as random + directions. + For example, if `dim` is 3, 3D vectors from the surface of :math:`S^2` + will be sampled. + + References + ---------- + .. [1] Marsaglia, G. (1972). "Choosing a Point from the Surface of a + Sphere". Annals of Mathematical Statistics. 43 (2): 645-646. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import uniform_direction + >>> x = uniform_direction.rvs(3) + >>> np.linalg.norm(x) + 1. + + This generates one random direction, a vector on the surface of + :math:`S^2`. + + Alternatively, the object may be called (as a function) to return a frozen + distribution with fixed `dim` parameter. Here, + we create a `uniform_direction` with ``dim=3`` and draw 5 observations. + The samples are then arranged in an array of shape 5x3. + + >>> rng = np.random.default_rng() + >>> uniform_sphere_dist = uniform_direction(3) + >>> unit_vectors = uniform_sphere_dist.rvs(5, random_state=rng) + >>> unit_vectors + array([[ 0.56688642, -0.1332634 , -0.81294566], + [-0.427126 , -0.74779278, 0.50830044], + [ 0.3793989 , 0.92346629, 0.05715323], + [ 0.36428383, -0.92449076, -0.11231259], + [-0.27733285, 0.94410968, -0.17816678]]) + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen n-dimensional uniform direction distribution. + + See `uniform_direction` for more information. 
+ """ + return uniform_direction_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim < 1 or dim != int(dim): + raise ValueError("Dimension of vector must be specified, " + "and must be an integer greater than 0.") + + return int(dim) + + def rvs(self, dim, size=None, random_state=None): + """Draw random samples from S(N-1). + + Parameters + ---------- + dim : integer + Dimension of space (N). + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is ``None`` (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + rvs : ndarray + Random direction vectors + + """ + random_state = self._get_random_state(random_state) + if size is None: + size = np.array([], dtype=int) + size = np.atleast_1d(size) + + dim = self._process_parameters(dim) + + samples = _sample_uniform_direction(dim, size, random_state) + return samples + + +uniform_direction = uniform_direction_gen() + + +class uniform_direction_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen n-dimensional uniform direction distribution. 
+ + Parameters + ---------- + dim : int + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import uniform_direction + >>> x = uniform_direction(3) + >>> x.rvs() + + """ + self._dist = uniform_direction_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=None, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +def _sample_uniform_direction(dim, size, random_state): + """ + Private method to generate uniform directions + Reference: Marsaglia, G. (1972). "Choosing a Point from the Surface of a + Sphere". Annals of Mathematical Statistics. 43 (2): 645-646. + """ + samples_shape = np.append(size, dim) + samples = random_state.standard_normal(samples_shape) + samples /= np.linalg.norm(samples, axis=-1, keepdims=True) + return samples + + +_dirichlet_mn_doc_default_callparams = """\ +alpha : array_like + The concentration parameters. The number of entries along the last axis + determines the dimensionality of the distribution. Each entry must be + strictly positive. +n : int or array_like + The number of trials. Each element must be a strictly positive integer. 
+""" + +_dirichlet_mn_doc_frozen_callparams = "" + +_dirichlet_mn_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +dirichlet_mn_docdict_params = { + '_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_default_callparams, + '_doc_random_state': _doc_random_state +} + +dirichlet_mn_docdict_noparams = { + '_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_frozen_callparams, + '_doc_random_state': _doc_random_state +} + + +def _dirichlet_multinomial_check_parameters(alpha, n, x=None): + + alpha = np.asarray(alpha) + n = np.asarray(n) + + if x is not None: + # Ensure that `x` and `alpha` are arrays. If the shapes are + # incompatible, NumPy will raise an appropriate error. + try: + x, alpha = np.broadcast_arrays(x, alpha) + except ValueError as e: + msg = "`x` and `alpha` must be broadcastable." + raise ValueError(msg) from e + + x_int = np.floor(x) + if np.any(x < 0) or np.any(x != x_int): + raise ValueError("`x` must contain only non-negative integers.") + x = x_int + + if np.any(alpha <= 0): + raise ValueError("`alpha` must contain only positive values.") + + n_int = np.floor(n) + if np.any(n <= 0) or np.any(n != n_int): + raise ValueError("`n` must be a positive integer.") + n = n_int + + sum_alpha = np.sum(alpha, axis=-1) + sum_alpha, n = np.broadcast_arrays(sum_alpha, n) + + return (alpha, sum_alpha, n) if x is None else (alpha, sum_alpha, n, x) + + +class dirichlet_multinomial_gen(multi_rv_generic): + r"""A Dirichlet multinomial random variable. + + The Dirichlet multinomial distribution is a compound probability + distribution: it is the multinomial distribution with number of trials + `n` and class probabilities ``p`` randomly sampled from a Dirichlet + distribution with concentration parameters ``alpha``. + + Methods + ------- + logpmf(x, alpha, n): + Log of the probability mass function. + pmf(x, alpha, n): + Probability mass function. 
+ mean(alpha, n): + Mean of the Dirichlet multinomial distribution. + var(alpha, n): + Variance of the Dirichlet multinomial distribution. + cov(alpha, n): + The covariance of the Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + %(_doc_random_state)s + + See Also + -------- + scipy.stats.dirichlet : The dirichlet distribution. + scipy.stats.multinomial : The multinomial distribution. + + References + ---------- + .. [1] Dirichlet-multinomial distribution, Wikipedia, + https://www.wikipedia.org/wiki/Dirichlet-multinomial_distribution + + Examples + -------- + >>> from scipy.stats import dirichlet_multinomial + + Get the PMF + + >>> n = 6 # number of trials + >>> alpha = [3, 4, 5] # concentration parameters + >>> x = [1, 2, 3] # counts + >>> dirichlet_multinomial.pmf(x, alpha, n) + 0.08484162895927604 + + If the sum of category counts does not equal the number of trials, + the probability mass is zero. + + >>> dirichlet_multinomial.pmf(x, alpha, n=7) + 0.0 + + Get the log of the PMF + + >>> dirichlet_multinomial.logpmf(x, alpha, n) + -2.4669689491013327 + + Get the mean + + >>> dirichlet_multinomial.mean(alpha, n) + array([1.5, 2. , 2.5]) + + Get the variance + + >>> dirichlet_multinomial.var(alpha, n) + array([1.55769231, 1.84615385, 2.01923077]) + + Get the covariance + + >>> dirichlet_multinomial.cov(alpha, n) + array([[ 1.55769231, -0.69230769, -0.86538462], + [-0.69230769, 1.84615385, -1.15384615], + [-0.86538462, -1.15384615, 2.01923077]]) + + Alternatively, the object may be called (as a function) to fix the + `alpha` and `n` parameters, returning a "frozen" Dirichlet multinomial + random variable. + + >>> dm = dirichlet_multinomial(alpha, n) + >>> dm.pmf(x) + 0.08484162895927579 + + All methods are fully vectorized. Each element of `x` and `alpha` is + a vector (along the last axis), each element of `n` is an + integer (scalar), and the result is computed element-wise. 
+ + >>> x = [[1, 2, 3], [4, 5, 6]] + >>> alpha = [[1, 2, 3], [4, 5, 6]] + >>> n = [6, 15] + >>> dirichlet_multinomial.pmf(x, alpha, n) + array([0.06493506, 0.02626937]) + + >>> dirichlet_multinomial.cov(alpha, n).shape # both covariance matrices + (2, 3, 3) + + Broadcasting according to standard NumPy conventions is supported. Here, + we have four sets of concentration parameters (each a two element vector) + for each of three numbers of trials (each a scalar). + + >>> alpha = [[3, 4], [4, 5], [5, 6], [6, 7]] + >>> n = [[6], [7], [8]] + >>> dirichlet_multinomial.mean(alpha, n).shape + (3, 4, 2) + + """ + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, + dirichlet_mn_docdict_params) + + def __call__(self, alpha, n, seed=None): + return dirichlet_multinomial_frozen(alpha, n, seed=seed) + + def logpmf(self, x, alpha, n): + """The log of the probability mass function. + + Parameters + ---------- + x: ndarray + Category counts (non-negative integers). Must be broadcastable + with shape parameter ``alpha``. If multidimensional, the last axis + must correspond with the categories. + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray or scalar + Log of the probability mass function. + + """ + + a, Sa, n, x = _dirichlet_multinomial_check_parameters(alpha, n, x) + + out = np.asarray(loggamma(Sa) + loggamma(n + 1) - loggamma(n + Sa)) + out += (loggamma(x + a) - (loggamma(a) + loggamma(x + 1))).sum(axis=-1) + np.place(out, n != x.sum(axis=-1), -np.inf) + return out[()] + + def pmf(self, x, alpha, n): + """Probability mass function for a Dirichlet multinomial distribution. + + Parameters + ---------- + x: ndarray + Category counts (non-negative integers). Must be broadcastable + with shape parameter ``alpha``. If multidimensional, the last axis + must correspond with the categories. 
+ %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray or scalar + Probability mass function. + + """ + return np.exp(self.logpmf(x, alpha, n)) + + def mean(self, alpha, n): + """Mean of a Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray + Mean of a Dirichlet multinomial distribution. + + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + n, Sa = n[..., np.newaxis], Sa[..., np.newaxis] + return n * a / Sa + + def var(self, alpha, n): + """The variance of the Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: array_like + The variances of the components of the distribution. This is + the diagonal of the covariance matrix of the distribution. + + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + n, Sa = n[..., np.newaxis], Sa[..., np.newaxis] + return n * a / Sa * (1 - a/Sa) * (n + Sa) / (1 + Sa) + + def cov(self, alpha, n): + """Covariance matrix of a Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out : array_like + The covariance matrix of the distribution. 
+ + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + var = dirichlet_multinomial.var(a, n) + + n, Sa = n[..., np.newaxis, np.newaxis], Sa[..., np.newaxis, np.newaxis] + aiaj = a[..., :, np.newaxis] * a[..., np.newaxis, :] + cov = -n * aiaj / Sa ** 2 * (n + Sa) / (1 + Sa) + + ii = np.arange(cov.shape[-1]) + cov[..., ii, ii] = var + return cov + + +dirichlet_multinomial = dirichlet_multinomial_gen() + + +class dirichlet_multinomial_frozen(multi_rv_frozen): + def __init__(self, alpha, n, seed=None): + alpha, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + self.alpha = alpha + self.n = n + self._dist = dirichlet_multinomial_gen(seed) + + def logpmf(self, x): + return self._dist.logpmf(x, self.alpha, self.n) + + def pmf(self, x): + return self._dist.pmf(x, self.alpha, self.n) + + def mean(self): + return self._dist.mean(self.alpha, self.n) + + def var(self): + return self._dist.var(self.alpha, self.n) + + def cov(self): + return self._dist.cov(self.alpha, self.n) + + +# Set frozen generator docstrings from corresponding docstrings in +# dirichlet_multinomial and fill in default strings in class docstrings. +for name in ['logpmf', 'pmf', 'mean', 'var', 'cov']: + method = dirichlet_multinomial_gen.__dict__[name] + method_frozen = dirichlet_multinomial_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, dirichlet_mn_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + dirichlet_mn_docdict_params) + + +class vonmises_fisher_gen(multi_rv_generic): + r"""A von Mises-Fisher variable. + + The `mu` keyword specifies the mean direction vector. The `kappa` keyword + specifies the concentration parameter. + + Methods + ------- + pdf(x, mu=None, kappa=1) + Probability density function. + logpdf(x, mu=None, kappa=1) + Log of the probability density function. + rvs(mu=None, kappa=1, size=1, random_state=None) + Draw random samples from a von Mises-Fisher distribution. 
+ entropy(mu=None, kappa=1) + Compute the differential entropy of the von Mises-Fisher distribution. + fit(data) + Fit a von Mises-Fisher distribution to data. + + Parameters + ---------- + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + See Also + -------- + scipy.stats.vonmises : Von-Mises Fisher distribution in 2D on a circle + uniform_direction : uniform distribution on the surface of a hypersphere + + Notes + ----- + The von Mises-Fisher distribution is a directional distribution on the + surface of the unit hypersphere. The probability density + function of a unit vector :math:`\mathbf{x}` is + + .. math:: + + f(\mathbf{x}) = \frac{\kappa^{d/2-1}}{(2\pi)^{d/2}I_{d/2-1}(\kappa)} + \exp\left(\kappa \mathbf{\mu}^T\mathbf{x}\right), + + where :math:`\mathbf{\mu}` is the mean direction, :math:`\kappa` the + concentration parameter, :math:`d` the dimension and :math:`I` the + modified Bessel function of the first kind. As :math:`\mu` represents + a direction, it must be a unit vector or in other words, a point + on the hypersphere: :math:`\mathbf{\mu}\in S^{d-1}`. :math:`\kappa` is a + concentration parameter, which means that it must be positive + (:math:`\kappa>0`) and that the distribution becomes more narrow with + increasing :math:`\kappa`. In that sense, the reciprocal value + :math:`1/\kappa` resembles the variance parameter of the normal + distribution. + + The von Mises-Fisher distribution often serves as an analogue of the + normal distribution on the sphere. 
Intuitively, for unit vectors, a + useful distance measure is given by the angle :math:`\alpha` between + them. This is exactly what the scalar product + :math:`\mathbf{\mu}^T\mathbf{x}=\cos(\alpha)` in the + von Mises-Fisher probability density function describes: the angle + between the mean direction :math:`\mathbf{\mu}` and the vector + :math:`\mathbf{x}`. The larger the angle between them, the smaller the + probability to observe :math:`\mathbf{x}` for this particular mean + direction :math:`\mathbf{\mu}`. + + In dimensions 2 and 3, specialized algorithms are used for fast sampling + [2]_, [3]_. For dimensions of 4 or higher the rejection sampling algorithm + described in [4]_ is utilized. This implementation is partially based on + the geomstats package [5]_, [6]_. + + .. versionadded:: 1.11 + + References + ---------- + .. [1] Von Mises-Fisher distribution, Wikipedia, + https://en.wikipedia.org/wiki/Von_Mises%E2%80%93Fisher_distribution + .. [2] Mardia, K., and Jupp, P. Directional statistics. Wiley, 2000. + .. [3] J. Wenzel. Numerically stable sampling of the von Mises Fisher + distribution on S2. + https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf + .. [4] Wood, A. Simulation of the von mises fisher distribution. + Communications in statistics-simulation and computation 23, + 1 (1994), 157-164. https://doi.org/10.1080/03610919408813161 + .. [5] geomstats, Github. MIT License. Accessed: 06.01.2023. + https://github.com/geomstats/geomstats + .. [6] Miolane, N. et al. Geomstats: A Python Package for Riemannian + Geometry in Machine Learning. Journal of Machine Learning Research + 21 (2020). http://jmlr.org/papers/v21/19-027.html + + Examples + -------- + **Visualization of the probability density** + + Plot the probability density in three dimensions for increasing + concentration parameter. The density is calculated by the ``pdf`` + method. 
+ + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import vonmises_fisher + >>> from matplotlib.colors import Normalize + >>> n_grid = 100 + >>> u = np.linspace(0, np.pi, n_grid) + >>> v = np.linspace(0, 2 * np.pi, n_grid) + >>> u_grid, v_grid = np.meshgrid(u, v) + >>> vertices = np.stack([np.cos(v_grid) * np.sin(u_grid), + ... np.sin(v_grid) * np.sin(u_grid), + ... np.cos(u_grid)], + ... axis=2) + >>> x = np.outer(np.cos(v), np.sin(u)) + >>> y = np.outer(np.sin(v), np.sin(u)) + >>> z = np.outer(np.ones_like(u), np.cos(u)) + >>> def plot_vmf_density(ax, x, y, z, vertices, mu, kappa): + ... vmf = vonmises_fisher(mu, kappa) + ... pdf_values = vmf.pdf(vertices) + ... pdfnorm = Normalize(vmin=pdf_values.min(), vmax=pdf_values.max()) + ... ax.plot_surface(x, y, z, rstride=1, cstride=1, + ... facecolors=plt.cm.viridis(pdfnorm(pdf_values)), + ... linewidth=0) + ... ax.set_aspect('equal') + ... ax.view_init(azim=-130, elev=0) + ... ax.axis('off') + ... ax.set_title(rf"$\kappa={kappa}$") + >>> fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 4), + ... subplot_kw={"projection": "3d"}) + >>> left, middle, right = axes + >>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0]) + >>> plot_vmf_density(left, x, y, z, vertices, mu, 5) + >>> plot_vmf_density(middle, x, y, z, vertices, mu, 20) + >>> plot_vmf_density(right, x, y, z, vertices, mu, 100) + >>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0, right=1.0, wspace=0.) + >>> plt.show() + + As we increase the concentration parameter, the points are getting more + clustered together around the mean direction. + + **Sampling** + + Draw 5 samples from the distribution using the ``rvs`` method resulting + in a 5x3 array. 
+ + >>> rng = np.random.default_rng() + >>> mu = np.array([0, 0, 1]) + >>> samples = vonmises_fisher(mu, 20).rvs(5, random_state=rng) + >>> samples + array([[ 0.3884594 , -0.32482588, 0.86231516], + [ 0.00611366, -0.09878289, 0.99509023], + [-0.04154772, -0.01637135, 0.99900239], + [-0.14613735, 0.12553507, 0.98126695], + [-0.04429884, -0.23474054, 0.97104814]]) + + These samples are unit vectors on the sphere :math:`S^2`. To verify, + let us calculate their euclidean norms: + + >>> np.linalg.norm(samples, axis=1) + array([1., 1., 1., 1., 1.]) + + Plot 20 observations drawn from the von Mises-Fisher distribution for + increasing concentration parameter :math:`\kappa`. The red dot highlights + the mean direction :math:`\mu`. + + >>> def plot_vmf_samples(ax, x, y, z, mu, kappa): + ... vmf = vonmises_fisher(mu, kappa) + ... samples = vmf.rvs(20) + ... ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0, + ... alpha=0.2) + ... ax.scatter(samples[:, 0], samples[:, 1], samples[:, 2], c='k', s=5) + ... ax.scatter(mu[0], mu[1], mu[2], c='r', s=30) + ... ax.set_aspect('equal') + ... ax.view_init(azim=-130, elev=0) + ... ax.axis('off') + ... ax.set_title(rf"$\kappa={kappa}$") + >>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0]) + >>> fig, axes = plt.subplots(nrows=1, ncols=3, + ... subplot_kw={"projection": "3d"}, + ... figsize=(9, 4)) + >>> left, middle, right = axes + >>> plot_vmf_samples(left, x, y, z, mu, 5) + >>> plot_vmf_samples(middle, x, y, z, mu, 20) + >>> plot_vmf_samples(right, x, y, z, mu, 100) + >>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0, + ... right=1.0, wspace=0.) + >>> plt.show() + + The plots show that with increasing concentration :math:`\kappa` the + resulting samples are centered more closely around the mean direction. + + **Fitting the distribution parameters** + + The distribution can be fitted to data using the ``fit`` method returning + the estimated parameters. 
As a toy example let's fit the distribution to + samples drawn from a known von Mises-Fisher distribution. + + >>> mu, kappa = np.array([0, 0, 1]), 20 + >>> samples = vonmises_fisher(mu, kappa).rvs(1000, random_state=rng) + >>> mu_fit, kappa_fit = vonmises_fisher.fit(samples) + >>> mu_fit, kappa_fit + (array([0.01126519, 0.01044501, 0.99988199]), 19.306398751730995) + + We see that the estimated parameters `mu_fit` and `kappa_fit` are + very close to the ground truth parameters. + + """ + def __init__(self, seed=None): + super().__init__(seed) + + def __call__(self, mu=None, kappa=1, seed=None): + """Create a frozen von Mises-Fisher distribution. + + See `vonmises_fisher_frozen` for more information. + """ + return vonmises_fisher_frozen(mu, kappa, seed=seed) + + def _process_parameters(self, mu, kappa): + """ + Infer dimensionality from mu and ensure that mu is a one-dimensional + unit vector and kappa positive. + """ + mu = np.asarray(mu) + if mu.ndim > 1: + raise ValueError("'mu' must have one-dimensional shape.") + if not np.allclose(np.linalg.norm(mu), 1.): + raise ValueError("'mu' must be a unit vector of norm 1.") + if not mu.size > 1: + raise ValueError("'mu' must have at least two entries.") + kappa_error_msg = "'kappa' must be a positive scalar." + if not np.isscalar(kappa) or kappa < 0: + raise ValueError(kappa_error_msg) + if float(kappa) == 0.: + raise ValueError("For 'kappa=0' the von Mises-Fisher distribution " + "becomes the uniform distribution on the sphere " + "surface. Consider using " + "'scipy.stats.uniform_direction' instead.") + dim = mu.size + + return dim, mu, kappa + + def _check_data_vs_dist(self, x, dim): + if x.shape[-1] != dim: + raise ValueError("The dimensionality of the last axis of 'x' must " + "match the dimensionality of the " + "von Mises Fisher distribution.") + if not np.allclose(np.linalg.norm(x, axis=-1), 1.): + msg = "'x' must be unit vectors of norm 1 along last dimension." 
+ raise ValueError(msg) + + def _log_norm_factor(self, dim, kappa): + # normalization factor is given by + # c = kappa**(dim/2-1)/((2*pi)**(dim/2)*I[dim/2-1](kappa)) + # = kappa**(dim/2-1)*exp(-kappa) / + # ((2*pi)**(dim/2)*I[dim/2-1](kappa)*exp(-kappa) + # = kappa**(dim/2-1)*exp(-kappa) / + # ((2*pi)**(dim/2)*ive[dim/2-1](kappa) + # Then the log is given by + # log c = 1/2*(dim -1)*log(kappa) - kappa - -1/2*dim*ln(2*pi) - + # ive[dim/2-1](kappa) + halfdim = 0.5 * dim + return (0.5 * (dim - 2)*np.log(kappa) - halfdim * _LOG_2PI - + np.log(ive(halfdim - 1, kappa)) - kappa) + + def _logpdf(self, x, dim, mu, kappa): + """Log of the von Mises-Fisher probability density function. + + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + x = np.asarray(x) + self._check_data_vs_dist(x, dim) + dotproducts = np.einsum('i,...i->...', mu, x) + return self._log_norm_factor(dim, kappa) + kappa * dotproducts + + def logpdf(self, x, mu=None, kappa=1): + """Log of the von Mises-Fisher probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + mu : array_like, default: None + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float, default: 1 + Concentration parameter. Must be positive. + + Returns + ------- + logpdf : ndarray or scalar + Log of the probability density function evaluated at `x`. + + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + return self._logpdf(x, dim, mu, kappa) + + def pdf(self, x, mu=None, kappa=1): + """Von Mises-Fisher probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the probability + density function. 
The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x`. + + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + return np.exp(self._logpdf(x, dim, mu, kappa)) + + def _rvs_2d(self, mu, kappa, size, random_state): + """ + In 2D, the von Mises-Fisher distribution reduces to the + von Mises distribution which can be efficiently sampled by numpy. + This method is much faster than the general rejection + sampling based algorithm. + + """ + mean_angle = np.arctan2(mu[1], mu[0]) + angle_samples = random_state.vonmises(mean_angle, kappa, size=size) + samples = np.stack([np.cos(angle_samples), np.sin(angle_samples)], + axis=-1) + return samples + + def _rvs_3d(self, kappa, size, random_state): + """ + Generate samples from a von Mises-Fisher distribution + with mu = [1, 0, 0] and kappa. Samples then have to be + rotated towards the desired mean direction mu. + This method is much faster than the general rejection + sampling based algorithm. + Reference: https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf + + """ + if size is None: + sample_size = 1 + else: + sample_size = size + + # compute x coordinate acc. to equation from section 3.1 + x = random_state.random(sample_size) + x = 1. + np.log(x + (1. - x) * np.exp(-2 * kappa))/kappa + + # (y, z) are random 2D vectors that only have to be + # normalized accordingly. Then (x, y z) follow a VMF distribution + temp = np.sqrt(1. 
- np.square(x)) + uniformcircle = _sample_uniform_direction(2, sample_size, random_state) + samples = np.stack([x, temp * uniformcircle[..., 0], + temp * uniformcircle[..., 1]], + axis=-1) + if size is None: + samples = np.squeeze(samples) + return samples + + def _rejection_sampling(self, dim, kappa, size, random_state): + """ + Generate samples from a n-dimensional von Mises-Fisher distribution + with mu = [1, 0, ..., 0] and kappa via rejection sampling. + Samples then have to be rotated towards the desired mean direction mu. + Reference: https://doi.org/10.1080/03610919408813161 + """ + dim_minus_one = dim - 1 + # calculate number of requested samples + if size is not None: + if not np.iterable(size): + size = (size, ) + n_samples = math.prod(size) + else: + n_samples = 1 + # calculate envelope for rejection sampler (eq. 4) + sqrt = np.sqrt(4 * kappa ** 2. + dim_minus_one ** 2) + envelop_param = (-2 * kappa + sqrt) / dim_minus_one + if envelop_param == 0: + # the regular formula suffers from loss of precision for high + # kappa. This can only be detected by checking for 0 here. + # Workaround: expansion for sqrt variable + # https://www.wolframalpha.com/input?i=sqrt%284*x%5E2%2Bd%5E2%29 + # e = (-2 * k + sqrt(k**2 + d**2)) / d + # ~ (-2 * k + 2 * k + d**2/(4 * k) - d**4/(64 * k**3)) / d + # = d/(4 * k) - d**3/(64 * k**3) + envelop_param = (dim_minus_one/4 * kappa**-1. + - dim_minus_one**3/64 * kappa**-3.) + # reference step 0 + node = (1. - envelop_param) / (1. + envelop_param) + # t = ln(1 - ((1-x)/(1+x))**2) + # = ln(4 * x / (1+x)**2) + # = ln(4) + ln(x) - 2*log1p(x) + correction = (kappa * node + dim_minus_one + * (np.log(4) + np.log(envelop_param) + - 2 * np.log1p(envelop_param))) + n_accepted = 0 + x = np.zeros((n_samples, )) + halfdim = 0.5 * dim_minus_one + # main loop + while n_accepted < n_samples: + # generate candidates acc. 
to reference step 1 + sym_beta = random_state.beta(halfdim, halfdim, + size=n_samples - n_accepted) + coord_x = (1 - (1 + envelop_param) * sym_beta) / ( + 1 - (1 - envelop_param) * sym_beta) + # accept or reject: reference step 2 + # reformulation for numerical stability: + # t = ln(1 - (1-x)/(1+x) * y) + # = ln((1 + x - y +x*y)/(1 +x)) + accept_tol = random_state.random(n_samples - n_accepted) + criterion = ( + kappa * coord_x + + dim_minus_one * (np.log((1 + envelop_param - coord_x + + coord_x * envelop_param) / (1 + envelop_param))) + - correction) > np.log(accept_tol) + accepted_iter = np.sum(criterion) + x[n_accepted:n_accepted + accepted_iter] = coord_x[criterion] + n_accepted += accepted_iter + # concatenate x and remaining coordinates: step 3 + coord_rest = _sample_uniform_direction(dim_minus_one, n_accepted, + random_state) + coord_rest = np.einsum( + '...,...i->...i', np.sqrt(1 - x ** 2), coord_rest) + samples = np.concatenate([x[..., None], coord_rest], axis=1) + # reshape output to (size, dim) + if size is not None: + samples = samples.reshape(size + (dim, )) + else: + samples = np.squeeze(samples) + return samples + + def _rotate_samples(self, samples, mu, dim): + """A QR decomposition is used to find the rotation that maps the + north pole (1, 0,...,0) to the vector mu. This rotation is then + applied to all samples. + + Parameters + ---------- + samples: array_like, shape = [..., n] + mu : array-like, shape=[n, ] + Point to parametrise the rotation. + + Returns + ------- + samples : rotated samples + + """ + base_point = np.zeros((dim, )) + base_point[0] = 1. 
+ embedded = np.concatenate([mu[None, :], np.zeros((dim - 1, dim))]) + rotmatrix, _ = np.linalg.qr(np.transpose(embedded)) + if np.allclose(np.matmul(rotmatrix, base_point[:, None])[:, 0], mu): + rotsign = 1 + else: + rotsign = -1 + + # apply rotation + samples = np.einsum('ij,...j->...i', rotmatrix, samples) * rotsign + return samples + + def _rvs(self, dim, mu, kappa, size, random_state): + if dim == 2: + samples = self._rvs_2d(mu, kappa, size, random_state) + elif dim == 3: + samples = self._rvs_3d(kappa, size, random_state) + else: + samples = self._rejection_sampling(dim, kappa, size, + random_state) + + if dim != 2: + samples = self._rotate_samples(samples, mu, dim) + return samples + + def rvs(self, mu=None, kappa=1, size=1, random_state=None): + """Draw random samples from a von Mises-Fisher distribution. + + Parameters + ---------- + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, np.random.RandomState, np.random.Generator}, + optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`, `N`), where `N` is the + dimension of the distribution. 
+ + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + random_state = self._get_random_state(random_state) + samples = self._rvs(dim, mu, kappa, size, random_state) + return samples + + def _entropy(self, dim, kappa): + halfdim = 0.5 * dim + return (-self._log_norm_factor(dim, kappa) - kappa * + ive(halfdim, kappa) / ive(halfdim - 1, kappa)) + + def entropy(self, mu=None, kappa=1): + """Compute the differential entropy of the von Mises-Fisher + distribution. + + Parameters + ---------- + mu : array_like, default: None + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float, default: 1 + Concentration parameter. Must be positive. + + Returns + ------- + h : scalar + Entropy of the von Mises-Fisher distribution. + + """ + dim, _, kappa = self._process_parameters(mu, kappa) + return self._entropy(dim, kappa) + + def fit(self, x): + """Fit the von Mises-Fisher distribution to data. + + Parameters + ---------- + x : array-like + Data the distribution is fitted to. Must be two dimensional. + The second axis of `x` must be unit vectors of norm 1 and + determine the dimensionality of the fitted + von Mises-Fisher distribution. + + Returns + ------- + mu : ndarray + Estimated mean direction. + kappa : float + Estimated concentration parameter. + + """ + # validate input data + x = np.asarray(x) + if x.ndim != 2: + raise ValueError("'x' must be two dimensional.") + if not np.allclose(np.linalg.norm(x, axis=-1), 1.): + msg = "'x' must be unit vectors of norm 1 along last dimension." 
+ raise ValueError(msg) + dim = x.shape[-1] + + # mu is simply the directional mean + dirstats = directional_stats(x) + mu = dirstats.mean_direction + r = dirstats.mean_resultant_length + + # kappa is the solution to the equation: + # r = I[dim/2](kappa) / I[dim/2 -1](kappa) + # = I[dim/2](kappa) * exp(-kappa) / I[dim/2 -1](kappa) * exp(-kappa) + # = ive(dim/2, kappa) / ive(dim/2 -1, kappa) + + halfdim = 0.5 * dim + + def solve_for_kappa(kappa): + bessel_vals = ive([halfdim, halfdim - 1], kappa) + return bessel_vals[0]/bessel_vals[1] - r + + root_res = root_scalar(solve_for_kappa, method="brentq", + bracket=(1e-8, 1e9)) + kappa = root_res.root + return mu, kappa + + +vonmises_fisher = vonmises_fisher_gen() + + +class vonmises_fisher_frozen(multi_rv_frozen): + def __init__(self, mu=None, kappa=1, seed=None): + """Create a frozen von Mises-Fisher distribution. + + Parameters + ---------- + mu : array_like, default: None + Mean direction of the distribution. + kappa : float, default: 1 + Concentration parameter. Must be positive. + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + """ + self._dist = vonmises_fisher_gen(seed) + self.dim, self.mu, self.kappa = ( + self._dist._process_parameters(mu, kappa) + ) + + def logpdf(self, x): + """ + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + + Returns + ------- + logpdf : ndarray or scalar + Log of probability density function evaluated at `x`. 
+ + """ + return self._dist._logpdf(x, self.dim, self.mu, self.kappa) + + def pdf(self, x): + """ + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x`. + + """ + return np.exp(self.logpdf(x)) + + def rvs(self, size=1, random_state=None): + """Draw random variates from the Von Mises-Fisher distribution. + + Parameters + ---------- + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the distribution. + + """ + random_state = self._dist._get_random_state(random_state) + return self._dist._rvs(self.dim, self.mu, self.kappa, size, + random_state) + + def entropy(self): + """ + Calculate the differential entropy of the von Mises-Fisher + distribution. + + Returns + ------- + h: float + Entropy of the Von Mises-Fisher distribution. + + """ + return self._dist._entropy(self.dim, self.kappa) + + +class normal_inverse_gamma_gen(multi_rv_generic): + r"""Normal-inverse-gamma distribution. 
+ + The normal-inverse-gamma distribution is the conjugate prior of a normal + distribution with unknown mean and variance. + + Methods + ------- + pdf(x, s2, mu=0, lmbda=1, a=1, b=1) + Probability density function. + logpdf(x, s2, mu=0, lmbda=1, a=1, b=1) + Log of the probability density function. + mean(mu=0, lmbda=1, a=1, b=1) + Distribution mean. + var(mu=0, lmbda=1, a=1, b=1) + Distribution variance. + rvs(mu=0, lmbda=1, a=1, b=1, size=None, random_state=None) + Draw random samples. + + Parameters + ---------- + mu, lmbda, a, b : array_like + Shape parameters of the distribution. See notes. + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + See Also + -------- + norm + invgamma + + Notes + ----- + + The probability density function of `normal_inverse_gamma` is: + + .. math:: + + f(x, \sigma^2; \mu, \lambda, \alpha, \beta) = + \frac{\sqrt{\lambda}}{\sqrt{2 \pi \sigma^2}} + \frac{\beta^\alpha}{\Gamma(\alpha)} + \left( \frac{1}{\sigma^2} \right)^{\alpha + 1} + \exp \left(- \frac{2 \beta + \lambda (x - \mu)^2} {2 \sigma^2} \right) + + where all parameters are real and finite, and :math:`\sigma^2 > 0`, + :math:`\lambda > 0`, :math:`\alpha > 0`, and :math:`\beta > 0`. + + Methods ``normal_inverse_gamma.pdf`` and ``normal_inverse_gamma.logpdf`` + accept `x` and `s2` for arguments :math:`x` and :math:`\sigma^2`. + All methods accept `mu`, `lmbda`, `a`, and `b` for shape parameters + :math:`\mu`, :math:`\lambda`, :math:`\alpha`, and :math:`\beta`, + respectively. + + .. versionadded:: 1.15 + + References + ---------- + .. 
[1] Normal-inverse-gamma distribution, Wikipedia, + https://en.wikipedia.org/wiki/Normal-inverse-gamma_distribution + + Examples + -------- + Suppose we wish to investigate the relationship between the + normal-inverse-gamma distribution and the inverse gamma distribution. + + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng(527484872345) + >>> mu, lmbda, a, b = 0, 1, 20, 20 + >>> norm_inv_gamma = stats.normal_inverse_gamma(mu, lmbda, a, b) + >>> inv_gamma = stats.invgamma(a, scale=b) + + One approach is to compare the distribution of the `s2` elements of + random variates against the PDF of an inverse gamma distribution. + + >>> _, s2 = norm_inv_gamma.rvs(size=10000, random_state=rng) + >>> bins = np.linspace(s2.min(), s2.max(), 50) + >>> plt.hist(s2, bins=bins, density=True, label='Frequency density') + >>> s2 = np.linspace(s2.min(), s2.max(), 300) + >>> plt.plot(s2, inv_gamma.pdf(s2), label='PDF') + >>> plt.xlabel(r'$\sigma^2$') + >>> plt.ylabel('Frequency density / PMF') + >>> plt.show() + + Similarly, we can compare the marginal distribution of `s2` against + an inverse gamma distribution. + + >>> from scipy.integrate import quad_vec + >>> from scipy import integrate + >>> s2 = np.linspace(0.5, 3, 6) + >>> res = quad_vec(lambda x: norm_inv_gamma.pdf(x, s2), -np.inf, np.inf)[0] + >>> np.allclose(res, inv_gamma.pdf(s2)) + True + + The sample mean is comparable to the mean of the distribution. 
+ + >>> x, s2 = norm_inv_gamma.rvs(size=10000, random_state=rng) + >>> x.mean(), s2.mean() + (np.float64(-0.005254750127304425), np.float64(1.050438111436508)) + >>> norm_inv_gamma.mean() + (np.float64(0.0), np.float64(1.0526315789473684)) + + Similarly, for the variance: + + >>> x.var(ddof=1), s2.var(ddof=1) + (np.float64(1.0546150578185023), np.float64(0.061829865266330754)) + >>> norm_inv_gamma.var() + (np.float64(1.0526315789473684), np.float64(0.061557402277623886)) + + """ + def rvs(self, mu=0, lmbda=1, a=1, b=1, size=None, random_state=None): + """Draw random samples from the distribution. + + Parameters + ---------- + mu, lmbda, a, b : array_like, optional + Shape parameters. `lmbda`, `a`, and `b` must be greater + than zero. + size : int or tuple of ints, optional + Shape of samples to draw. + random_state : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `random_state` is `None`, the `~np.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, seeded + with `random_state`. + If `random_state` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Returns + ------- + x, s2 : ndarray + Random variates. + + """ + random_state = self._get_random_state(random_state) + s2 = invgamma(a, scale=b).rvs(size=size, random_state=random_state) + scale = (s2 / lmbda)**0.5 + x = norm(loc=mu, scale=scale).rvs(size=size, random_state=random_state) + dtype = np.result_type(1.0, mu, lmbda, a, b) + return x.astype(dtype), s2.astype(dtype) + + def _logpdf(self, x, s2, mu, lmbda, a, b): + t1 = 0.5 * (np.log(lmbda) - np.log(2 * np.pi * s2)) + t2 = a*np.log(b) - special.gammaln(a).astype(a.dtype) + t3 = -(a + 1) * np.log(s2) + t4 = -(2*b + lmbda*(x - mu)**2) / (2*s2) + return t1 + t2 + t3 + t4 + + def logpdf(self, x, s2, mu=0, lmbda=1, a=1, b=1): + """Log of the probability density function. 
+ + Parameters + ---------- + x, s2 : array_like + Arguments. `s2` must be greater than zero. + mu, lmbda, a, b : array_like, optional + Shape parameters. `lmbda`, `a`, and `b` must be greater + than zero. + + Returns + ------- + logpdf : ndarray or scalar + Log of the probability density function. + + """ + invalid, args = self._process_parameters_pdf(x, s2, mu, lmbda, a, b) + s2 = args[1] + # Keep it simple for now; lazyselect later, perhaps. + with np.errstate(all='ignore'): + logpdf = np.asarray(self._logpdf(*args)) + logpdf[s2 <= 0] = -np.inf + logpdf[invalid] = np.nan + return logpdf[()] + + def _pdf(self, x, s2, mu, lmbda, a, b): + t1 = np.sqrt(lmbda / (2 * np.pi * s2)) + t2 = b**a / special.gamma(a).astype(a.dtype) + t3 = (1 / s2)**(a + 1) + t4 = np.exp(-(2*b + lmbda*(x - mu)**2) / (2*s2)) + return t1 * t2 * t3 * t4 + + def pdf(self, x, s2, mu=0, lmbda=1, a=1, b=1): + """The probability density function. + + Parameters + ---------- + x, s2 : array_like + Arguments. `s2` must be greater than zero. + mu, lmbda, a, b : array_like, optional + Shape parameters. `lmbda`, `a`, and `b` must be greater + than zero. + + Returns + ------- + logpdf : ndarray or scalar + The probability density function. + + """ + invalid, args = self._process_parameters_pdf(x, s2, mu, lmbda, a, b) + s2 = args[1] + # Keep it simple for now; lazyselect later, perhaps. + with np.errstate(all='ignore'): + pdf = np.asarray(self._pdf(*args)) + pdf[s2 <= 0] = 0 + pdf[invalid] = np.nan + return pdf[()] + + def mean(self, mu=0, lmbda=1, a=1, b=1): + """The mean of the distribution. + + Parameters + ---------- + mu, lmbda, a, b : array_like, optional + Shape parameters. `lmbda` and `b` must be greater + than zero, and `a` must be greater than one. + + Returns + ------- + x, s2 : ndarray + The mean of the distribution. 
+ + """ + invalid, args = self._process_shapes(mu, lmbda, a, b) + mu, lmbda, a, b = args + invalid |= ~(a > 1) + mean_x = np.asarray(mu).copy() + mean_s2 = np.asarray(b / (a - 1)) + mean_x[invalid] = np.nan + mean_s2[invalid] = np.nan + return mean_x[()], mean_s2[()] + + def var(self, mu=0, lmbda=1, a=1, b=1): + """The variance of the distribution. + + Parameters + ---------- + mu, lmbda, a, b : array_like, optional + Shape parameters. `lmbda` and `b` must be greater + than zero, and `a` must be greater than two. + + Returns + ------- + x, s2 : ndarray + The variance of the distribution. + + """ + invalid, args = self._process_shapes(mu, lmbda, a, b) + mu, lmbda, a, b = args + invalid_x = invalid | ~(a > 1) + invalid_s2 = invalid | ~(a > 2) + var_x = b / ((a - 1) * lmbda) + var_s2 = b**2 / ((a - 1)**2 * (a - 2)) + var_x, var_s2 = np.asarray(var_x), np.asarray(var_s2) + var_x[invalid_x] = np.nan + var_s2[invalid_s2] = np.nan + return var_x[()], var_s2[()] + + def _process_parameters_pdf(self, x, s2, mu, lmbda, a, b): + args = np.broadcast_arrays(x, s2, mu, lmbda, a, b) + dtype = np.result_type(1.0, *(arg.dtype for arg in args)) + args = [arg.astype(dtype, copy=False) for arg in args] + x, s2, mu, lmbda, a, b = args + invalid = ~((lmbda > 0) & (a > 0) & (b > 0)) + return invalid, args + + def _process_shapes(self, mu, lmbda, a, b): + args = np.broadcast_arrays(mu, lmbda, a, b) + dtype = np.result_type(1.0, *(arg.dtype for arg in args)) + args = [arg.astype(dtype, copy=False) for arg in args] + mu, lmbda, a, b = args + invalid = ~((lmbda > 0) & (a > 0) & (b > 0)) + return invalid, args + + def __call__(self, mu=0, lmbda=1, a=1, b=1, seed=None): + return normal_inverse_gamma_frozen(mu, lmbda, a, b, seed=seed) + + +normal_inverse_gamma = normal_inverse_gamma_gen() + + +class normal_inverse_gamma_frozen(multi_rv_frozen): + + def __init__(self, mu=0, lmbda=1, a=1, b=1, seed=None): + self._dist = normal_inverse_gamma_gen(seed) + self._shapes = mu, lmbda, a, b + + def 
logpdf(self, x, s2): + return self._dist.logpdf(x, s2, *self._shapes) + + def pdf(self, x, s2): + return self._dist.pdf(x, s2, *self._shapes) + + def mean(self): + return self._dist.mean(*self._shapes) + + def var(self): + return self._dist.var(*self._shapes) + + def rvs(self, size=None, random_state=None): + return self._dist.rvs(*self._shapes, size=size, random_state=random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# normal_inverse_gamma_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'var', 'rvs']: + method = normal_inverse_gamma_gen.__dict__[name] + method_frozen = normal_inverse_gamma_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + mvn_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params) diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi b/phi4/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1006385a43179478a9a4a32ae5f825aa5b8b35c4 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi @@ -0,0 +1,54 @@ +import numpy as np +from scipy._lib._util import DecimalNumber, IntNumber + + +def _cy_wrapper_centered_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_wrap_around_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_mixture_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_l2_star_discrepancy( + sample: np.ndarray, + iterative: bool, + workers: IntNumber, +) -> float: ... + + +def _cy_wrapper_update_discrepancy( + x_new_view: np.ndarray, + sample_view: np.ndarray, + initial_disc: DecimalNumber, +) -> float: ... 
+ + +def _cy_van_der_corput( + n: IntNumber, + base: IntNumber, + start_index: IntNumber, + workers: IntNumber, +) -> np.ndarray: ... + + +def _cy_van_der_corput_scrambled( + n: IntNumber, + base: IntNumber, + start_index: IntNumber, + permutations: np.ndarray, + workers: IntNumber, +) -> np.ndarray: ... diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_sobol.pyi b/phi4/lib/python3.10/site-packages/scipy/stats/_sobol.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7ca5e3a9c1a142b25ac26401e9ab1cb6726c877f --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_sobol.pyi @@ -0,0 +1,54 @@ +import numpy as np +from scipy._lib._util import IntNumber +from typing import Literal + +def _initialize_v( + v : np.ndarray, + dim : IntNumber, + bits: IntNumber +) -> None: ... + +def _cscramble ( + dim : IntNumber, + bits: IntNumber, + ltm : np.ndarray, + sv: np.ndarray +) -> None: ... + +def _fill_p_cumulative( + p: np.ndarray, + p_cumulative: np.ndarray +) -> None: ... + +def _draw( + n : IntNumber, + num_gen: IntNumber, + dim: IntNumber, + scale: float, + sv: np.ndarray, + quasi: np.ndarray, + sample: np.ndarray + ) -> None: ... + +def _fast_forward( + n: IntNumber, + num_gen: IntNumber, + dim: IntNumber, + sv: np.ndarray, + quasi: np.ndarray + ) -> None: ... + +def _categorize( + draws: np.ndarray, + p_cumulative: np.ndarray, + result: np.ndarray + ) -> None: ... + +_MAXDIM: Literal[21201] +_MAXDEG: Literal[18] + +def _test_find_index( + p_cumulative: np.ndarray, + size: int, + value: float + ) -> int: ... 
diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_stats.pxd b/phi4/lib/python3.10/site-packages/scipy/stats/_stats.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e01565f75fe232446e4b8b0b50fdf645c8506108 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_stats.pxd @@ -0,0 +1,10 @@ +# destined to be used in a LowLevelCallable + +cdef double _geninvgauss_pdf(double x, void *user_data) noexcept nogil +cdef double _studentized_range_cdf(int n, double[2] x, void *user_data) noexcept nogil +cdef double _studentized_range_cdf_asymptotic(double z, void *user_data) noexcept nogil +cdef double _studentized_range_pdf(int n, double[2] x, void *user_data) noexcept nogil +cdef double _studentized_range_pdf_asymptotic(double z, void *user_data) noexcept nogil +cdef double _studentized_range_moment(int n, double[3] x_arg, void *user_data) noexcept nogil +cdef double _genhyperbolic_pdf(double x, void *user_data) noexcept nogil +cdef double _genhyperbolic_logpdf(double x, void *user_data) noexcept nogil diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py b/phi4/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py new file mode 100644 index 0000000000000000000000000000000000000000..6900eba1fa6157c9de956255c49f5cbce0029c11 --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py @@ -0,0 +1,303 @@ +import warnings +import numpy as np +from . import distributions +from .._lib._bunch import _make_tuple_bunch +from ._stats_pythran import siegelslopes as siegelslopes_pythran + +__all__ = ['_find_repeats', 'theilslopes', 'siegelslopes'] + +# This is not a namedtuple for backwards compatibility. 
TheilslopesResult = _make_tuple_bunch('TheilslopesResult',
                                      ['slope', 'intercept',
                                       'low_slope', 'high_slope'])
SiegelslopesResult = _make_tuple_bunch('SiegelslopesResult',
                                       ['slope', 'intercept'])


def theilslopes(y, x=None, alpha=0.95, method='separate'):
    r"""Compute the Theil-Sen estimator (robust line fit) for points (x, y).

    The slope is estimated as the median of all slopes between paired
    values, which makes the fit robust to outliers.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, ``arange(len(y))`` is used.
    alpha : float, optional
        Confidence degree between 0 and 1 (default 0.95). Symmetric about
        0.5, i.e. 0.1 and 0.9 both request the 90% confidence interval.
    method : {'joint', 'separate'}, optional
        How the intercept is estimated: 'joint' uses
        ``median(y - slope*x)``; 'separate' (the default) uses
        ``median(y) - slope*median(x)``.

        .. versionadded:: 1.8.0

    Returns
    -------
    result : ``TheilslopesResult`` instance
        Object with attributes ``slope``, ``intercept``, ``low_slope`` and
        ``high_slope``. For backwards compatibility it also unpacks like a
        4-tuple in that order::

            slope, intercept, low_slope, high_slope = theilslopes(y, x)

    See Also
    --------
    siegelslopes : a similar technique using repeated medians

    Notes
    -----
    Follows Sen (1968); the confidence interval on the slope implements
    equation (2.6) there. No confidence interval is given for the
    intercept, as that question is not addressed in the reference.
    """
    if method not in ('joint', 'separate'):
        raise ValueError("method must be either 'joint' or 'separate'."
                         f"'{method}' is invalid.")
    # Work on float copies; `_find_repeats` may sort its input in place.
    y = np.array(y, dtype=float, copy=True).ravel()
    x = (np.arange(len(y), dtype=float) if x is None
         else np.array(x, dtype=float, copy=True).ravel())
    if len(x) != len(y):
        raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})")

    # All pairwise slopes, keeping each pair once via the dx > 0 mask.
    dx = x[:, np.newaxis] - x
    dy = y[:, np.newaxis] - y
    keep = dx > 0
    slopes = dy[keep] / dx[keep]
    if not slopes.size:
        warnings.warn("All `x` coordinates are identical.",
                      RuntimeWarning, stacklevel=2)
    slopes.sort()
    medslope = np.median(slopes)
    if method == 'joint':
        medinter = np.median(y - medslope * x)
    else:
        medinter = np.median(y) - medslope * np.median(x)

    # Confidence interval on the slope; fold alpha so it is one-sided.
    if alpha > 0.5:
        alpha = 1. - alpha
    z = distributions.norm.ppf(alpha / 2.)
    # Tie corrections from repeated x and y values, Sen (1968) eq. 2.6.
    _, nxreps = _find_repeats(x)
    _, nyreps = _find_repeats(y)
    nt = len(slopes)   # N in Sen (1968)
    ny = len(y)        # n in Sen (1968)
    sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
                     sum(k * (k-1) * (2*k + 5) for k in nxreps) -
                     sum(k * (k-1) * (2*k + 5) for k in nyreps))
    # Rank positions of the interval bounds within the sorted slopes.
    try:
        sigma = np.sqrt(sigsq)
        Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
        Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
        delta = slopes[[Rl, Ru]]
    except (ValueError, IndexError):
        delta = (np.nan, np.nan)

    return TheilslopesResult(slope=medslope, intercept=medinter,
                             low_slope=delta[0], high_slope=delta[1])


def _find_repeats(arr):
    """Return (repeated values, their counts) for a 1-D array.

    NB: may sort a float64 input in place; callers pass copies.
    """
    if len(arr) == 0:
        return np.array(0, np.float64), np.array(0, np.intp)

    # Cast kept for historical (Fortran-era) reasons; see upstream note.
    arr = np.asarray(arr, np.float64).ravel()
    arr.sort()

    # Boundaries of runs of equal values (same scheme as np.unique).
    is_new = np.concatenate(([True], arr[1:] != arr[:-1]))
    starts = np.concatenate((np.flatnonzero(is_new), [arr.size]))
    counts = np.diff(starts)
    repeated = counts > 1
    return arr[is_new][repeated], counts[repeated]


def siegelslopes(y, x=None, method="hierarchical"):
    r"""Compute the Siegel repeated-median estimator for points (x, y).

    Robust linear regression via repeated medians (Siegel 1982), with an
    asymptotic breakdown point of 50%.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, ``arange(len(y))`` is used.
    method : {'hierarchical', 'separate'}
        'hierarchical' (default) derives the intercept from the estimated
        slope; 'separate' estimates the intercept independently from
        repeated medians of pairwise intercepts.

    Returns
    -------
    result : ``SiegelslopesResult`` instance
        Object with attributes ``slope`` and ``intercept``. For backwards
        compatibility it also unpacks like a 2-tuple::

            slope, intercept = siegelslopes(y, x)

    See Also
    --------
    theilslopes : a similar technique without repeated medians
    """
    if method not in ('hierarchical', 'separate'):
        raise ValueError("method can only be 'hierarchical' or 'separate'")
    y = np.asarray(y).ravel()
    x = (np.arange(len(y), dtype=float) if x is None
         else np.asarray(x, dtype=float).ravel())
    if len(x) != len(y):
        raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})")
    # Promote both arrays to a common dtype of at least float32 for the
    # compiled (pythran) kernel.
    common = np.result_type(x, y, np.float32)
    y = y.astype(common)
    x = x.astype(common)
    slope, intercept = siegelslopes_pythran(y, x, method)
    return SiegelslopesResult(slope=slope, intercept=intercept)
There are no expressed or implied +# warranties of any kind, including, but not limited to, the warranties +# of merchantability and fitness for a given application. In no event +# shall Gary Strangman be liable for any direct, indirect, incidental, +# special, exemplary or consequential damages (including, but not limited +# to, loss of use, data or profits, or business interruption) however +# caused and on any theory of liability, whether in contract, strict +# liability or tort (including negligence or otherwise) arising in any way +# out of the use of this software, even if advised of the possibility of +# such damage. + +""" +A collection of basic statistical functions for Python. + +References +---------- +.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + +""" +import warnings +import math +from math import gcd +from collections import namedtuple +from collections.abc import Sequence + +import numpy as np +from numpy import array, asarray, ma + +from scipy import sparse +from scipy.spatial import distance_matrix + +from scipy.optimize import milp, LinearConstraint +from scipy._lib._util import (check_random_state, _get_nan, + _rename_parameter, _contains_nan, + AxisError, _lazywhere) +from scipy._lib.deprecation import _deprecate_positional_args + + +import scipy.special as special +# Import unused here but needs to stay until end of deprecation periode +# See https://github.com/scipy/scipy/issues/15765#issuecomment-1875564522 +from scipy import linalg # noqa: F401 +from . import distributions +from . 
import _mstats_basic as mstats_basic + +from ._stats_mstats_common import _find_repeats, theilslopes, siegelslopes +from ._stats import _kendall_dis, _toint64, _weightedrankedtau + +from dataclasses import dataclass, field +from ._hypotests import _all_partitions +from ._stats_pythran import _compute_outer_prob_inside_method +from ._resampling import (MonteCarloMethod, PermutationMethod, BootstrapMethod, + monte_carlo_test, permutation_test, bootstrap, + _batch_generator) +from ._axis_nan_policy import (_axis_nan_policy_factory, + _broadcast_concatenate, _broadcast_shapes, + _broadcast_array_shapes_remove_axis, SmallSampleWarning, + too_small_1d_not_omit, too_small_1d_omit, + too_small_nd_not_omit, too_small_nd_omit) +from ._binomtest import _binary_search_for_binom_tst as _binary_search +from scipy._lib._bunch import _make_tuple_bunch +from scipy import stats +from scipy.optimize import root_scalar +from scipy._lib._util import normalize_axis_index +from scipy._lib._array_api import ( + _asarray, + array_namespace, + is_numpy, + xp_size, + xp_moveaxis_to_end, + xp_sign, + xp_vector_norm, + xp_broadcast_promote, +) +from scipy._lib import array_api_extra as xpx +from scipy._lib.deprecation import _deprecated + + +# Functions/classes in other files should be added in `__init__.py`, not here +__all__ = ['find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar', + 'tmin', 'tmax', 'tstd', 'tsem', 'moment', + 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest', + 'normaltest', 'jarque_bera', + 'scoreatpercentile', 'percentileofscore', + 'cumfreq', 'relfreq', 'obrientransform', + 'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd', + 'median_abs_deviation', + 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', + 'f_oneway', 'pearsonr', 'fisher_exact', + 'spearmanr', 'pointbiserialr', + 'kendalltau', 'weightedtau', + 'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp', + 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', + 'kstest', 'ks_1samp', 'ks_2samp', 
+ 'chisquare', 'power_divergence', + 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare', + 'rankdata', 'combine_pvalues', 'quantile_test', + 'wasserstein_distance', 'wasserstein_distance_nd', 'energy_distance', + 'brunnermunzel', 'alexandergovern', + 'expectile', 'lmoment'] + + +def _chk_asarray(a, axis, *, xp=None): + if xp is None: + xp = array_namespace(a) + + if axis is None: + a = xp.reshape(a, (-1,)) + outaxis = 0 + else: + a = xp.asarray(a) + outaxis = axis + + if a.ndim == 0: + a = xp.reshape(a, (-1,)) + + return a, outaxis + + +def _chk2_asarray(a, b, axis): + if axis is None: + a = np.ravel(a) + b = np.ravel(b) + outaxis = 0 + else: + a = np.asarray(a) + b = np.asarray(b) + outaxis = axis + + if a.ndim == 0: + a = np.atleast_1d(a) + if b.ndim == 0: + b = np.atleast_1d(b) + + return a, b, outaxis + + +def _convert_common_float(*arrays, xp=None): + xp = array_namespace(*arrays) if xp is None else xp + arrays = [_asarray(array, subok=True) for array in arrays] + dtypes = [(xp.asarray(1.).dtype if xp.isdtype(array.dtype, 'integral') + else array.dtype) for array in arrays] + dtype = xp.result_type(*dtypes) + arrays = [xp.astype(array, dtype, copy=False) for array in arrays] + return arrays[0] if len(arrays)==1 else tuple(arrays) + + +SignificanceResult = _make_tuple_bunch('SignificanceResult', + ['statistic', 'pvalue'], []) + + +# note that `weights` are paired with `x` +@_axis_nan_policy_factory( + lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, + result_to_tuple=lambda x: (x,), kwd_samples=['weights']) +def gmean(a, axis=0, dtype=None, weights=None): + r"""Compute the weighted geometric mean along the specified axis. + + The weighted geometric mean of the array :math:`a_i` associated to weights + :math:`w_i` is: + + .. math:: + + \exp \left( \frac{ \sum_{i=1}^n w_i \ln a_i }{ \sum_{i=1}^n w_i } + \right) \, , + + and, with equal weights, it gives: + + .. math:: + + \sqrt[n]{ \prod_{i=1}^n a_i } \, . 
+ + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int or None, optional + Axis along which the geometric mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type to which the input arrays are cast before the calculation is + performed. + weights : array_like, optional + The `weights` array must be broadcastable to the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + + Returns + ------- + gmean : ndarray + See `dtype` parameter above. + + See Also + -------- + numpy.mean : Arithmetic average + numpy.average : Weighted average + hmean : Harmonic mean + + Notes + ----- + The sample geometric mean is the exponential of the mean of the natural + logarithms of the observations. + Negative observations will produce NaNs in the output because the *natural* + logarithm (as opposed to the *complex* logarithm) is defined only for + non-negative reals. + + References + ---------- + .. [1] "Weighted Geometric Mean", *Wikipedia*, + https://en.wikipedia.org/wiki/Weighted_geometric_mean. + .. [2] Grossman, J., Grossman, M., Katz, R., "Averages: A New Approach", + Archimedes Foundation, 1983 + + Examples + -------- + >>> from scipy.stats import gmean + >>> gmean([1, 4]) + 2.0 + >>> gmean([1, 2, 3, 4, 5, 6, 7]) + 3.3800151591412964 + >>> gmean([1, 4, 7], weights=[3, 1, 3]) + 2.80668351922014 + + """ + xp = array_namespace(a, weights) + a = xp.asarray(a, dtype=dtype) + + if weights is not None: + weights = xp.asarray(weights, dtype=dtype) + + with np.errstate(divide='ignore'): + log_a = xp.log(a) + + return xp.exp(_xp_mean(log_a, axis=axis, weights=weights)) + + +@_axis_nan_policy_factory( + lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, + result_to_tuple=lambda x: (x,), kwd_samples=['weights']) +def hmean(a, axis=0, dtype=None, *, weights=None): + r"""Calculate the weighted harmonic mean along the specified axis. 
+ + The weighted harmonic mean of the array :math:`a_i` associated to weights + :math:`w_i` is: + + .. math:: + + \frac{ \sum_{i=1}^n w_i }{ \sum_{i=1}^n \frac{w_i}{a_i} } \, , + + and, with equal weights, it gives: + + .. math:: + + \frac{ n }{ \sum_{i=1}^n \frac{1}{a_i} } \, . + + Parameters + ---------- + a : array_like + Input array, masked array or object that can be converted to an array. + axis : int or None, optional + Axis along which the harmonic mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults to the + dtype of `a`, unless `a` has an integer `dtype` with a precision less + than that of the default platform integer. In that case, the default + platform integer is used. + weights : array_like, optional + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given `axis`) or of the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + + .. versionadded:: 1.9 + + Returns + ------- + hmean : ndarray + See `dtype` parameter above. + + See Also + -------- + numpy.mean : Arithmetic average + numpy.average : Weighted average + gmean : Geometric mean + + Notes + ----- + The sample harmonic mean is the reciprocal of the mean of the reciprocals + of the observations. + + The harmonic mean is computed over a single dimension of the input + array, axis=0 by default, or all values in the array if axis=None. + float64 intermediate and return values are used for integer inputs. + + The harmonic mean is only defined if all observations are non-negative; + otherwise, the result is NaN. + + References + ---------- + .. [1] "Weighted Harmonic Mean", *Wikipedia*, + https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean + .. 
[2] Ferger, F., "The nature and use of the harmonic mean", Journal of + the American Statistical Association, vol. 26, pp. 36-40, 1931 + + Examples + -------- + >>> from scipy.stats import hmean + >>> hmean([1, 4]) + 1.6000000000000001 + >>> hmean([1, 2, 3, 4, 5, 6, 7]) + 2.6997245179063363 + >>> hmean([1, 4, 7], weights=[3, 1, 3]) + 1.9029126213592233 + + """ + xp = array_namespace(a, weights) + a = xp.asarray(a, dtype=dtype) + + if weights is not None: + weights = xp.asarray(weights, dtype=dtype) + + negative_mask = a < 0 + if xp.any(negative_mask): + # `where` avoids having to be careful about dtypes and will work with + # JAX. This is the exceptional case, so it's OK to be a little slower. + # Won't work for array_api_strict for now, but see data-apis/array-api#807 + a = xp.where(negative_mask, xp.nan, a) + message = ("The harmonic mean is only defined if all elements are " + "non-negative; otherwise, the result is NaN.") + warnings.warn(message, RuntimeWarning, stacklevel=2) + + with np.errstate(divide='ignore'): + return 1.0 / _xp_mean(1.0 / a, axis=axis, weights=weights) + + +@_axis_nan_policy_factory( + lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, + result_to_tuple=lambda x: (x,), kwd_samples=['weights']) +def pmean(a, p, *, axis=0, dtype=None, weights=None): + r"""Calculate the weighted power mean along the specified axis. + + The weighted power mean of the array :math:`a_i` associated to weights + :math:`w_i` is: + + .. math:: + + \left( \frac{ \sum_{i=1}^n w_i a_i^p }{ \sum_{i=1}^n w_i } + \right)^{ 1 / p } \, , + + and, with equal weights, it gives: + + .. math:: + + \left( \frac{ 1 }{ n } \sum_{i=1}^n a_i^p \right)^{ 1 / p } \, . + + When ``p=0``, it returns the geometric mean. + + This mean is also called generalized mean or Hölder mean, and must not be + confused with the Kolmogorov generalized mean, also called + quasi-arithmetic mean or generalized f-mean [3]_. 
+ + Parameters + ---------- + a : array_like + Input array, masked array or object that can be converted to an array. + p : int or float + Exponent. + axis : int or None, optional + Axis along which the power mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults to the + dtype of `a`, unless `a` has an integer `dtype` with a precision less + than that of the default platform integer. In that case, the default + platform integer is used. + weights : array_like, optional + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given `axis`) or of the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + + Returns + ------- + pmean : ndarray, see `dtype` parameter above. + Output array containing the power mean values. + + See Also + -------- + numpy.average : Weighted average + gmean : Geometric mean + hmean : Harmonic mean + + Notes + ----- + The power mean is computed over a single dimension of the input + array, ``axis=0`` by default, or all values in the array if ``axis=None``. + float64 intermediate and return values are used for integer inputs. + + The power mean is only defined if all observations are non-negative; + otherwise, the result is NaN. + + .. versionadded:: 1.9 + + References + ---------- + .. [1] "Generalized Mean", *Wikipedia*, + https://en.wikipedia.org/wiki/Generalized_mean + .. [2] Norris, N., "Convexity properties of generalized mean value + functions", The Annals of Mathematical Statistics, vol. 8, + pp. 118-120, 1937 + .. 
[3] Bullen, P.S., Handbook of Means and Their Inequalities, 2003 + + Examples + -------- + >>> from scipy.stats import pmean, hmean, gmean + >>> pmean([1, 4], 1.3) + 2.639372938300652 + >>> pmean([1, 2, 3, 4, 5, 6, 7], 1.3) + 4.157111214492084 + >>> pmean([1, 4, 7], -2, weights=[3, 1, 3]) + 1.4969684896631954 + + For p=-1, power mean is equal to harmonic mean: + + >>> pmean([1, 4, 7], -1, weights=[3, 1, 3]) + 1.9029126213592233 + >>> hmean([1, 4, 7], weights=[3, 1, 3]) + 1.9029126213592233 + + For p=0, power mean is defined as the geometric mean: + + >>> pmean([1, 4, 7], 0, weights=[3, 1, 3]) + 2.80668351922014 + >>> gmean([1, 4, 7], weights=[3, 1, 3]) + 2.80668351922014 + + """ + if not isinstance(p, (int, float)): + raise ValueError("Power mean only defined for exponent of type int or " + "float.") + if p == 0: + return gmean(a, axis=axis, dtype=dtype, weights=weights) + + xp = array_namespace(a, weights) + a = xp.asarray(a, dtype=dtype) + + if weights is not None: + weights = xp.asarray(weights, dtype=dtype) + + negative_mask = a < 0 + if xp.any(negative_mask): + # `where` avoids having to be careful about dtypes and will work with + # JAX. This is the exceptional case, so it's OK to be a little slower. + # Won't work for array_api_strict for now, but see data-apis/array-api#807 + a = xp.where(negative_mask, np.nan, a) + message = ("The power mean is only defined if all elements are " + "non-negative; otherwise, the result is NaN.") + warnings.warn(message, RuntimeWarning, stacklevel=2) + + with np.errstate(divide='ignore', invalid='ignore'): + return _xp_mean(a**float(p), axis=axis, weights=weights)**(1/p) + + +ModeResult = namedtuple('ModeResult', ('mode', 'count')) + + +def _mode_result(mode, count): + # When a slice is empty, `_axis_nan_policy` automatically produces + # NaN for `mode` and `count`. This is a reasonable convention for `mode`, + # but `count` should not be NaN; it should be zero. 
+ i = np.isnan(count) + if i.shape == (): + count = np.asarray(0, dtype=count.dtype)[()] if i else count + else: + count[i] = 0 + return ModeResult(mode, count) + + +@_axis_nan_policy_factory(_mode_result, override={'vectorization': True, + 'nan_propagation': False}) +def mode(a, axis=0, nan_policy='propagate', keepdims=False): + r"""Return an array of the modal (most common) value in the passed array. + + If there is more than one such value, only one is returned. + The bin-count for the modal bins is also returned. + + Parameters + ---------- + a : array_like + Numeric, n-dimensional array of which to find mode(s). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': treats nan as it would treat any other value + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + keepdims : bool, optional + If set to ``False``, the `axis` over which the statistic is taken + is consumed (eliminated from the output array). If set to ``True``, + the `axis` is retained with size one, and the result will broadcast + correctly against the input array. + + Returns + ------- + mode : ndarray + Array of modal values. + count : ndarray + Array of counts for each mode. + + Notes + ----- + The mode is calculated using `numpy.unique`. + In NumPy versions 1.21 and after, all NaNs - even those with different + binary representations - are treated as equivalent and counted as separate + instances of the same value. + + By convention, the mode of an empty array is NaN, and the associated count + is zero. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[3, 0, 3, 7], + ... [3, 2, 6, 2], + ... [1, 7, 2, 8], + ... [3, 0, 6, 1], + ... 
[3, 2, 5, 5]]) + >>> from scipy import stats + >>> stats.mode(a, keepdims=True) + ModeResult(mode=array([[3, 0, 6, 1]]), count=array([[4, 2, 2, 1]])) + + To get mode of whole array, specify ``axis=None``: + + >>> stats.mode(a, axis=None, keepdims=True) + ModeResult(mode=[[3]], count=[[5]]) + >>> stats.mode(a, axis=None, keepdims=False) + ModeResult(mode=3, count=5) + + """ + # `axis`, `nan_policy`, and `keepdims` are handled by `_axis_nan_policy` + if not np.issubdtype(a.dtype, np.number): + message = ("Argument `a` is not recognized as numeric. " + "Support for input that cannot be coerced to a numeric " + "array was deprecated in SciPy 1.9.0 and removed in SciPy " + "1.11.0. Please consider `np.unique`.") + raise TypeError(message) + + if a.size == 0: + NaN = _get_nan(a) + return ModeResult(*np.array([NaN, 0], dtype=NaN.dtype)) + + vals, cnts = np.unique(a, return_counts=True) + modes, counts = vals[cnts.argmax()], cnts.max() + return ModeResult(modes[()], counts[()]) + + +def _put_val_to_limits(a, limits, inclusive, val=np.nan, xp=None): + """Replace elements outside limits with a value. + + This is primarily a utility function. + + Parameters + ---------- + a : array + limits : (float or None, float or None) + A tuple consisting of the (lower limit, upper limit). Elements in the + input array less than the lower limit or greater than the upper limit + will be replaced with `val`. None implies no limit. + inclusive : (bool, bool) + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to lower or upper are allowed. + val : float, default: NaN + The value with which extreme elements of the array are replaced. 
+ + """ + xp = array_namespace(a) if xp is None else xp + mask = xp.zeros(a.shape, dtype=xp.bool) + if limits is None: + return a, mask + lower_limit, upper_limit = limits + lower_include, upper_include = inclusive + if lower_limit is not None: + mask |= (a < lower_limit) if lower_include else a <= lower_limit + if upper_limit is not None: + mask |= (a > upper_limit) if upper_include else a >= upper_limit + if xp.all(mask): + raise ValueError("No array values within given limits") + if xp.any(mask): + # hopefully this (and many other instances of this idiom) are temporary when + # data-apis/array-api#807 is resolved + dtype = xp.asarray(1.).dtype if xp.isdtype(a.dtype, 'integral') else a.dtype + a = xp.where(mask, xp.asarray(val, dtype=dtype), a) + return a, mask + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def tmean(a, limits=None, inclusive=(True, True), axis=None): + """Compute the trimmed mean. + + This function finds the arithmetic mean of given values, ignoring values + outside the given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None (default), then all + values are used. Either of the limit values in the tuple can also be + None representing a half-open interval. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to compute test. Default is None. + + Returns + ------- + tmean : ndarray + Trimmed mean. + + See Also + -------- + trim_mean : Returns mean after trimming a proportion from both tails. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tmean(x) + 9.5 + >>> stats.tmean(x, (3,17)) + 10.0 + + """ + xp = array_namespace(a) + a, mask = _put_val_to_limits(a, limits, inclusive, val=0., xp=xp) + # explicit dtype specification required due to data-apis/array-api-compat#152 + sum = xp.sum(a, axis=axis, dtype=a.dtype) + n = xp.sum(xp.asarray(~mask, dtype=a.dtype), axis=axis, dtype=a.dtype) + mean = _lazywhere(n != 0, (sum, n), xp.divide, xp.nan) + return mean[()] if mean.ndim == 0 else mean + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """Compute the trimmed variance. + + This function computes the sample variance of an array of values, + while ignoring values which are outside of given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tvar : float + Trimmed variance. + + Notes + ----- + `tvar` computes the unbiased sample variance, i.e. it uses a correction + factor ``n / (n - 1)``. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tvar(x) + 35.0 + >>> stats.tvar(x, (3,17)) + 20.0 + + """ + xp = array_namespace(a) + a, _ = _put_val_to_limits(a, limits, inclusive, xp=xp) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", SmallSampleWarning) + # Currently, this behaves like nan_policy='omit' for alternative array + # backends, but nan_policy='propagate' will be handled for other backends + # by the axis_nan_policy decorator shortly. + return _xp_var(a, correction=ddof, axis=axis, nan_policy='omit', xp=xp) + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'): + """Compute the trimmed minimum. + + This function finds the minimum value of an array `a` along the + specified axis, but only considering values greater than a specified + lower limit. + + Parameters + ---------- + a : array_like + Array of values. + lowerlimit : None or float, optional + Values in the input array less than the given limit will be ignored. + When lowerlimit is None, then all values are used. The default value + is None. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the lower limit + are included. The default value is True. + + Returns + ------- + tmin : float, int or ndarray + Trimmed minimum. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tmin(x) + 0 + + >>> stats.tmin(x, 13) + 13 + + >>> stats.tmin(x, 13, inclusive=False) + 14 + + """ + xp = array_namespace(a) + + # remember original dtype; _put_val_to_limits might need to change it + dtype = a.dtype + a, mask = _put_val_to_limits(a, (lowerlimit, None), (inclusive, None), + val=xp.inf, xp=xp) + + min = xp.min(a, axis=axis) + n = xp.sum(xp.asarray(~mask, dtype=a.dtype), axis=axis) + res = xp.where(n != 0, min, xp.nan) + + if not xp.any(xp.isnan(res)): + # needed if input is of integer dtype + res = xp.astype(res, dtype, copy=False) + + return res[()] if res.ndim == 0 else res + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'): + """Compute the trimmed maximum. + + This function computes the maximum value of an array along a given axis, + while ignoring values larger than a specified upper limit. + + Parameters + ---------- + a : array_like + Array of values. + upperlimit : None or float, optional + Values in the input array greater than the given limit will be ignored. + When upperlimit is None, then all values are used. The default value + is None. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the upper limit + are included. The default value is True. + + Returns + ------- + tmax : float, int or ndarray + Trimmed maximum. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tmax(x) + 19 + + >>> stats.tmax(x, 13) + 13 + + >>> stats.tmax(x, 13, inclusive=False) + 12 + + """ + xp = array_namespace(a) + + # remember original dtype; _put_val_to_limits might need to change it + dtype = a.dtype + a, mask = _put_val_to_limits(a, (None, upperlimit), (None, inclusive), + val=-xp.inf, xp=xp) + + max = xp.max(a, axis=axis) + n = xp.sum(xp.asarray(~mask, dtype=a.dtype), axis=axis) + res = xp.where(n != 0, max, xp.nan) + + if not xp.any(xp.isnan(res)): + # needed if input is of integer dtype + res = xp.astype(res, dtype, copy=False) + + return res[()] if res.ndim == 0 else res + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """Compute the trimmed sample standard deviation. + + This function finds the sample standard deviation of given values, + ignoring values outside the given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tstd : float + Trimmed sample standard deviation. 
+ + Notes + ----- + `tstd` computes the unbiased sample standard deviation, i.e. it uses a + correction factor ``n / (n - 1)``. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tstd(x) + 5.9160797830996161 + >>> stats.tstd(x, (3,17)) + 4.4721359549995796 + + """ + return tvar(a, limits, inclusive, axis, ddof, _no_deco=True)**0.5 + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """Compute the trimmed standard error of the mean. + + This function finds the standard error of the mean for given + values, ignoring values outside the given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tsem : float + Trimmed standard error of the mean. + + Notes + ----- + `tsem` uses unbiased sample standard deviation, i.e. it uses a + correction factor ``n / (n - 1)``. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tsem(x) + 1.3228756555322954 + >>> stats.tsem(x, (3,17)) + 1.1547005383792515 + + """ + xp = array_namespace(a) + a, _ = _put_val_to_limits(a, limits, inclusive, xp=xp) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", SmallSampleWarning) + # Currently, this behaves like nan_policy='omit' for alternative array + # backends, but nan_policy='propagate' will be handled for other backends + # by the axis_nan_policy decorator shortly. + sd = _xp_var(a, correction=ddof, axis=axis, nan_policy='omit', xp=xp)**0.5 + + n_obs = xp.sum(~xp.isnan(a), axis=axis, dtype=sd.dtype) + return sd / n_obs**0.5 + + +##################################### +# MOMENTS # +##################################### + + +def _moment_outputs(kwds, default_order=1): + order = np.atleast_1d(kwds.get('order', default_order)) + message = "`order` must be a scalar or a non-empty 1D array." + if order.size == 0 or order.ndim > 1: + raise ValueError(message) + return len(order) + + +def _moment_result_object(*args): + if len(args) == 1: + return args[0] + return np.asarray(args) + + +# When `order` is array-like with size > 1, moment produces an *array* +# rather than a tuple, but the zeroth dimension is to be treated like +# separate outputs. It is important to make the distinction between +# separate outputs when adding the reduced axes back (`keepdims=True`). +def _moment_tuple(x, n_out): + return tuple(x) if n_out > 1 else (x,) + + +# `moment` fits into the `_axis_nan_policy` pattern, but it is a bit unusual +# because the number of outputs is variable. Specifically, +# `result_to_tuple=lambda x: (x,)` may be surprising for a function that +# can produce more than one output, but it is intended here. 
#####################################
# MOMENTS                           #
#####################################


def _moment_outputs(kwds, default_order=1):
    # Report the number of outputs `moment` will produce: one per element of
    # `order`. Consumed by `_axis_nan_policy_factory` via `n_outputs=`.
    order = np.atleast_1d(kwds.get('order', default_order))
    message = "`order` must be a scalar or a non-empty 1D array."
    if order.size == 0 or order.ndim > 1:
        raise ValueError(message)
    return len(order)


def _moment_result_object(*args):
    # Assemble `moment`'s result object: a single output passes through
    # unchanged; multiple outputs are stacked into a single array.
    if len(args) == 1:
        return args[0]
    return np.asarray(args)


# When `order` is array-like with size > 1, moment produces an *array*
# rather than a tuple, but the zeroth dimension is to be treated like
# separate outputs. It is important to make the distinction between
# separate outputs when adding the reduced axes back (`keepdims=True`).
def _moment_tuple(x, n_out):
    return tuple(x) if n_out > 1 else (x,)


# `moment` fits into the `_axis_nan_policy` pattern, but it is a bit unusual
# because the number of outputs is variable. Specifically,
# `result_to_tuple=lambda x: (x,)` may be surprising for a function that
# can produce more than one output, but it is intended here.
# When `moment` is called to produce the output:
# - `result_to_tuple` packs the returned array into a single-element tuple,
# - `_moment_result_object` extracts and returns that single element.
# However, when the input array is empty, `moment` is never called. Instead,
# - `_check_empty_inputs` is used to produce an empty array with the
#   appropriate dimensions.
# - A list comprehension creates the appropriate number of copies of this
#   array, depending on `n_outputs`.
# - This list - which may have multiple elements - is passed into
#   `_moment_result_object`.
# - If there is a single output, `_moment_result_object` extracts and returns
#   the single output from the list.
# - If there are multiple outputs, and therefore multiple elements in the list,
#   `_moment_result_object` converts the list of arrays to a single array and
#   returns it.
# Currently, this leads to a slight inconsistency: when the input array is
# empty, there is no distinction between the `moment` function being called
# with parameter `order=1` and `order=[1]`; the latter *should* produce
# the same as the former but with a singleton zeroth dimension.
@_rename_parameter('moment', 'order')
@_axis_nan_policy_factory(  # noqa: E302
    _moment_result_object, n_samples=1, result_to_tuple=_moment_tuple,
    n_outputs=_moment_outputs
)
def moment(a, order=1, axis=0, nan_policy='propagate', *, center=None):
    r"""Calculate the nth moment about the mean for a sample.

    A moment is a specific quantitative measure of the shape of a set of
    points. It is often used to calculate coefficients of skewness and kurtosis
    due to its close relationship with them.

    Parameters
    ----------
    a : array_like
        Input array.
    order : int or 1-D array_like of ints, optional
        Order of central moment that is returned. Default is 1.
    axis : int or None, optional
        Axis along which the central moment is computed. Default is 0.
        If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    center : float or None, optional
        The point about which moments are taken. This can be the sample mean,
        the origin, or any other point. If `None` (default) compute the
        center as the sample mean.

    Returns
    -------
    n-th moment about the `center` : ndarray or float
        The appropriate moment along the given axis or over all values if axis
        is None. The denominator for the moment calculation is the number of
        observations, no degrees of freedom correction is done.

    See Also
    --------
    kurtosis, skew, describe

    Notes
    -----
    The k-th moment of a data sample is:

    .. math::

        m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - c)^k

    Where `n` is the number of samples, and `c` is the center around which the
    moment is calculated. This function uses exponentiation by squares [1]_ for
    efficiency.

    Note that, if `a` is an empty array (``a.size == 0``), array `moment` with
    one element (`moment.size == 1`) is treated the same as scalar `moment`
    (``np.isscalar(moment)``). This might produce arrays of unexpected shape.

    References
    ----------
    .. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms

    Examples
    --------
    >>> from scipy.stats import moment
    >>> moment([1, 2, 3, 4, 5], order=1)
    0.0
    >>> moment([1, 2, 3, 4, 5], order=2)
    2.0

    """
    xp = array_namespace(a)
    a, axis = _chk_asarray(a, axis, xp=xp)

    # Promote integer input so that moments are computed in floating point.
    if xp.isdtype(a.dtype, 'integral'):
        a = xp.asarray(a, dtype=xp.float64)
    else:
        a = xp.asarray(a)

    order = xp.asarray(order, dtype=a.dtype)
    if xp_size(order) == 0:
        # This is tested by `_moment_outputs`, which is run by the
        # `_axis_nan_policy` decorator. Currently, the `_axis_nan_policy`
        # decorator is skipped when `a` is a non-NumPy array, so we need to
        # check again. When the decorator is updated for array API
        # compatibility, we can remove this second check.
        raise ValueError("`order` must be a scalar or a non-empty 1D array.")
    if xp.any(order != xp.round(order)):
        raise ValueError("All elements of `order` must be integral.")
    order = order[()] if order.ndim == 0 else order

    # for array_like order input, return a value for each.
    if order.ndim > 0:
        # Calculate the mean once at most, and only if it will be used.
        calculate_mean = center is None and xp.any(order > 1)
        mean = xp.mean(a, axis=axis, keepdims=True) if calculate_mean else None
        mmnt = []
        for i in range(order.shape[0]):
            order_i = order[i]
            # Each per-order result gains a leading singleton axis so the
            # results can be concatenated along axis 0 below.
            if center is None and order_i > 1:
                mmnt.append(_moment(a, order_i, axis, mean=mean)[np.newaxis, ...])
            else:
                mmnt.append(_moment(a, order_i, axis, mean=center)[np.newaxis, ...])
        return xp.concat(mmnt, axis=0)
    else:
        return _moment(a, order, axis, mean=center)
Currently, the `_axis_nan_policy` decorator is skipped when `a` + # is a non-NumPy array, so we need to check again. When the decorator is + # updated for array API compatibility, we can remove this second check. + raise ValueError("`order` must be a scalar or a non-empty 1D array.") + if xp.any(order != xp.round(order)): + raise ValueError("All elements of `order` must be integral.") + order = order[()] if order.ndim == 0 else order + + # for array_like order input, return a value for each. + if order.ndim > 0: + # Calculated the mean once at most, and only if it will be used + calculate_mean = center is None and xp.any(order > 1) + mean = xp.mean(a, axis=axis, keepdims=True) if calculate_mean else None + mmnt = [] + for i in range(order.shape[0]): + order_i = order[i] + if center is None and order_i > 1: + mmnt.append(_moment(a, order_i, axis, mean=mean)[np.newaxis, ...]) + else: + mmnt.append(_moment(a, order_i, axis, mean=center)[np.newaxis, ...]) + return xp.concat(mmnt, axis=0) + else: + return _moment(a, order, axis, mean=center) + + +def _demean(a, mean, axis, *, xp, precision_warning=True): + # subtracts `mean` from `a` and returns the result, + # warning if there is catastrophic cancellation. `mean` + # must be the mean of `a` along axis with `keepdims=True`. + # Used in e.g. `_moment`, `_zscore`, `_xp_var`. See gh-15905. + a_zero_mean = a - mean + + if xp_size(a_zero_mean) == 0: + return a_zero_mean + + eps = xp.finfo(mean.dtype).eps * 10 + + with np.errstate(divide='ignore', invalid='ignore'): + rel_diff = xp.max(xp.abs(a_zero_mean), axis=axis, + keepdims=True) / xp.abs(mean) + with np.errstate(invalid='ignore'): + precision_loss = xp.any(rel_diff < eps) + n = (xp_size(a) if axis is None + # compact way to deal with axis tuples or ints + else np.prod(np.asarray(a.shape)[np.asarray(axis)])) + + if precision_loss and n > 1 and precision_warning: + message = ("Precision loss occurred in moment calculation due to " + "catastrophic cancellation. 
This occurs when the data " + "are nearly identical. Results may be unreliable.") + warnings.warn(message, RuntimeWarning, stacklevel=5) + return a_zero_mean + + +def _moment(a, order, axis, *, mean=None, xp=None): + """Vectorized calculation of raw moment about specified center + + When `mean` is None, the mean is computed and used as the center; + otherwise, the provided value is used as the center. + + """ + xp = array_namespace(a) if xp is None else xp + + if xp.isdtype(a.dtype, 'integral'): + a = xp.asarray(a, dtype=xp.float64) + + dtype = a.dtype + + # moment of empty array is the same regardless of order + if xp_size(a) == 0: + return xp.mean(a, axis=axis) + + if order == 0 or (order == 1 and mean is None): + # By definition the zeroth moment is always 1, and the first *central* + # moment is 0. + shape = list(a.shape) + del shape[axis] + + temp = (xp.ones(shape, dtype=dtype) if order == 0 + else xp.zeros(shape, dtype=dtype)) + return temp[()] if temp.ndim == 0 else temp + + # Exponentiation by squares: form exponent sequence + n_list = [order] + current_n = order + while current_n > 2: + if current_n % 2: + current_n = (current_n - 1) / 2 + else: + current_n /= 2 + n_list.append(current_n) + + # Starting point for exponentiation by squares + mean = (xp.mean(a, axis=axis, keepdims=True) if mean is None + else xp.asarray(mean, dtype=dtype)) + mean = mean[()] if mean.ndim == 0 else mean + a_zero_mean = _demean(a, mean, axis, xp=xp) + + if n_list[-1] == 1: + s = xp.asarray(a_zero_mean, copy=True) + else: + s = a_zero_mean**2 + + # Perform multiplications + for n in n_list[-2::-1]: + s = s**2 + if n % 2: + s *= a_zero_mean + return xp.mean(s, axis=axis) + + +def _var(x, axis=0, ddof=0, mean=None, xp=None): + # Calculate variance of sample, warning if precision is lost + xp = array_namespace(x) if xp is None else xp + var = _moment(x, 2, axis, mean=mean, xp=xp) + if ddof != 0: + n = x.shape[axis] if axis is not None else xp_size(x) + var *= np.divide(n, n-ddof) # 
to avoid error on division by zero + return var + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1 +) +# nan_policy handled by `_axis_nan_policy`, but needs to be left +# in signature to preserve use as a positional argument +def skew(a, axis=0, bias=True, nan_policy='propagate'): + r"""Compute the sample skewness of a data set. + + For normally distributed data, the skewness should be about zero. For + unimodal continuous distributions, a skewness value greater than zero means + that there is more weight in the right tail of the distribution. The + function `skewtest` can be used to determine if the skewness value + is close enough to zero, statistically speaking. + + Parameters + ---------- + a : ndarray + Input array. + axis : int or None, optional + Axis along which skewness is calculated. Default is 0. + If None, compute over the whole array `a`. + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + skewness : ndarray + The skewness of values along an axis, returning NaN where all values + are equal. + + Notes + ----- + The sample skewness is computed as the Fisher-Pearson coefficient + of skewness, i.e. + + .. math:: + + g_1=\frac{m_3}{m_2^{3/2}} + + where + + .. math:: + + m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i + + is the biased sample :math:`i\texttt{th}` central moment, and + :math:`\bar{x}` is + the sample mean. If ``bias`` is False, the calculations are + corrected for bias and the value computed is the adjusted + Fisher-Pearson standardized moment coefficient, i.e. + + .. math:: + + G_1=\frac{k_3}{k_2^{3/2}}= + \frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}. 
+ + References + ---------- + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + Section 2.2.24.1 + + Examples + -------- + >>> from scipy.stats import skew + >>> skew([1, 2, 3, 4, 5]) + 0.0 + >>> skew([2, 8, 0, 4, 1, 9, 9, 0]) + 0.2650554122698573 + + """ + xp = array_namespace(a) + a, axis = _chk_asarray(a, axis, xp=xp) + n = a.shape[axis] + + mean = xp.mean(a, axis=axis, keepdims=True) + mean_reduced = xp.squeeze(mean, axis=axis) # needed later + m2 = _moment(a, 2, axis, mean=mean, xp=xp) + m3 = _moment(a, 3, axis, mean=mean, xp=xp) + with np.errstate(all='ignore'): + eps = xp.finfo(m2.dtype).eps + zero = m2 <= (eps * mean_reduced)**2 + vals = xp.where(zero, xp.asarray(xp.nan), m3 / m2**1.5) + if not bias: + can_correct = ~zero & (n > 2) + if xp.any(can_correct): + m2 = m2[can_correct] + m3 = m3[can_correct] + nval = ((n - 1.0) * n)**0.5 / (n - 2.0) * m3 / m2**1.5 + vals[can_correct] = nval + + return vals[()] if vals.ndim == 0 else vals + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1 +) +# nan_policy handled by `_axis_nan_policy`, but needs to be left +# in signature to preserve use as a positional argument +def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'): + """Compute the kurtosis (Fisher or Pearson) of a dataset. + + Kurtosis is the fourth central moment divided by the square of the + variance. If Fisher's definition is used, then 3.0 is subtracted from + the result to give 0.0 for a normal distribution. + + If bias is False then the kurtosis is calculated using k statistics to + eliminate bias coming from biased moment estimators + + Use `kurtosistest` to see if result is close enough to normal. + + Parameters + ---------- + a : array + Data for which the kurtosis is calculated. + axis : int or None, optional + Axis along which the kurtosis is calculated. Default is 0. 
@_axis_nan_policy_factory(
    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
)
# nan_policy handled by `_axis_nan_policy`, but needs to be left
# in signature to preserve use as a positional argument
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
    """Compute the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution.

    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators

    Use `kurtosistest` to see if result is close enough to normal.

    Parameters
    ----------
    a : array
        Data for which the kurtosis is calculated.
    axis : int or None, optional
        Axis along which the kurtosis is calculated. Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis, returning NaN where all values
        are equal.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.

    Examples
    --------
    In Fisher's definition, the kurtosis of the normal distribution is zero.
    In the following example, the kurtosis is close to zero, because it was
    calculated from the dataset, not from the continuous distribution.

    >>> import numpy as np
    >>> from scipy.stats import norm, kurtosis
    >>> data = norm.rvs(size=1000, random_state=3)
    >>> kurtosis(data)
    -0.06928694200380558

    The distribution with a higher kurtosis has a heavier tail.
    The zero valued kurtosis of the normal distribution in Fisher's definition
    can serve as a reference point.

    >>> import matplotlib.pyplot as plt
    >>> import scipy.stats as stats
    >>> from scipy.stats import kurtosis

    >>> x = np.linspace(-5, 5, 100)
    >>> ax = plt.subplot()
    >>> distnames = ['laplace', 'norm', 'uniform']

    >>> for distname in distnames:
    ...     if distname == 'uniform':
    ...         dist = getattr(stats, distname)(loc=-2, scale=4)
    ...     else:
    ...         dist = getattr(stats, distname)
    ...     data = dist.rvs(size=1000)
    ...     kur = kurtosis(data, fisher=True)
    ...     y = dist.pdf(x)
    ...     ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
    ...     ax.legend()

    The Laplace distribution has a heavier tail than the normal distribution.
    The uniform distribution (which has negative kurtosis) has the thinnest
    tail.

    """
    xp = array_namespace(a)
    a, axis = _chk_asarray(a, axis, xp=xp)

    n = a.shape[axis]
    mean = xp.mean(a, axis=axis, keepdims=True)
    mean_reduced = xp.squeeze(mean, axis=axis)  # needed later
    m2 = _moment(a, 2, axis, mean=mean, xp=xp)
    m4 = _moment(a, 4, axis, mean=mean, xp=xp)
    with np.errstate(all='ignore'):
        # Treat m2 as zero (all values equal) when it is below the square of
        # one ULP of the mean; those slices get NaN instead of 0/0.
        zero = m2 <= (xp.finfo(m2.dtype).eps * mean_reduced)**2
        NaN = _get_nan(m4, xp=xp)
        vals = xp.where(zero, NaN, m4 / m2**2.0)

    if not bias:
        # Bias correction (k-statistics) is possible only where m2 is nonzero
        # and n > 3; corrected values are written back in place.
        can_correct = ~zero & (n > 3)
        if xp.any(can_correct):
            m2 = m2[can_correct]
            m4 = m4[can_correct]
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
            vals[can_correct] = nval + 3.0

    # Fisher's definition subtracts 3 so a normal distribution scores 0.
    vals = vals - 3 if fisher else vals
    return vals[()] if vals.ndim == 0 else vals
+ When 'omit' is chosen as nan_policy, the length along each axis + slice is counted separately. + minmax: tuple of ndarrays or floats + Minimum and maximum value of `a` along the given axis. + mean : ndarray or float + Arithmetic mean of `a` along the given axis. + variance : ndarray or float + Unbiased variance of `a` along the given axis; denominator is number + of observations minus one. + skewness : ndarray or float + Skewness of `a` along the given axis, based on moment calculations + with denominator equal to the number of observations, i.e. no degrees + of freedom correction. + kurtosis : ndarray or float + Kurtosis (Fisher) of `a` along the given axis. The kurtosis is + normalized so that it is zero for the normal distribution. No + degrees of freedom are used. + + Raises + ------ + ValueError + If size of `a` is 0. + + See Also + -------- + skew, kurtosis + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> a = np.arange(10) + >>> stats.describe(a) + DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, + variance=9.166666666666666, skewness=0.0, + kurtosis=-1.2242424242424244) + >>> b = [[1, 2], [3, 4]] + >>> stats.describe(b) + DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])), + mean=array([2., 3.]), variance=array([2., 2.]), + skewness=array([0., 0.]), kurtosis=array([-2., -2.])) + + """ + xp = array_namespace(a) + a, axis = _chk_asarray(a, axis, xp=xp) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + # only NumPy gets here; `_contains_nan` raises error for the rest + a = ma.masked_invalid(a) + return mstats_basic.describe(a, axis, ddof, bias) + + if xp_size(a) == 0: + raise ValueError("The input must not be empty.") + + n = a.shape[axis] + mm = (xp.min(a, axis=axis), xp.max(a, axis=axis)) + m = xp.mean(a, axis=axis) + v = _var(a, axis=axis, ddof=ddof, xp=xp) + sk = skew(a, axis, bias=bias) + kurt = kurtosis(a, axis, bias=bias) + + return DescribeResult(n, 
mm, m, v, sk, kurt) + +##################################### +# NORMALITY TESTS # +##################################### + + +def _get_pvalue(statistic, distribution, alternative, symmetric=True, xp=None): + """Get p-value given the statistic, (continuous) distribution, and alternative""" + xp = array_namespace(statistic) if xp is None else xp + + if alternative == 'less': + pvalue = distribution.cdf(statistic) + elif alternative == 'greater': + pvalue = distribution.sf(statistic) + elif alternative == 'two-sided': + pvalue = 2 * (distribution.sf(xp.abs(statistic)) if symmetric + else xp.minimum(distribution.cdf(statistic), + distribution.sf(statistic))) + else: + message = "`alternative` must be 'less', 'greater', or 'two-sided'." + raise ValueError(message) + + return pvalue + + +SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(SkewtestResult, n_samples=1, too_small=7) +# nan_policy handled by `_axis_nan_policy`, but needs to be left +# in signature to preserve use as a positional argument +def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'): + r"""Test whether the skew is different from the normal distribution. + + This function tests the null hypothesis that the skewness of + the population that the sample was drawn from is the same + as that of a corresponding normal distribution. + + Parameters + ---------- + a : array + The data to be tested. Must contain at least eight observations. + axis : int or None, optional + Axis along which statistics are calculated. Default is 0. + If None, compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))


@_axis_nan_policy_factory(SkewtestResult, n_samples=1, too_small=7)
# nan_policy handled by `_axis_nan_policy`, but needs to be left
# in signature to preserve use as a positional argument
def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
    r"""Test whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.

    Parameters
    ----------
    a : array
        The data to be tested. Must contain at least eight observations.
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the skewness of the distribution underlying the sample
          is different from that of the normal distribution (i.e. 0)
        * 'less': the skewness of the distribution underlying the sample
          is less than that of the normal distribution
        * 'greater': the skewness of the distribution underlying the sample
          is greater than that of the normal distribution

        .. versionadded:: 1.7.0

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        The p-value for the hypothesis test.

    See Also
    --------
    :ref:`hypothesis_skewtest` : Extended example

    Notes
    -----
    The sample size must be at least 8.

    References
    ----------
    .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
            "A suggestion for using powerful and informative tests of
            normality", American Statistician 44, pp. 316-321, 1990.

    Examples
    --------

    >>> from scipy.stats import skewtest
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
    SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
    >>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
    SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
    SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
    >>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
    SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='less')
    SkewtestResult(statistic=1.0108048609177787, pvalue=0.8439450819289052)
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='greater')
    SkewtestResult(statistic=1.0108048609177787, pvalue=0.15605491807109484)

    For a more detailed example, see :ref:`hypothesis_skewtest`.
    """
    xp = array_namespace(a)
    a, axis = _chk_asarray(a, axis, xp=xp)

    b2 = skew(a, axis, _no_deco=True)
    n = a.shape[axis]
    if n < 8:
        message = ("`skewtest` requires at least 8 observations; "
                   f"only {n=} observations were given.")
        raise ValueError(message)

    # Normalizing transform of the sample skewness per D'Agostino & Belanger
    # (1990) [1]_: scale b2, then apply an inverse-sinh-style transform so the
    # statistic is approximately standard normal under the null.
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
             ((n-2.0) * (n+5) * (n+7) * (n+9)))
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # y == 0 would make the log transform diverge; substitute 1 in that case.
    y = xp.where(y == 0, xp.asarray(1, dtype=y.dtype), y)
    # log(x + sqrt(x**2 + 1)) is asinh(x): the variance-stabilizing transform.
    Z = delta * xp.log(y / alpha + xp.sqrt((y / alpha)**2 + 1))

    pvalue = _get_pvalue(Z, _SimpleNormal(), alternative, xp=xp)

    Z = Z[()] if Z.ndim == 0 else Z
    pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue
    return SkewtestResult(Z, pvalue)
a dataset has normal kurtosis. + + This function tests the null hypothesis that the kurtosis + of the population from which the sample was drawn is that + of the normal distribution. + + Parameters + ---------- + a : array + Array of the sample data. Must contain at least five observations. + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the kurtosis of the distribution underlying the sample + is different from that of the normal distribution + * 'less': the kurtosis of the distribution underlying the sample + is less than that of the normal distribution + * 'greater': the kurtosis of the distribution underlying the sample + is greater than that of the normal distribution + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float + The computed z-score for this test. + pvalue : float + The p-value for the hypothesis test. + + See Also + -------- + :ref:`hypothesis_kurtosistest` : Extended example + + Notes + ----- + Valid only for n>20. This function uses the method described in [1]_. + + References + ---------- + .. [1] F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis + statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983. 
+ + Examples + -------- + + >>> import numpy as np + >>> from scipy.stats import kurtosistest + >>> kurtosistest(list(range(20))) + KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348) + >>> kurtosistest(list(range(20)), alternative='less') + KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174) + >>> kurtosistest(list(range(20)), alternative='greater') + KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583) + >>> rng = np.random.default_rng() + >>> s = rng.normal(0, 1, 1000) + >>> kurtosistest(s) + KurtosistestResult(statistic=-1.475047944490622, pvalue=0.14019965402996987) + + For a more detailed example, see :ref:`hypothesis_kurtosistest`. + """ + xp = array_namespace(a) + a, axis = _chk_asarray(a, axis, xp=xp) + + n = a.shape[axis] + + if n < 5: + message = ("`kurtosistest` requires at least 5 observations; " + f"only {n=} observations were given.") + raise ValueError(message) + if n < 20: + message = ("`kurtosistest` p-value may be inaccurate with fewer than 20 " + f"observations; only {n=} observations were given.") + warnings.warn(message, stacklevel=2) + b2 = kurtosis(a, axis, fisher=False, _no_deco=True) + + E = 3.0*(n-1) / (n+1) + varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1 + x = (b2-E) / varb2**0.5 # [1]_ Eq. 4 + # [1]_ Eq. 2: + sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * ((6.0*(n+3)*(n+5)) + / (n*(n-2)*(n-3)))**0.5 + # [1]_ Eq. 3: + A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + (1+4.0/(sqrtbeta1**2))**0.5) + term1 = 1 - 2/(9.0*A) + denom = 1 + x * (2/(A-4.0))**0.5 + NaN = _get_nan(x, xp=xp) + term2 = xp_sign(denom) * xp.where(denom == 0.0, NaN, + ((1-2.0/A)/xp.abs(denom))**(1/3)) + if xp.any(denom == 0): + msg = ("Test statistic not defined in some cases due to division by " + "zero. Return nan in that case...") + warnings.warn(msg, RuntimeWarning, stacklevel=2) + + Z = (term1 - term2) / (2/(9.0*A))**0.5 # [1]_ Eq. 
5 + + pvalue = _get_pvalue(Z, _SimpleNormal(), alternative, xp=xp) + + Z = Z[()] if Z.ndim == 0 else Z + pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue + return KurtosistestResult(Z, pvalue) + + +NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(NormaltestResult, n_samples=1, too_small=7) +def normaltest(a, axis=0, nan_policy='propagate'): + r"""Test whether a sample differs from a normal distribution. + + This function tests the null hypothesis that a sample comes + from a normal distribution. It is based on D'Agostino and + Pearson's [1]_, [2]_ test that combines skew and kurtosis to + produce an omnibus test of normality. + + Parameters + ---------- + a : array_like + The array containing the sample to be tested. Must contain + at least eight observations. + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + statistic : float or array + ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and + ``k`` is the z-score returned by `kurtosistest`. + pvalue : float or array + A 2-sided chi squared probability for the hypothesis test. + + See Also + -------- + :ref:`hypothesis_normaltest` : Extended example + + References + ---------- + .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for + moderate and large sample size", Biometrika, 58, 341-348 + .. [2] D'Agostino, R. and Pearson, E. S. 
(1973), "Tests for departure from + normality", Biometrika, 60, 613-622 + + Examples + -------- + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> pts = 1000 + >>> a = rng.normal(0, 1, size=pts) + >>> b = rng.normal(2, 1, size=pts) + >>> x = np.concatenate((a, b)) + >>> res = stats.normaltest(x) + >>> res.statistic + 53.619... # random + >>> res.pvalue + 2.273917413209226e-12 # random + + For a more detailed example, see :ref:`hypothesis_normaltest`. + """ + xp = array_namespace(a) + + s, _ = skewtest(a, axis, _no_deco=True) + k, _ = kurtosistest(a, axis, _no_deco=True) + statistic = s*s + k*k + + chi2 = _SimpleChi2(xp.asarray(2.)) + pvalue = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=xp) + + statistic = statistic[()] if statistic.ndim == 0 else statistic + pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue + + return NormaltestResult(statistic, pvalue) + + +@_axis_nan_policy_factory(SignificanceResult, default_axis=None) +def jarque_bera(x, *, axis=None): + r"""Perform the Jarque-Bera goodness of fit test on sample data. + + The Jarque-Bera test tests whether the sample data has the skewness and + kurtosis matching a normal distribution. + + Note that this test only works for a large enough number of data samples + (>2000) as the test statistic asymptotically has a Chi-squared distribution + with 2 degrees of freedom. + + Parameters + ---------- + x : array_like + Observations of a random variable. + axis : int or None, default: 0 + If an int, the axis of the input along which to compute the statistic. + The statistic of each axis-slice (e.g. row) of the input will appear in + a corresponding element of the output. + If ``None``, the input will be raveled before computing the statistic. + + Returns + ------- + result : SignificanceResult + An object with the following attributes: + + statistic : float + The test statistic. + pvalue : float + The p-value for the hypothesis test. 
+ + See Also + -------- + :ref:`hypothesis_jarque_bera` : Extended example + + References + ---------- + .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality, + homoscedasticity and serial independence of regression residuals", + 6 Econometric Letters 255-259. + + Examples + -------- + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> x = rng.normal(0, 1, 100000) + >>> jarque_bera_test = stats.jarque_bera(x) + >>> jarque_bera_test + Jarque_beraResult(statistic=3.3415184718131554, pvalue=0.18810419594996775) + >>> jarque_bera_test.statistic + 3.3415184718131554 + >>> jarque_bera_test.pvalue + 0.18810419594996775 + + For a more detailed example, see :ref:`hypothesis_jarque_bera`. + """ + xp = array_namespace(x) + x = xp.asarray(x) + if axis is None: + x = xp.reshape(x, (-1,)) + axis = 0 + + n = x.shape[axis] + if n == 0: + raise ValueError('At least one observation is required.') + + mu = xp.mean(x, axis=axis, keepdims=True) + diffx = x - mu + s = skew(diffx, axis=axis, _no_deco=True) + k = kurtosis(diffx, axis=axis, _no_deco=True) + statistic = n / 6 * (s**2 + k**2 / 4) + + chi2 = _SimpleChi2(xp.asarray(2.)) + pvalue = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=xp) + + statistic = statistic[()] if statistic.ndim == 0 else statistic + pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue + + return SignificanceResult(statistic, pvalue) + + +##################################### +# FREQUENCY FUNCTIONS # +##################################### + + +def scoreatpercentile(a, per, limit=(), interpolation_method='fraction', + axis=None): + """Calculate the score at a given percentile of the input sequence. + + For example, the score at ``per=50`` is the median. If the desired quantile + lies between two data points, we interpolate between them, according to + the value of `interpolation`. If the parameter `limit` is provided, it + should be a tuple (lower, upper) of two values. 
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """Calculate the score at a given percentile of the input sequence.

    For example, the score at ``per=50`` is the median. If the desired quantile
    lies between two data points, we interpolate between them, according to
    the value of `interpolation`. If the parameter `limit` is provided, it
    should be a tuple (lower, upper) of two values.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract score.
    per : array_like
        Percentile(s) at which to extract score. Values should be in range
        [0,100].
    limit : tuple, optional
        Tuple of two scalars, the lower and upper limits within which to
        compute the percentile. Values of `a` outside
        this (closed) interval will be ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        Specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`
        The following options are available (default is 'fraction'):

        * 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
          fractional part of the index surrounded by ``i`` and ``j``
        * 'lower': ``i``
        * 'higher': ``j``

    axis : int, optional
        Axis along which the percentiles are computed. Default is None. If
        None, compute over the whole array `a`.

    Returns
    -------
    score : float or ndarray
        Score at percentile(s).

    See Also
    --------
    percentileofscore, numpy.percentile

    Notes
    -----
    This function will become obsolete in the future.
    For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides.  And it's significantly faster.
    Therefore it's recommended to use `numpy.percentile` for users that have
    numpy >= 1.9.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5

    """
    a = np.asarray(a)
    if a.size == 0:
        # Empty input: nan scalar for scalar `per`, nan array matching the
        # shape of `per` otherwise.
        if np.isscalar(per):
            return np.nan
        return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)

    # Restrict to the closed interval [limit[0], limit[1]] if requested.
    if limit:
        a = a[(limit[0] <= a) & (a <= limit[1])]

    ordered = np.sort(a, axis=axis)
    if axis is None:
        axis = 0

    return _compute_qth_percentile(ordered, per, interpolation_method, axis)


def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
    # Handle a sequence of percentiles by recursing per element, so the
    # caller only sorts once.
    if not np.isscalar(per):
        return np.array([_compute_qth_percentile(sorted_, p,
                                                 interpolation_method, axis)
                         for p in per])

    if not (0 <= per <= 100):
        raise ValueError("percentile must be in the range [0, 100]")

    indexer = [slice(None)] * sorted_.ndim
    # Fractional position of the requested percentile along `axis`.
    idx = per / 100. * (sorted_.shape[axis] - 1)

    if int(idx) != idx:
        # Round fractional indices according to interpolation method.
        if interpolation_method == 'lower':
            idx = int(np.floor(idx))
        elif interpolation_method == 'higher':
            idx = int(np.ceil(idx))
        elif interpolation_method == 'fraction':
            pass  # keep idx as fraction and interpolate
        else:
            raise ValueError("interpolation_method can only be 'fraction', "
                             "'lower' or 'higher'")

    lo = int(idx)
    if lo == idx:
        # Exact index: a single element with unit weight.
        indexer[axis] = slice(lo, lo + 1)
        weights = np.array(1)
        sumval = 1.0
    else:
        # Fractional index: weighted average of the two neighbors.
        indexer[axis] = slice(lo, lo + 2)
        hi = lo + 1
        weights = np.array([(hi - idx), (idx - lo)], float)
        wshape = [1] * sorted_.ndim
        wshape[axis] = 2
        weights = weights.reshape(wshape)
        sumval = weights.sum()

    # Use np.add.reduce (== np.sum but a little faster) to coerce data type.
    return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank', nan_policy='propagate'):
    """Compute the percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, for example, 80% means that 80% of the
    scores in `a` are below the given score. In the case of gaps or
    ties, the exact definition depends on the optional keyword, `kind`.

    Parameters
    ----------
    a : array_like
        A 1-D array to which `score` is compared.
    score : array_like
        Scores to compute percentiles for.
    kind : {'rank', 'weak', 'strict', 'mean'}, optional
        Specifies the interpretation of the resulting score.
        The following options are available (default is 'rank'):

        * 'rank': Average percentage ranking of score. In case of multiple
          matches, average the percentage rankings of all matching scores.
        * 'weak': This kind corresponds to the definition of a cumulative
          distribution function. A percentileofscore of 80% means that 80%
          of values are less than or equal to the provided score.
        * 'strict': Similar to "weak", except that only values that are
          strictly less than the given score are counted.
        * 'mean': The average of the "weak" and "strict" scores, often used
          in testing. See https://en.wikipedia.org/wiki/Percentile_rank
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Specifies how to treat `nan` values in `a`.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan (for each value in `score`).
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    pcos : float
        Percentile-position of score (0-100) relative to `a`.

    See Also
    --------
    numpy.percentile
    scipy.stats.scoreatpercentile, scipy.stats.rankdata

    Examples
    --------
    Three-quarters of the given values lie below a given score:

    >>> import numpy as np
    >>> from scipy import stats
    >>> stats.percentileofscore([1, 2, 3, 4], 3)
    75.0

    With multiple matches, note how the scores of the two matches, 0.6
    and 0.8 respectively, are averaged:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
    70.0

    Only 2/5 values are strictly less than 3:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
    40.0

    But 4/5 values are less than or equal to 3:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
    80.0

    The average between the weak and the strict scores is:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
    60.0

    Score arrays (of any dimensionality) are supported:

    >>> stats.percentileofscore([1, 2, 3, 3, 4], [2, 3])
    array([40., 70.])

    The inputs can be infinite:

    >>> stats.percentileofscore([-np.inf, 0, 1, np.inf], [1, 2, np.inf])
    array([ 75.,  75., 100.])

    If `a` is empty, then the resulting percentiles are all `nan`:

    >>> stats.percentileofscore([], [1, 2])
    array([nan, nan])
    """

    a = np.asarray(a)
    n = len(a)
    score = np.asarray(score)

    # nan detection (raises internally for nan_policy='raise').
    a_has_nan, _ = _contains_nan(a, nan_policy)
    score_has_nan, _ = _contains_nan(score, nan_policy)

    if nan_policy == 'raise' and (a_has_nan or score_has_nan):
        raise ValueError("The input contains nan values")

    if score_has_nan:
        # A nan score always yields a nan percentile, even under 'omit'
        # (that policy only applies to `a`).
        score = ma.masked_where(np.isnan(score), score)

    if a_has_nan:
        if nan_policy == "omit":
            # Exclude nans from both the comparisons and the count.
            a = ma.masked_where(np.isnan(a), a)
            n = a.count()
        if nan_policy == "propagate":
            # Force all outputs to nan via the empty-comparison path below.
            n = 0

    if n == 0:
        # Cannot compare against an empty sample ==> nan everywhere.
        pct = np.full_like(score, np.nan, dtype=np.float64)
    else:
        # Broadcast each score against the whole of `a`.
        score = score[..., None]

        def n_matching(mask):
            return np.count_nonzero(mask, -1)

        if kind == 'rank':
            below = n_matching(a < score)
            at_or_below = n_matching(a <= score)
            has_tie = below < at_or_below
            pct = (below + at_or_below + has_tie) * (50.0 / n)
        elif kind == 'strict':
            pct = n_matching(a < score) * (100.0 / n)
        elif kind == 'weak':
            pct = n_matching(a <= score) * (100.0 / n)
        elif kind == 'mean':
            below = n_matching(a < score)
            at_or_below = n_matching(a <= score)
            pct = (below + at_or_below) * (50.0 / n)
        else:
            raise ValueError(
                "kind can only be 'rank', 'strict', 'weak' or 'mean'")

    # Re-insert nan values where scores were masked.
    pct = ma.filled(pct, np.nan)

    if pct.ndim == 0:
        return pct[()]
    return pct
+ lowerlimit : float + Lowest value of histogram, the lower limit of the first bin. + binsize : float + The size of the bins (all bins have the same size). + extrapoints : int + The number of points outside the range of the histogram. + + See Also + -------- + numpy.histogram + + Notes + ----- + This histogram is based on numpy's histogram but has a larger range by + default if default limits is not set. + + """ + a = np.ravel(a) + if defaultlimits is None: + if a.size == 0: + # handle empty arrays. Undetermined range, so use 0-1. + defaultlimits = (0, 1) + else: + # no range given, so use values in `a` + data_min = a.min() + data_max = a.max() + # Have bins extend past min and max values slightly + s = (data_max - data_min) / (2. * (numbins - 1.)) + defaultlimits = (data_min - s, data_max + s) + + # use numpy's histogram method to compute bins + hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits, + weights=weights) + # hist are not always floats, convert to keep with old output + hist = np.array(hist, dtype=float) + # fixed width for bins is assumed, as numpy's histogram gives + # fixed width bins for int values for 'bins' + binsize = bin_edges[1] - bin_edges[0] + # calculate number of extra points + extrapoints = len([v for v in a + if defaultlimits[0] > v or v > defaultlimits[1]]) + if extrapoints > 0 and printextras: + warnings.warn(f"Points outside given histogram range = {extrapoints}", + stacklevel=3,) + + return HistogramResult(hist, defaultlimits[0], binsize, extrapoints) + + +CumfreqResult = namedtuple('CumfreqResult', + ('cumcount', 'lowerlimit', 'binsize', + 'extrapoints')) + + +def cumfreq(a, numbins=10, defaultreallimits=None, weights=None): + """Return a cumulative frequency histogram, using the histogram function. + + A cumulative histogram is a mapping that counts the cumulative number of + observations in all of the bins up to the specified bin. + + Parameters + ---------- + a : array_like + Input array. 
CumfreqResult = namedtuple('CumfreqResult',
                           ('cumcount', 'lowerlimit', 'binsize',
                            'extrapoints'))


def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """Return a cumulative frequency histogram, using the histogram function.

    A cumulative histogram is a mapping that counts the cumulative number of
    observations in all of the bins up to the specified bin.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0

    Returns
    -------
    cumcount : ndarray
        Binned values of cumulative frequency.
    lowerlimit : float
        Lower real limit
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> x = [1, 4, 2, 1, 3, 1]
    >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
    >>> res.cumcount
    array([ 1.,  2.,  3.,  3.])
    >>> res.extrapoints
    3

    Create a normal distribution with 1000 random values

    >>> samples = stats.norm.rvs(size=1000, random_state=rng)

    Calculate cumulative frequencies

    >>> res = stats.cumfreq(samples, numbins=25)

    Calculate space of values for x

    >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
    ...                                  res.cumcount.size)

    Plot histogram and cumulative histogram

    >>> fig = plt.figure(figsize=(10, 4))
    >>> ax1 = fig.add_subplot(1, 2, 1)
    >>> ax2 = fig.add_subplot(1, 2, 2)
    >>> ax1.hist(samples, bins=25)
    >>> ax1.set_title('Histogram')
    >>> ax2.bar(x, res.cumcount, width=res.binsize)
    >>> ax2.set_title('Cumulative histogram')
    >>> ax2.set_xlim([x.min(), x.max()])

    >>> plt.show()

    """
    # Bin the data, then accumulate the per-bin counts.
    counts, lowerlimit, binsize, extrapoints = _histogram(
        a, numbins, defaultreallimits, weights=weights)
    cumulative = np.cumsum(counts * 1, axis=0)
    return CumfreqResult(cumulative, lowerlimit, binsize, extrapoints)
+ + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> a = np.array([2, 4, 1, 2, 3, 2]) + >>> res = stats.relfreq(a, numbins=4) + >>> res.frequency + array([ 0.16666667, 0.5 , 0.16666667, 0.16666667]) + >>> np.sum(res.frequency) # relative frequencies should add up to 1 + 1.0 + + Create a normal distribution with 1000 random values + + >>> samples = stats.norm.rvs(size=1000, random_state=rng) + + Calculate relative frequencies + + >>> res = stats.relfreq(samples, numbins=25) + + Calculate space of values for x + + >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size, + ... res.frequency.size) + + Plot relative frequency histogram + + >>> fig = plt.figure(figsize=(5, 4)) + >>> ax = fig.add_subplot(1, 1, 1) + >>> ax.bar(x, res.frequency, width=res.binsize) + >>> ax.set_title('Relative frequency histogram') + >>> ax.set_xlim([x.min(), x.max()]) + + >>> plt.show() + + """ + a = np.asanyarray(a) + h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) + h = h / a.shape[0] + + return RelfreqResult(h, l, b, e) + + +##################################### +# VARIABILITY FUNCTIONS # +##################################### + +def obrientransform(*samples): + """Compute the O'Brien transform on input data (any number of arrays). + + Used to test for homogeneity of variance prior to running one-way stats. + Each array in ``*samples`` is one level of a factor. + If `f_oneway` is run on the transformed data and found significant, + the variances are unequal. From Maxwell and Delaney [1]_, p.112. + + Parameters + ---------- + sample1, sample2, ... : array_like + Any number of arrays. + + Returns + ------- + obrientransform : ndarray + Transformed data for use in an ANOVA. The first dimension + of the result corresponds to the sequence of transformed + arrays. 
If the arrays given are all 1-D of the same length, + the return value is a 2-D array; otherwise it is a 1-D array + of type object, with each element being an ndarray. + + Raises + ------ + ValueError + If the mean of the transformed data is not equal to the original + variance, indicating a lack of convergence in the O'Brien transform. + + References + ---------- + .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and + Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990. + + Examples + -------- + We'll test the following data sets for differences in their variance. + + >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10] + >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15] + + Apply the O'Brien transform to the data. + + >>> from scipy.stats import obrientransform + >>> tx, ty = obrientransform(x, y) + + Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the + transformed data. + + >>> from scipy.stats import f_oneway + >>> F, p = f_oneway(tx, ty) + >>> p + 0.1314139477040335 + + If we require that ``p < 0.05`` for significance, we cannot conclude + that the variances are different. + + """ + TINY = np.sqrt(np.finfo(float).eps) + + # `arrays` will hold the transformed arguments. + arrays = [] + sLast = None + + for sample in samples: + a = np.asarray(sample) + n = len(a) + mu = np.mean(a) + sq = (a - mu)**2 + sumsq = sq.sum() + + # The O'Brien transform. + t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2)) + + # Check that the mean of the transformed data is equal to the + # original variance. 
@_axis_nan_policy_factory(
    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, too_small=1
)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
    """Compute standard error of the mean.

    Calculate the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned. Must contain at least two observations.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input axis.

    Notes
    -----
    The default value for `ddof` is different to the default (0) used by other
    ddof containing routines, such as np.std and np.nanstd.

    Examples
    --------
    Find standard error along the first axis:

    >>> import numpy as np
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])

    Find standard error across the whole array, using n degrees of freedom:

    >>> stats.sem(a, axis=None, ddof=0)
    1.2893796958227628

    """
    xp = array_namespace(a)
    # `axis=None` means "over the raveled array".
    if axis is None:
        a = xp.reshape(a, (-1,))
        axis = 0
    a = xpx.atleast_nd(xp.asarray(a), ndim=1, xp=xp)
    n = a.shape[axis]
    # SEM = sample standard deviation / sqrt(n).
    return xp.std(a, axis=axis, correction=ddof) / n**0.5


def _isconst(x):
    """
    Check if all values in x are the same.  nans are ignored.

    x must be a 1d array.

    The return value is a 1d array with length 1, so it can be used
    in np.apply_along_axis.
    """
    finite = x[~np.isnan(x)]
    if finite.size == 0:
        # All-nan slice: treat as constant.
        return np.array([True])
    return (finite[0] == finite).all(keepdims=True)
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
    """
    Compute the z score.

    Compute the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'. Note that when the value is 'omit',
        nans in the input also propagate to the output, but they do not affect
        the z-scores computed for the non-nan values.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by mean and standard deviation of
        input array `a`.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.std : Arithmetic standard deviation
    scipy.stats.gzscore : Geometric standard score

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of
    `asarray` for parameters).

    References
    ----------
    .. [1] "Standard score", *Wikipedia*,
           https://en.wikipedia.org/wiki/Standard_score.
    .. [2] Huck, S. W., Cross, T. L., Clark, S. B, "Overcoming misconceptions
           about Z-scores", Teaching Statistics, vol. 8, pp. 38-40, 1986

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,
    ...                0.1954,  0.6307,  0.6599,  0.1065,  0.0508])
    >>> from scipy import stats
    >>> stats.zscore(a)
    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
            0.6748, -1.1488, -1.3324])

    Computing along a specified axis, using n-1 degrees of freedom
    (``ddof=1``) to calculate the standard deviation:

    >>> b = np.array([[ 0.3148,  0.0478,  0.6243,  0.4608],
    ...               [ 0.7149,  0.0775,  0.6072,  0.9656],
    ...               [ 0.6341,  0.1403,  0.9759,  0.4064],
    ...               [ 0.5918,  0.6948,  0.904 ,  0.3721],
    ...               [ 0.0921,  0.2481,  0.1188,  0.1366]])
    >>> stats.zscore(b, axis=1, ddof=1)
    array([[-0.19264823, -1.28415119,  1.07259584,  0.40420358],
           [ 0.33048416, -1.37380874,  0.04251374,  1.00081084],
           [ 0.26796377, -1.12598418,  1.23283094, -0.37481053],
           [-0.22095197,  0.24468594,  1.19042819, -1.21416216],
           [-0.82780366,  1.4457416 , -0.43867764, -0.1792603 ]])

    An example with ``nan_policy='omit'``:

    >>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15],
    ...               [14.95, 16.06, 121.25, 94.35, 29.81]])
    >>> stats.zscore(x, axis=1, nan_policy='omit')
    array([[-1.13490897, -0.37830299,         nan, -0.08718406,  1.60039602],
           [-0.91611681, -0.89090508,  1.4983032 ,  0.88731639, -0.5785977 ]])
    """
    # z-scoring a sample is exactly `zmap` of the sample against itself.
    return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
[1] "Geometric standard score", *Wikipedia*, + https://en.wikipedia.org/wiki/Geometric_standard_deviation#Geometric_standard_score. + + Examples + -------- + Draw samples from a log-normal distribution: + + >>> import numpy as np + >>> from scipy.stats import zscore, gzscore + >>> import matplotlib.pyplot as plt + + >>> rng = np.random.default_rng() + >>> mu, sigma = 3., 1. # mean and standard deviation + >>> x = rng.lognormal(mu, sigma, size=500) + + Display the histogram of the samples: + + >>> fig, ax = plt.subplots() + >>> ax.hist(x, 50) + >>> plt.show() + + Display the histogram of the samples standardized by the classical zscore. + Distribution is rescaled but its shape is unchanged. + + >>> fig, ax = plt.subplots() + >>> ax.hist(zscore(x), 50) + >>> plt.show() + + Demonstrate that the distribution of geometric zscores is rescaled and + quasinormal: + + >>> fig, ax = plt.subplots() + >>> ax.hist(gzscore(x), 50) + >>> plt.show() + + """ + xp = array_namespace(a) + a = _convert_common_float(a, xp=xp) + log = ma.log if isinstance(a, ma.MaskedArray) else xp.log + return zscore(log(a), axis=axis, ddof=ddof, nan_policy=nan_policy) + + +def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'): + """ + Calculate the relative z-scores. + + Return an array of z-scores, i.e., scores that are standardized to + zero mean and unit variance, where mean and variance are calculated + from the comparison array. + + Parameters + ---------- + scores : array_like + The input for which z-scores are calculated. + compare : array_like + The input from which the mean and standard deviation of the + normalization are taken; assumed to have the same dimension as + `scores`. + axis : int or None, optional + Axis over which mean and variance of `compare` are calculated. + Default is 0. If None, compute over the whole array `scores`. + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0. 
+ nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle the occurrence of nans in `compare`. + 'propagate' returns nan, 'raise' raises an exception, 'omit' + performs the calculations ignoring nan values. Default is + 'propagate'. Note that when the value is 'omit', nans in `scores` + also propagate to the output, but they do not affect the z-scores + computed for the non-nan values. + + Returns + ------- + zscore : array_like + Z-scores, in the same shape as `scores`. + + Notes + ----- + This function preserves ndarray subclasses, and works also with + matrices and masked arrays (it uses `asanyarray` instead of + `asarray` for parameters). + + Examples + -------- + >>> from scipy.stats import zmap + >>> a = [0.5, 2.0, 2.5, 3] + >>> b = [0, 1, 2, 3, 4] + >>> zmap(a, b) + array([-1.06066017, 0. , 0.35355339, 0.70710678]) + + """ + # The docstring explicitly states that it preserves subclasses. + # Let's table deprecating that and just get the array API version + # working. + + like_zscore = (scores is compare) + xp = array_namespace(scores, compare) + scores, compare = _convert_common_float(scores, compare, xp=xp) + + with warnings.catch_warnings(): + if like_zscore: # zscore should not emit SmallSampleWarning + warnings.simplefilter('ignore', SmallSampleWarning) + + mn = _xp_mean(compare, axis=axis, keepdims=True, nan_policy=nan_policy) + std = _xp_var(compare, axis=axis, correction=ddof, + keepdims=True, nan_policy=nan_policy)**0.5 + + with np.errstate(invalid='ignore', divide='ignore'): + z = _demean(scores, mn, axis, xp=xp, precision_warning=False) / std + + # If we know that scores and compare are identical, we can infer that + # some slices should have NaNs. + if like_zscore: + eps = xp.finfo(z.dtype).eps + zero = std <= xp.abs(eps * mn) + zero = xp.broadcast_to(zero, z.shape) + z[zero] = xp.nan + + return z + + +def gstd(a, axis=0, ddof=1): + r""" + Calculate the geometric standard deviation of an array. 
+ + The geometric standard deviation describes the spread of a set of numbers + where the geometric mean is preferred. It is a multiplicative factor, and + so a dimensionless quantity. + + It is defined as the exponential of the standard deviation of the + natural logarithms of the observations. + + Parameters + ---------- + a : array_like + An array containing finite, strictly positive, real numbers. + + .. deprecated:: 1.14.0 + Support for masked array input was deprecated in + SciPy 1.14.0 and will be removed in version 1.16.0. + + axis : int, tuple or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + ddof : int, optional + Degree of freedom correction in the calculation of the + geometric standard deviation. Default is 1. + + Returns + ------- + gstd : ndarray or float + An array of the geometric standard deviation. If `axis` is None or `a` + is a 1d array a float is returned. + + See Also + -------- + gmean : Geometric mean + numpy.std : Standard deviation + gzscore : Geometric standard score + + Notes + ----- + Mathematically, the sample geometric standard deviation :math:`s_G` can be + defined in terms of the natural logarithms of the observations + :math:`y_i = \log(x_i)`: + + .. math:: + + s_G = \exp(s), \quad s = \sqrt{\frac{1}{n - d} \sum_{i=1}^n (y_i - \bar y)^2} + + where :math:`n` is the number of observations, :math:`d` is the adjustment `ddof` + to the degrees of freedom, and :math:`\bar y` denotes the mean of the natural + logarithms of the observations. Note that the default ``ddof=1`` is different from + the default value used by similar functions, such as `numpy.std` and `numpy.var`. + + When an observation is infinite, the geometric standard deviation is + NaN (undefined). Non-positive observations will also produce NaNs in the + output because the *natural* logarithm (as opposed to the *complex* + logarithm) is defined and finite only for positive reals. 
+ The geometric standard deviation is sometimes confused with the exponential + of the standard deviation, ``exp(std(a))``. Instead, the geometric standard + deviation is ``exp(std(log(a)))``. + + References + ---------- + .. [1] "Geometric standard deviation", *Wikipedia*, + https://en.wikipedia.org/wiki/Geometric_standard_deviation. + .. [2] Kirkwood, T. B., "Geometric means and measures of dispersion", + Biometrics, vol. 35, pp. 908-909, 1979 + + Examples + -------- + Find the geometric standard deviation of a log-normally distributed sample. + Note that the standard deviation of the distribution is one; on a + log scale this evaluates to approximately ``exp(1)``. + + >>> import numpy as np + >>> from scipy.stats import gstd + >>> rng = np.random.default_rng() + >>> sample = rng.lognormal(mean=0, sigma=1, size=1000) + >>> gstd(sample) + 2.810010162475324 + + Compute the geometric standard deviation of a multidimensional array and + of a given axis. + + >>> a = np.arange(1, 25).reshape(2, 3, 4) + >>> gstd(a, axis=None) + 2.2944076136018947 + >>> gstd(a, axis=2) + array([[1.82424757, 1.22436866, 1.13183117], + [1.09348306, 1.07244798, 1.05914985]]) + >>> gstd(a, axis=(1,2)) + array([2.12939215, 1.22120169]) + + """ + a = np.asanyarray(a) + if isinstance(a, ma.MaskedArray): + message = ("`gstd` support for masked array input was deprecated in " + "SciPy 1.14.0 and will be removed in version 1.16.0.") + warnings.warn(message, DeprecationWarning, stacklevel=2) + log = ma.log + else: + log = np.log + + with np.errstate(invalid='ignore', divide='ignore'): + res = np.exp(np.std(log(a), axis=axis, ddof=ddof)) + + if (a <= 0).any(): + message = ("The geometric standard deviation is only defined if all elements " + "are greater than or equal to zero; otherwise, the result is NaN.") + warnings.warn(message, RuntimeWarning, stacklevel=2) + + return res + +# Private dictionary initialized only once at module level +# See https://en.wikipedia.org/wiki/Robust_measures_of_scale 
_scale_conversions = {'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}


@_axis_nan_policy_factory(
    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1,
    default_axis=None, override={'nan_propagation': False}
)
def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
        interpolation='linear', keepdims=False):
    r"""
    Compute the interquartile range of the data along the specified axis.

    The interquartile range (IQR) is the difference between the 75th and
    25th percentile of the data. It is a measure of the dispersion
    similar to standard deviation or variance, but is much more robust
    against outliers [2]_.

    The ``rng`` parameter allows this function to compute other
    percentile ranges than the actual IQR. For example, setting
    ``rng=(0, 100)`` is equivalent to `numpy.ptp`.

    The IQR of an empty array is `np.nan`.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    x : array_like
        Input array or object that can be converted to an array.
    axis : int or sequence of int, optional
        Axis along which the range is computed. The default is to
        compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100] optional
        Percentiles over which to compute the range. Each must be
        between 0 and 100, inclusive. The default is the true IQR:
        ``(25, 75)``. The order of the elements is not important.
    scale : scalar or str or array_like of reals, optional
        The numerical value of scale will be divided out of the final
        result. The following string value is also recognized:

          * 'normal' : Scale by
            :math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.

        The default is 1.0.
        Array-like `scale` of real dtype is also allowed, as long
        as it broadcasts correctly to the output such that
        ``out / scale`` is a valid operation. The output dimensions
        depend on the input array, `x`, the `axis` argument, and the
        `keepdims` flag.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values
    interpolation : str, optional

        Specifies the interpolation method to use when the percentile
        boundaries lie between two data points ``i`` and ``j``.
        The following options are available (default is 'linear'):

          * 'linear': ``i + (j - i)*fraction``, where ``fraction`` is the
            fractional part of the index surrounded by ``i`` and ``j``.
          * 'lower': ``i``.
          * 'higher': ``j``.
          * 'nearest': ``i`` or ``j`` whichever is nearest.
          * 'midpoint': ``(i + j)/2``.

        For NumPy >= 1.22.0, the additional options provided by the ``method``
        keyword of `numpy.percentile` are also valid.

    keepdims : bool, optional
        If this is set to True, the reduced axes are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array `x`.

    Returns
    -------
    iqr : scalar or ndarray
        If ``axis=None``, a scalar is returned. If the input contains
        integers or floats of smaller precision than ``np.float64``, then the
        output data-type is ``np.float64``. Otherwise, the output data-type is
        the same as that of the input.

    See Also
    --------
    numpy.std, numpy.var

    References
    ----------
    .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
    .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
    .. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import iqr
    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
    >>> x
    array([[10,  7,  4],
           [ 3,  2,  1]])
    >>> iqr(x)
    4.0
    >>> iqr(x, axis=0)
    array([ 3.5,  2.5,  1.5])
    >>> iqr(x, axis=1)
    array([ 3.,  1.])
    >>> iqr(x, axis=1, keepdims=True)
    array([[ 3.],
           [ 1.]])

    """
    x = asarray(x)

    # This check prevents percentile from raising an error later. Also, it is
    # consistent with `np.var` and `np.std`.
    if not x.size:
        return _get_nan(x)

    # An error may be raised here, so fail-fast, before doing lengthy
    # computations, even though `scale` is not used until later
    if isinstance(scale, str):
        scale_key = scale.lower()
        if scale_key not in _scale_conversions:
            raise ValueError(f"{scale} not a valid scale for `iqr`")
        scale = _scale_conversions[scale_key]

    # Select the percentile function to use based on nans and policy
    contains_nan, nan_policy = _contains_nan(x, nan_policy)

    if contains_nan and nan_policy == 'omit':
        percentile_func = np.nanpercentile
    else:
        percentile_func = np.percentile

    if len(rng) != 2:
        raise TypeError("quantile range must be two element sequence")

    if np.isnan(rng).any():
        raise ValueError("range must not contain NaNs")

    rng = sorted(rng)
    pct = percentile_func(x, rng, axis=axis, method=interpolation,
                          keepdims=keepdims)
    out = np.subtract(pct[1], pct[0])

    if scale != 1.0:
        out /= scale

    return out


def _mad_1d(x, center, nan_policy):
    # Median absolute deviation for 1-d array x.
    # This is a helper function for `median_abs_deviation`; it assumes its
    # arguments have been validated already. In particular, x must be a
    # 1-d numpy array, center must be callable, and if nan_policy is not
    # 'propagate', it is assumed to be 'omit', because 'raise' is handled
    # in `median_abs_deviation`.
    # No warning is generated if x is empty or all nan.
    isnan = np.isnan(x)
    if isnan.any():
        if nan_policy == 'propagate':
            return np.nan
        x = x[~isnan]
    if x.size == 0:
        # MAD of an empty array is nan.
        return np.nan
    # Edge cases have been handled, so do the basic MAD calculation.
    med = center(x)
    mad = np.median(np.abs(x - med))
    return mad


def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
                         nan_policy='propagate'):
    r"""
    Compute the median absolute deviation of the data along the given axis.

    The median absolute deviation (MAD, [1]_) computes the median over the
    absolute deviations from the median. It is a measure of dispersion
    similar to the standard deviation but more robust to outliers [2]_.

    The MAD of an empty array is ``np.nan``.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    x : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the range is computed. Default is 0. If None, compute
        the MAD over the entire array.
    center : callable, optional
        A function that will return the central value. The default is to use
        np.median. Any user defined function used will need to have the
        function signature ``func(arr, axis)``.
    scale : scalar or str, optional
        The numerical value of scale will be divided out of the final
        result. The default is 1.0. The string "normal" is also accepted,
        and results in `scale` being the inverse of the standard normal
        quantile function at 0.75, which is approximately 0.67449.
        Array-like scale is also allowed, as long as it broadcasts correctly
        to the output such that ``out / scale`` is a valid operation. The
        output dimensions depend on the input array, `x`, and the `axis`
        argument.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    mad : scalar or ndarray
        If ``axis=None``, a scalar is returned. If the input contains
        integers or floats of smaller precision than ``np.float64``, then the
        output data-type is ``np.float64``. Otherwise, the output data-type is
        the same as that of the input.

    See Also
    --------
    numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
    scipy.stats.tstd, scipy.stats.tvar

    Notes
    -----
    The `center` argument only affects the calculation of the central value
    around which the MAD is calculated. That is, passing in ``center=np.mean``
    will calculate the MAD around the mean - it will not calculate the *mean*
    absolute deviation.

    The input array may contain `inf`, but if `center` returns `inf`, the
    corresponding MAD for that data will be `nan`.

    References
    ----------
    .. [1] "Median absolute deviation",
           https://en.wikipedia.org/wiki/Median_absolute_deviation
    .. [2] "Robust measures of scale",
           https://en.wikipedia.org/wiki/Robust_measures_of_scale

    Examples
    --------
    When comparing the behavior of `median_abs_deviation` with ``np.std``,
    the latter is affected when we change a single value of an array to have an
    outlier value while the MAD hardly changes:

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
    >>> x.std()
    0.9973906394005013
    >>> stats.median_abs_deviation(x)
    0.82832610097857
    >>> x[0] = 345.6
    >>> x.std()
    34.42304872314415
    >>> stats.median_abs_deviation(x)
    0.8323442311590675

    Axis handling example:

    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
    >>> x
    array([[10,  7,  4],
           [ 3,  2,  1]])
    >>> stats.median_abs_deviation(x)
    array([3.5, 2.5, 1.5])
    >>> stats.median_abs_deviation(x, axis=None)
    2.0

    Scale normal example:

    >>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456)
    >>> stats.median_abs_deviation(x)
    1.3487398527041636
    >>> stats.median_abs_deviation(x, scale='normal')
    1.9996446978061115

    """
    if not callable(center):
        raise TypeError("The argument 'center' must be callable. The given "
                        f"value {repr(center)} is not callable.")

    # An error may be raised here, so fail-fast, before doing lengthy
    # computations, even though `scale` is not used until later
    if isinstance(scale, str):
        if scale.lower() == 'normal':
            scale = 0.6744897501960817  # special.ndtri(0.75)
        else:
            raise ValueError(f"{scale} is not a valid scale value.")

    x = asarray(x)

    # Consistent with `np.var` and `np.std`.
    if not x.size:
        if axis is None:
            return np.nan
        nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis)
        if nan_shape == ():
            # Return nan, not array(nan)
            return np.nan
        return np.full(nan_shape, np.nan)

    contains_nan, nan_policy = _contains_nan(x, nan_policy)

    if contains_nan:
        if axis is None:
            mad = _mad_1d(x.ravel(), center, nan_policy)
        else:
            mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)
    else:
        if axis is None:
            med = center(x, axis=None)
            mad = np.median(np.abs(x - med))
        else:
            # Wrap the call to center() in expand_dims() so it acts like
            # keepdims=True was used.
            med = np.expand_dims(center(x, axis=axis), axis)
            mad = np.median(np.abs(x - med), axis=axis)

    return mad / scale


#####################################
#         TRIMMING FUNCTIONS        #
#####################################


SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))


def sigmaclip(a, low=4., high=4.):
    """Perform iterative sigma-clipping of array elements.

    Starting from the full sample, all elements outside the critical range are
    removed, i.e. all elements of the input array `c` that satisfy either of
    the following conditions::

        c < mean(c) - std(c)*low
        c > mean(c) + std(c)*high

    The iteration continues with the updated sample until no
    elements are outside the (updated) range.

    Parameters
    ----------
    a : array_like
        Data array, will be raveled if not 1-D.
    low : float, optional
        Lower bound factor of sigma clipping. Default is 4.
    high : float, optional
        Upper bound factor of sigma clipping. Default is 4.

    Returns
    -------
    clipped : ndarray
        Input array with clipped elements removed.
    lower : float
        Lower threshold value use for clipping.
    upper : float
        Upper threshold value use for clipping.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import sigmaclip
    >>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
    ...                     np.linspace(0, 20, 5)))
    >>> fact = 1.5
    >>> c, low, upp = sigmaclip(a, fact, fact)
    >>> c
    array([  9.96666667,  10.        ,  10.03333333,  10.        ])
    >>> c.var(), c.std()
    (0.00055555555555555165, 0.023570226039551501)
    >>> low, c.mean() - fact*c.std(), c.min()
    (9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
    >>> upp, c.mean() + fact*c.std(), c.max()
    (10.035355339059327, 10.035355339059327, 10.033333333333333)

    >>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
    ...                     np.linspace(-100, -50, 3)))
    >>> c, low, upp = sigmaclip(a, 1.8, 1.8)
    >>> (c == np.linspace(9.5, 10.5, 11)).all()
    True

    """
    c = np.asarray(a).ravel()
    delta = 1
    # Iterate until a pass removes no elements.
    while delta:
        c_std = c.std()
        c_mean = c.mean()
        size = c.size
        critlower = c_mean - c_std * low
        critupper = c_mean + c_std * high
        c = c[(c >= critlower) & (c <= critupper)]
        delta = size - c.size

    return SigmaclipResult(c, critlower, critupper)


def trimboth(a, proportiontocut, axis=0):
    """Slice off a proportion of items from both ends of an array.

    Slice off the passed proportion of items from both ends of the passed
    array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
    rightmost 10% of scores). The trimmed values are the lowest and
    highest ones.
    Slice off less if proportion results in a non-integer slice index (i.e.
    conservatively slices off `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of total data set to trim of each end.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`. The order of the trimmed content
        is undefined.

    See Also
    --------
    trim_mean

    Examples
    --------
    Create an array of 10 values and trim 10% of those values from each end:

    >>> import numpy as np
    >>> from scipy import stats
    >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> stats.trimboth(a, 0.1)
    array([1, 3, 2, 4, 5, 6, 7, 8])

    Note that the elements of the input array are trimmed by value, but the
    output array is not necessarily sorted.

    The proportion to trim is rounded down to the nearest integer. For
    instance, trimming 25% of the values from each end of an array of 10
    values will return an array of 6 values:

    >>> b = np.arange(10)
    >>> stats.trimboth(b, 1/4).shape
    (6,)

    Multidimensional arrays can be trimmed along any axis or across the entire
    array:

    >>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9]
    >>> d = np.array([a, b, c])
    >>> stats.trimboth(d, 0.4, axis=0).shape
    (1, 10)
    >>> stats.trimboth(d, 0.4, axis=1).shape
    (3, 2)
    >>> stats.trimboth(d, 0.4, axis=None).shape
    (6,)

    """
    a = np.asarray(a)

    if a.size == 0:
        return a

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut >= uppercut):
        raise ValueError("Proportion too big.")

    # np.partition is O(n); full sorting is unnecessary to drop extremes.
    atmp = np.partition(a, (lowercut, uppercut - 1), axis)

    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]


def trim1(a, proportiontocut, tail='right', axis=0):
    """Slice off a proportion from ONE end of the passed array distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores. The lowest or highest values are trimmed (depending on
    the tail).
    Slice off less if proportion results in a non-integer slice index
    (i.e. conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution.
    tail : {'left', 'right'}, optional
        Defaults to 'right'.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`. The order of the trimmed content is
        undefined.

    Raises
    ------
    ValueError
        If `tail` is not 'left' or 'right'.

    Examples
    --------
    Create an array of 10 values and trim 20% of its lowest values:

    >>> import numpy as np
    >>> from scipy import stats
    >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> stats.trim1(a, 0.2, 'left')
    array([2, 4, 3, 5, 6, 7, 8, 9])

    Note that the elements of the input array are trimmed by value, but the
    output array is not necessarily sorted.

    The proportion to trim is rounded down to the nearest integer. For
    instance, trimming 25% of the values from an array of 10 values will
    return an array of 8 values:

    >>> b = np.arange(10)
    >>> stats.trim1(b, 1/4).shape
    (8,)

    Multidimensional arrays can be trimmed along any axis or across the entire
    array:

    >>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9]
    >>> d = np.array([a, b, c])
    >>> stats.trim1(d, 0.8, axis=0).shape
    (1, 10)
    >>> stats.trim1(d, 0.8, axis=1).shape
    (3, 2)
    >>> stats.trim1(d, 0.8, axis=None).shape
    (6,)

    """
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]

    # avoid possible corner case
    if proportiontocut >= 1:
        return []

    if tail.lower() == 'right':
        lowercut = 0
        uppercut = nobs - int(proportiontocut * nobs)

    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs

    else:
        # BUG FIX: previously an unrecognized `tail` fell through to an
        # opaque NameError on `lowercut`; fail fast with a clear message.
        raise ValueError(f"`tail` must be 'left' or 'right', not {tail!r}")

    atmp = np.partition(a, (lowercut, uppercut - 1), axis)

    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]


def trim_mean(a, proportiontocut, axis=0):
    """Return mean of array after trimming a specified fraction of extreme values

    Removes the specified proportion of elements from *each* end of the
    sorted array, then computes the mean of the remaining elements.

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction of the most positive and most negative elements to remove.
        When the specified proportion does not result in an integer number of
        elements, the number of elements to trim is rounded down.
    axis : int or None, default: 0
        Axis along which the trimmed means are computed.
        If None, compute over the raveled array.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.

    See Also
    --------
    trimboth : Remove a proportion of elements from each end of an array.
    tmean : Compute the mean after trimming values outside specified limits.

    Notes
    -----
    For 1-D array `a`, `trim_mean` is approximately equivalent to the following
    calculation::

        import numpy as np
        a = np.sort(a)
        m = int(proportiontocut * len(a))
        np.mean(a[m: len(a) - m])

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> x = [1, 2, 3, 5]
    >>> stats.trim_mean(x, 0.25)
    2.5

    When the specified proportion does not result in an integer number of
    elements, the number of elements to trim is rounded down.

    >>> stats.trim_mean(x, 0.24999) == np.mean(x)
    True

    Use `axis` to specify the axis along which the calculation is performed.

    >>> x2 = [[1, 2, 3, 5],
    ...       [10, 20, 30, 50]]
    >>> stats.trim_mean(x2, 0.25)
    array([ 5.5, 11. , 16.5, 27.5])
    >>> stats.trim_mean(x2, 0.25, axis=1)
    array([ 2.5, 25. ])

    """
    a = np.asarray(a)

    if a.size == 0:
        return np.nan

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut > uppercut):
        raise ValueError("Proportion too big.")

    atmp = np.partition(a, (lowercut, uppercut - 1), axis)

    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return np.mean(atmp[tuple(sl)], axis=axis)


F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))


def _create_f_oneway_nan_result(shape, axis, samples):
    """
    This is a helper function for f_oneway for creating the return values
    in certain degenerate conditions. It creates return values that are
    all nan with the appropriate shape for the given `shape` and `axis`.
    """
    axis = normalize_axis_index(axis, len(shape))
    shp = shape[:axis] + shape[axis+1:]
    f = np.full(shp, fill_value=_get_nan(*samples))
    prob = f.copy()
    return F_onewayResult(f[()], prob[()])


def _first(arr, axis):
    """Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
    return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)


def _f_oneway_is_too_small(samples, kwargs=None, axis=-1):
    message = f"At least two samples are required; got {len(samples)}."
    if len(samples) < 2:
        raise TypeError(message)

    # Check this after forming alldata, so shape errors are detected
    # and reported before checking for 0 length inputs.
    if any(sample.shape[axis] == 0 for sample in samples):
        return True

    # Must have at least one group with length greater than 1.
    if all(sample.shape[axis] == 1 for sample in samples):
        msg = ('all input arrays have length 1.  f_oneway requires that at '
               'least one input has length greater than 1.')
        warnings.warn(SmallSampleWarning(msg), stacklevel=2)
        return True

    return False


@_axis_nan_policy_factory(
    F_onewayResult, n_samples=None, too_small=_f_oneway_is_too_small)
def f_oneway(*samples, axis=0):
    """Perform one-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups have
    the same population mean. The test is applied to samples from two or
    more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group. There must be at least
        two arguments. If the arrays are multidimensional, then all the
        dimensions of the array must be the same except for `axis`.
    axis : int, optional
        Axis of the input arrays along which the test is applied.
        Default is 0.

    Returns
    -------
    statistic : float
        The computed F statistic of the test.
    pvalue : float
        The associated p-value from the F distribution.

    Warns
    -----
    `~scipy.stats.ConstantInputWarning`
        Emitted if all values within each of the input arrays are identical.
        In this case the F statistic is either infinite or isn't defined,
        so ``np.inf`` or ``np.nan`` is returned.

    RuntimeWarning
        Emitted if the length of any input array is 0, or if all the input
        arrays have length 1.  ``np.nan`` is returned for the F statistic
        and the p-value in these cases.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in order
    for the associated p-value to be valid.

    1. The samples are independent.
    2. Each sample is from a normally distributed population.
    3. The population standard deviations of the groups are all equal. This
       property is known as homoscedasticity.

    If these assumptions are not true for a given set of data, it may still
    be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or
    the Alexander-Govern test (`scipy.stats.alexandergovern`) although with
    some loss of power.

    The length of each group must be at least one, and there must be at
    least one group with length greater than one.  If these conditions
    are not satisfied, a warning is generated and (``np.nan``, ``np.nan``)
    is returned.

    If all values in each group are identical, and there exist at least two
    groups with different values, the function generates a warning and
    returns (``np.inf``, 0).

    If all values in all groups are the same, function generates a warning
    and returns (``np.nan``, ``np.nan``).

    The algorithm is from Heiman [2]_, pp.394-7.

    References
    ----------
    .. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
           Chapter 14, 2014, http://vassarstats.net/textbook/

    .. [2] G.W. Heiman, "Understanding research methods and statistics: An
           integrated introduction for psychology", Houghton, Mifflin and
           Company, 2001.

    .. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import f_oneway

    Here are some data [3]_ on a shell measurement (the length of the anterior
    adductor muscle scar, standardized by dividing by length) in the mussel
    Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
    Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
    much larger data set used in McDonald et al. (1991).

    >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
    ...              0.0659, 0.0923, 0.0836]
    >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
    ...            0.0725]
    >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
    >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
    ...            0.0689]
    >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
    >>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
    F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544)

    `f_oneway` accepts multidimensional input arrays.  When the inputs
    are multidimensional and `axis` is not given, the test is performed
    along the first axis of the input arrays.  For the following data, the
    test is performed three times, once for each column.

    >>> a = np.array([[9.87, 9.03, 6.81],
    ...               [7.18, 8.35, 7.00],
    ...               [8.39, 7.58, 7.68],
    ...               [7.45, 6.33, 9.35],
    ...               [6.41, 7.10, 9.33],
    ...               [8.00, 8.24, 8.44]])
    >>> b = np.array([[6.35, 7.30, 7.16],
    ...               [6.65, 6.68, 7.63],
    ...               [5.72, 7.73, 6.72],
    ...               [7.01, 9.19, 7.41],
    ...               [7.75, 7.87, 8.30],
    ...               [6.90, 7.97, 6.97]])
    >>> c = np.array([[3.31, 8.77, 1.01],
    ...               [8.25, 3.24, 3.62],
    ...               [6.32, 8.81, 5.19],
    ...               [7.48, 8.83, 8.91],
    ...               [8.59, 6.01, 6.07],
    ...               [3.07, 9.72, 7.48]])
    >>> F = f_oneway(a, b, c)
    >>> F.statistic
    array([1.75676344, 0.03701228, 3.76439349])
    >>> F.pvalue
    array([0.20630784, 0.96375203, 0.04733157])

    """
    if len(samples) < 2:
        raise TypeError('at least two inputs are required;'
                        f' got {len(samples)}.')

    # ANOVA on N groups, each in its own array
    num_groups = len(samples)

    # We haven't explicitly validated axis, but if it is bad, this call of
    # np.concatenate will raise np.exceptions.AxisError. The call will raise
    # ValueError if the dimensions of all the arrays, except the axis
    # dimension, are not the same.
+ alldata = np.concatenate(samples, axis=axis) + bign = alldata.shape[axis] + + # Check if the inputs are too small + if _f_oneway_is_too_small(samples): + return _create_f_oneway_nan_result(alldata.shape, axis, samples) + + # Check if all values within each group are identical, and if the common + # value in at least one group is different from that in another group. + # Based on https://github.com/scipy/scipy/issues/11669 + + # If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ..., + # then is_const is a boolean array with shape (num_groups, ...). + # It is True if the values within the groups along the axis slice are + # identical. In the typical case where each input array is 1-d, is_const is + # a 1-d array with length num_groups. + is_const = np.concatenate( + [(_first(sample, axis) == sample).all(axis=axis, + keepdims=True) + for sample in samples], + axis=axis + ) + + # all_const is a boolean array with shape (...) (see previous comment). + # It is True if the values within each group along the axis slice are + # the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]). + all_const = is_const.all(axis=axis) + if all_const.any(): + msg = ("Each of the input arrays is constant; " + "the F statistic is not defined or infinite") + warnings.warn(stats.ConstantInputWarning(msg), stacklevel=2) + + # all_same_const is True if all the values in the groups along the axis=0 + # slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]). + all_same_const = (_first(alldata, axis) == alldata).all(axis=axis) + + # Determine the mean of the data, and subtract that from all inputs to a + # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant + # to a shift in location, and centering all data around zero vastly + # improves numerical stability. 
+ offset = alldata.mean(axis=axis, keepdims=True) + alldata = alldata - offset + + normalized_ss = _square_of_sums(alldata, axis=axis) / bign + + sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss + + ssbn = 0 + for sample in samples: + smo_ss = _square_of_sums(sample - offset, axis=axis) + ssbn = ssbn + smo_ss / sample.shape[axis] + + # Naming: variables ending in bn/b are for "between treatments", wn/w are + # for "within treatments" + ssbn = ssbn - normalized_ss + sswn = sstot - ssbn + dfbn = num_groups - 1 + dfwn = bign - num_groups + msb = ssbn / dfbn + msw = sswn / dfwn + with np.errstate(divide='ignore', invalid='ignore'): + f = msb / msw + + prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf + + # Fix any f values that should be inf or nan because the corresponding + # inputs were constant. + if np.isscalar(f): + if all_same_const: + f = np.nan + prob = np.nan + elif all_const: + f = np.inf + prob = 0.0 + else: + f[all_const] = np.inf + prob[all_const] = 0.0 + f[all_same_const] = np.nan + prob[all_same_const] = np.nan + + return F_onewayResult(f, prob) + + +@dataclass +class AlexanderGovernResult: + statistic: float + pvalue: float + + +@_axis_nan_policy_factory( + AlexanderGovernResult, n_samples=None, + result_to_tuple=lambda x: (x.statistic, x.pvalue), + too_small=1 +) +def alexandergovern(*samples, nan_policy='propagate', axis=0): + """Performs the Alexander Govern test. + + The Alexander-Govern approximation tests the equality of k independent + means in the face of heterogeneity of variance. The test is applied to + samples from two or more groups, possibly with differing sizes. + + Parameters + ---------- + sample1, sample2, ... : array_like + The sample measurements for each group. There must be at least + two samples, and each sample must contain at least two observations. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 
+ The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + res : AlexanderGovernResult + An object with attributes: + + statistic : float + The computed A statistic of the test. + pvalue : float + The associated p-value from the chi-squared distribution. + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if an input is a constant array. The statistic is not defined + in this case, so ``np.nan`` is returned. + + See Also + -------- + f_oneway : one-way ANOVA + + Notes + ----- + The use of this test relies on several assumptions. + + 1. The samples are independent. + 2. Each sample is from a normally distributed population. + 3. Unlike `f_oneway`, this test does not assume on homoscedasticity, + instead relaxing the assumption of equal variances. + + Input samples must be finite, one dimensional, and with size greater than + one. + + References + ---------- + .. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler + Approximation for ANOVA under Variance Heterogeneity." Journal + of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101. + JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020. + + Examples + -------- + >>> from scipy.stats import alexandergovern + + Here are some data on annual percentage rate of interest charged on + new car loans at nine of the largest banks in four American cities + taken from the National Institute of Standards and Technology's + ANOVA dataset. + + We use `alexandergovern` to test the null hypothesis that all cities + have the same mean APR against the alternative that the cities do not + all have the same mean APR. We decide that a significance level of 5% + is required to reject the null hypothesis in favor of the alternative. 
+ + >>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5] + >>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9] + >>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5] + >>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25, + ... 11.89] + >>> alexandergovern(atlanta, chicago, houston, memphis) + AlexanderGovernResult(statistic=4.65087071883494, + pvalue=0.19922132490385214) + + The p-value is 0.1992, indicating a nearly 20% chance of observing + such an extreme value of the test statistic under the null hypothesis. + This exceeds 5%, so we do not reject the null hypothesis in favor of + the alternative. + + """ + samples = _alexandergovern_input_validation(samples, nan_policy, axis) + + # The following formula numbers reference the equation described on + # page 92 by Alexander, Govern. Formulas 5, 6, and 7 describe other + # tests that serve as the basis for equation (8) but are not needed + # to perform the test. + + # precalculate mean and length of each sample + lengths = [sample.shape[-1] for sample in samples] + means = np.asarray([_xp_mean(sample, axis=-1) for sample in samples]) + + # (1) determine standard error of the mean for each sample + se2 = [(_xp_var(sample, correction=1, axis=-1) / length) + for sample, length in zip(samples, lengths)] + standard_errors_squared = np.asarray(se2) + standard_errors = standard_errors_squared**0.5 + + # Special case: statistic is NaN when variance is zero + eps = np.finfo(standard_errors.dtype).eps + zero = standard_errors <= np.abs(eps * means) + NaN = np.asarray(np.nan, dtype=standard_errors.dtype) + standard_errors = np.where(zero, NaN, standard_errors) + + # (2) define a weight for each sample + inv_sq_se = 1 / standard_errors_squared + weights = inv_sq_se / np.sum(inv_sq_se, axis=0, keepdims=True) + + # (3) determine variance-weighted estimate of the common mean + var_w = np.sum(weights * means, axis=0, keepdims=True) + + # (4) determine 
one-sample t statistic for each group + t_stats = _demean(means, var_w, axis=0, xp=np) / standard_errors + + # calculate parameters to be used in transformation + v = np.asarray(lengths) - 1 + # align along 0th axis, which corresponds with separate samples + v = np.reshape(v, (-1,) + (1,)*(t_stats.ndim-1)) + a = v - .5 + b = 48 * a**2 + c = (a * np.log(1 + (t_stats ** 2)/v))**.5 + + # (8) perform a normalizing transformation on t statistic + z = (c + ((c**3 + 3*c)/b) - + ((4*c**7 + 33*c**5 + 240*c**3 + 855*c) / + (b**2*10 + 8*b*c**4 + 1000*b))) + + # (9) calculate statistic + A = np.sum(z**2, axis=0) + + # "[the p value is determined from] central chi-square random deviates + # with k - 1 degrees of freedom". Alexander, Govern (94) + df = len(samples) - 1 + chi2 = _SimpleChi2(df) + p = _get_pvalue(A, chi2, alternative='greater', symmetric=False, xp=np) + return AlexanderGovernResult(A, p) + + +def _alexandergovern_input_validation(samples, nan_policy, axis): + if len(samples) < 2: + raise TypeError(f"2 or more inputs required, got {len(samples)}") + + for sample in samples: + if sample.shape[axis] <= 1: + raise ValueError("Input sample size must be greater than one.") + + samples = [np.moveaxis(sample, axis, -1) for sample in samples] + + return samples + + +def _pearsonr_fisher_ci(r, n, confidence_level, alternative): + """ + Compute the confidence interval for Pearson's R. + + Fisher's transformation is used to compute the confidence interval + (https://en.wikipedia.org/wiki/Fisher_transformation). 
+ """ + xp = array_namespace(r) + + with np.errstate(divide='ignore'): + zr = xp.atanh(r) + + ones = xp.ones_like(r) + n = xp.asarray(n, dtype=r.dtype) + confidence_level = xp.asarray(confidence_level, dtype=r.dtype) + if n > 3: + se = xp.sqrt(1 / (n - 3)) + if alternative == "two-sided": + h = special.ndtri(0.5 + confidence_level/2) + zlo = zr - h*se + zhi = zr + h*se + rlo = xp.tanh(zlo) + rhi = xp.tanh(zhi) + elif alternative == "less": + h = special.ndtri(confidence_level) + zhi = zr + h*se + rhi = xp.tanh(zhi) + rlo = -ones + else: + # alternative == "greater": + h = special.ndtri(confidence_level) + zlo = zr - h*se + rlo = xp.tanh(zlo) + rhi = ones + else: + rlo, rhi = -ones, ones + + rlo = rlo[()] if rlo.ndim == 0 else rlo + rhi = rhi[()] if rhi.ndim == 0 else rhi + return ConfidenceInterval(low=rlo, high=rhi) + + +def _pearsonr_bootstrap_ci(confidence_level, method, x, y, alternative, axis): + """ + Compute the confidence interval for Pearson's R using the bootstrap. + """ + def statistic(x, y, axis): + statistic, _ = pearsonr(x, y, axis=axis) + return statistic + + res = bootstrap((x, y), statistic, confidence_level=confidence_level, axis=axis, + paired=True, alternative=alternative, **method._asdict()) + # for one-sided confidence intervals, bootstrap gives +/- inf on one side + res.confidence_interval = np.clip(res.confidence_interval, -1, 1) + + return ConfidenceInterval(*res.confidence_interval) + + +ConfidenceInterval = namedtuple('ConfidenceInterval', ['low', 'high']) + +PearsonRResultBase = _make_tuple_bunch('PearsonRResultBase', + ['statistic', 'pvalue'], []) + + +class PearsonRResult(PearsonRResultBase): + """ + Result of `scipy.stats.pearsonr` + + Attributes + ---------- + statistic : float + Pearson product-moment correlation coefficient. + pvalue : float + The p-value associated with the chosen alternative. 
+ + Methods + ------- + confidence_interval + Computes the confidence interval of the correlation + coefficient `statistic` for the given confidence level. + + """ + def __init__(self, statistic, pvalue, alternative, n, x, y, axis): + super().__init__(statistic, pvalue) + self._alternative = alternative + self._n = n + self._x = x + self._y = y + self._axis = axis + + # add alias for consistency with other correlation functions + self.correlation = statistic + + def confidence_interval(self, confidence_level=0.95, method=None): + """ + The confidence interval for the correlation coefficient. + + Compute the confidence interval for the correlation coefficient + ``statistic`` with the given confidence level. + + If `method` is not provided, + The confidence interval is computed using the Fisher transformation + F(r) = arctanh(r) [1]_. When the sample pairs are drawn from a + bivariate normal distribution, F(r) approximately follows a normal + distribution with standard error ``1/sqrt(n - 3)``, where ``n`` is the + length of the original samples along the calculation axis. When + ``n <= 3``, this approximation does not yield a finite, real standard + error, so we define the confidence interval to be -1 to 1. + + If `method` is an instance of `BootstrapMethod`, the confidence + interval is computed using `scipy.stats.bootstrap` with the provided + configuration options and other appropriate settings. In some cases, + confidence limits may be NaN due to a degenerate resample, and this is + typical for very small samples (~6 observations). + + Parameters + ---------- + confidence_level : float + The confidence level for the calculation of the correlation + coefficient confidence interval. Default is 0.95. + + method : BootstrapMethod, optional + Defines the method used to compute the confidence interval. See + method description for details. + + .. 
versionadded:: 1.11.0 + + Returns + ------- + ci : namedtuple + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. + + References + ---------- + .. [1] "Pearson correlation coefficient", Wikipedia, + https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + """ + if isinstance(method, BootstrapMethod): + xp = array_namespace(self._x) + message = ('`method` must be `None` if `pearsonr` ' + 'arguments were not NumPy arrays.') + if not is_numpy(xp): + raise ValueError(message) + + ci = _pearsonr_bootstrap_ci(confidence_level, method, self._x, self._y, + self._alternative, self._axis) + elif method is None: + ci = _pearsonr_fisher_ci(self.statistic, self._n, confidence_level, + self._alternative) + else: + message = ('`method` must be an instance of `BootstrapMethod` ' + 'or None.') + raise ValueError(message) + return ci + + +def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0): + r""" + Pearson correlation coefficient and p-value for testing non-correlation. + + The Pearson correlation coefficient [1]_ measures the linear relationship + between two datasets. Like other correlation + coefficients, this one varies between -1 and +1 with 0 implying no + correlation. Correlations of -1 or +1 imply an exact linear relationship. + Positive correlations imply that as x increases, so does y. Negative + correlations imply that as x increases, y decreases. + + This function also performs a test of the null hypothesis that the + distributions underlying the samples are uncorrelated and normally + distributed. (See Kowalski [3]_ + for a discussion of the effects of non-normality of the input on the + distribution of the correlation coefficient.) + The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Pearson correlation at least as extreme + as the one computed from these datasets. + + Parameters + ---------- + x : array_like + Input array. + y : array_like + Input array. 
+ axis : int or None, default + Axis along which to perform the calculation. Default is 0. + If None, ravel both arrays before performing the calculation. + + .. versionadded:: 1.13.0 + alternative : {'two-sided', 'greater', 'less'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the correlation is nonzero + * 'less': the correlation is negative (less than zero) + * 'greater': the correlation is positive (greater than zero) + + .. versionadded:: 1.9.0 + method : ResamplingMethod, optional + Defines the method used to compute the p-value. If `method` is an + instance of `PermutationMethod`/`MonteCarloMethod`, the p-value is + computed using + `scipy.stats.permutation_test`/`scipy.stats.monte_carlo_test` with the + provided configuration options and other appropriate settings. + Otherwise, the p-value is computed as documented in the notes. + + .. versionadded:: 1.11.0 + + Returns + ------- + result : `~scipy.stats._result_classes.PearsonRResult` + An object with the following attributes: + + statistic : float + Pearson product-moment correlation coefficient. + pvalue : float + The p-value associated with the chosen alternative. + + The object has the following method: + + confidence_interval(confidence_level, method) + This computes the confidence interval of the correlation + coefficient `statistic` for the given confidence level. + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. If `method` is not provided, the + confidence interval is computed using the Fisher transformation + [1]_. If `method` is an instance of `BootstrapMethod`, the + confidence interval is computed using `scipy.stats.bootstrap` with + the provided configuration options and other appropriate settings. + In some cases, confidence limits may be NaN due to a degenerate + resample, and this is typical for very small samples (~6 + observations). 
+ + Raises + ------ + ValueError + If `x` and `y` do not have length at least 2. + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if an input is a constant array. The correlation coefficient + is not defined in this case, so ``np.nan`` is returned. + + `~scipy.stats.NearConstantInputWarning` + Raised if an input is "nearly" constant. The array ``x`` is considered + nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``. + Numerical errors in the calculation ``x - mean(x)`` in this case might + result in an inaccurate calculation of r. + + See Also + -------- + spearmanr : Spearman rank-order correlation coefficient. + kendalltau : Kendall's tau, a correlation measure for ordinal data. + + Notes + ----- + The correlation coefficient is calculated as follows: + + .. math:: + + r = \frac{\sum (x - m_x) (y - m_y)} + {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}} + + where :math:`m_x` is the mean of the vector x and :math:`m_y` is + the mean of the vector y. + + Under the assumption that x and y are drawn from + independent normal distributions (so the population correlation coefficient + is 0), the probability density function of the sample correlation + coefficient r is ([1]_, [2]_): + + .. math:: + f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)} + + where n is the number of samples, and B is the beta function. This + is sometimes referred to as the exact distribution of r. This is + the distribution that is used in `pearsonr` to compute the p-value when + the `method` parameter is left at its default value (None). + The distribution is a beta distribution on the interval [-1, 1], + with equal shape parameters a = b = n/2 - 1. In terms of SciPy's + implementation of the beta distribution, the distribution of r is:: + + dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2) + + The default p-value returned by `pearsonr` is a two-sided p-value. 
For a + given sample with correlation coefficient r, the p-value is + the probability that abs(r') of a random sample x' and y' drawn from + the population with zero correlation would be greater than or equal + to abs(r). In terms of the object ``dist`` shown above, the p-value + for a given r and length n can be computed as:: + + p = 2*dist.cdf(-abs(r)) + + When n is 2, the above continuous distribution is not well-defined. + One can interpret the limit of the beta distribution as the shape + parameters a and b approach a = b = 0 as a discrete distribution with + equal probability masses at r = 1 and r = -1. More directly, one + can observe that, given the data x = [x1, x2] and y = [y1, y2], and + assuming x1 != x2 and y1 != y2, the only possible values for r are 1 + and -1. Because abs(r') for any sample x' and y' with length 2 will + be 1, the two-sided p-value for a sample of length 2 is always 1. + + For backwards compatibility, the object that is returned also behaves + like a tuple of length two that holds the statistic and the p-value. + + References + ---------- + .. [1] "Pearson correlation coefficient", Wikipedia, + https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + .. [2] Student, "Probable error of a correlation coefficient", + Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310. + .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution + of the Sample Product-Moment Correlation Coefficient" + Journal of the Royal Statistical Society. Series C (Applied + Statistics), Vol. 21, No. 1 (1972), pp. 1-12. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x, y = [1, 2, 3, 4, 5, 6, 7], [10, 9, 2.5, 6, 4, 3, 2] + >>> res = stats.pearsonr(x, y) + >>> res + PearsonRResult(statistic=-0.828503883588428, pvalue=0.021280260007523286) + + To perform an exact permutation version of the test: + + >>> rng = np.random.default_rng(7796654889291491997) + >>> method = stats.PermutationMethod(n_resamples=np.inf, random_state=rng) + >>> stats.pearsonr(x, y, method=method) + PearsonRResult(statistic=-0.828503883588428, pvalue=0.028174603174603175) + + To perform the test under the null hypothesis that the data were drawn from + *uniform* distributions: + + >>> method = stats.MonteCarloMethod(rvs=(rng.uniform, rng.uniform)) + >>> stats.pearsonr(x, y, method=method) + PearsonRResult(statistic=-0.828503883588428, pvalue=0.0188) + + To produce an asymptotic 90% confidence interval: + + >>> res.confidence_interval(confidence_level=0.9) + ConfidenceInterval(low=-0.9644331982722841, high=-0.3460237473272273) + + And for a bootstrap confidence interval: + + >>> method = stats.BootstrapMethod(method='BCa', rng=rng) + >>> res.confidence_interval(confidence_level=0.9, method=method) + ConfidenceInterval(low=-0.9983163756488651, high=-0.22771001702132443) # may vary + + If N-dimensional arrays are provided, multiple tests are performed in a + single call according to the same conventions as most `scipy.stats` functions: + + >>> rng = np.random.default_rng(2348246935601934321) + >>> x = rng.standard_normal((8, 15)) + >>> y = rng.standard_normal((8, 15)) + >>> stats.pearsonr(x, y, axis=0).statistic.shape # between corresponding columns + (15,) + >>> stats.pearsonr(x, y, axis=1).statistic.shape # between corresponding rows + (8,) + + To perform all pairwise comparisons between slices of the arrays, + use standard NumPy broadcasting techniques. 
For instance, to compute the + correlation between all pairs of rows: + + >>> stats.pearsonr(x[:, np.newaxis, :], y, axis=-1).statistic.shape + (8, 8) + + There is a linear dependence between x and y if y = a + b*x + e, where + a,b are constants and e is a random error term, assumed to be independent + of x. For simplicity, assume that x is standard normal, a=0, b=1 and let + e follow a normal distribution with mean zero and standard deviation s>0. + + >>> rng = np.random.default_rng() + >>> s = 0.5 + >>> x = stats.norm.rvs(size=500, random_state=rng) + >>> e = stats.norm.rvs(scale=s, size=500, random_state=rng) + >>> y = x + e + >>> stats.pearsonr(x, y).statistic + 0.9001942438244763 + + This should be close to the exact value given by + + >>> 1/np.sqrt(1 + s**2) + 0.8944271909999159 + + For s=0.5, we observe a high level of correlation. In general, a large + variance of the noise reduces the correlation, while the correlation + approaches one as the variance of the error goes to zero. + + It is important to keep in mind that no correlation does not imply + independence unless (x, y) is jointly normal. Correlation can even be zero + when there is a very simple dependence structure: if X follows a + standard normal distribution, let y = abs(x). Note that the correlation + between x and y is zero. Indeed, since the expectation of x is zero, + cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero + by symmetry. The following lines of code illustrate this observation: + + >>> y = np.abs(x) + >>> stats.pearsonr(x, y) + PearsonRResult(statistic=-0.05444919272687482, pvalue=0.22422294836207743) + + A non-zero correlation coefficient can be misleading. For example, if X has + a standard normal distribution, define y = x if x < 0 and y = 0 otherwise. 
+ A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797..., + implying a high level of correlation: + + >>> y = np.where(x < 0, x, 0) + >>> stats.pearsonr(x, y) + PearsonRResult(statistic=0.861985781588, pvalue=4.813432002751103e-149) + + This is unintuitive since there is no dependence of x and y if x is larger + than zero which happens in about half of the cases if we sample x and y. + + """ + xp = array_namespace(x, y) + x = xp.asarray(x) + y = xp.asarray(y) + + if not is_numpy(xp) and method is not None: + method = 'invalid' + + if axis is None: + x = xp.reshape(x, (-1,)) + y = xp.reshape(y, (-1,)) + axis = -1 + + axis_int = int(axis) + if axis_int != axis: + raise ValueError('`axis` must be an integer.') + axis = axis_int + + n = x.shape[axis] + if n != y.shape[axis]: + raise ValueError('`x` and `y` must have the same length along `axis`.') + + if n < 2: + raise ValueError('`x` and `y` must have length at least 2.') + + try: + x, y = xp.broadcast_arrays(x, y) + except (ValueError, RuntimeError) as e: + message = '`x` and `y` must be broadcastable.' + raise ValueError(message) from e + + # `moveaxis` only recently added to array API, so it's not yey available in + # array_api_strict. Replace with e.g. `xp.moveaxis(x, axis, -1)` when available. + x = xp_moveaxis_to_end(x, axis, xp=xp) + y = xp_moveaxis_to_end(y, axis, xp=xp) + axis = -1 + + dtype = xp.result_type(x.dtype, y.dtype) + if xp.isdtype(dtype, "integral"): + dtype = xp.asarray(1.).dtype + + if xp.isdtype(dtype, "complex floating"): + raise ValueError('This function does not support complex data') + + x = xp.astype(x, dtype, copy=False) + y = xp.astype(y, dtype, copy=False) + threshold = xp.finfo(dtype).eps ** 0.75 + + # If an input is constant, the correlation coefficient is not defined. 
+ const_x = xp.all(x == x[..., 0:1], axis=-1) + const_y = xp.all(y == y[..., 0:1], axis=-1) + const_xy = const_x | const_y + if xp.any(const_xy): + msg = ("An input array is constant; the correlation coefficient " + "is not defined.") + warnings.warn(stats.ConstantInputWarning(msg), stacklevel=2) + + if isinstance(method, PermutationMethod): + def statistic(y, axis): + statistic, _ = pearsonr(x, y, axis=axis, alternative=alternative) + return statistic + + res = permutation_test((y,), statistic, permutation_type='pairings', + axis=axis, alternative=alternative, **method._asdict()) + + return PearsonRResult(statistic=res.statistic, pvalue=res.pvalue, n=n, + alternative=alternative, x=x, y=y, axis=axis) + elif isinstance(method, MonteCarloMethod): + def statistic(x, y, axis): + statistic, _ = pearsonr(x, y, axis=axis, alternative=alternative) + return statistic + + # `monte_carlo_test` accepts an `rvs` tuple of callables, not an `rng` + # If the user specified an `rng`, replace it with the appropriate callables + method = method._asdict() + if (rng := method.pop('rng', None)) is not None: # goo-goo g'joob + rng = np.random.default_rng(rng) + method['rvs'] = rng.normal, rng.normal + + res = monte_carlo_test((x, y,), statistic=statistic, axis=axis, + alternative=alternative, **method) + + return PearsonRResult(statistic=res.statistic, pvalue=res.pvalue, n=n, + alternative=alternative, x=x, y=y, axis=axis) + elif method == 'invalid': + message = '`method` must be `None` if arguments are not NumPy arrays.' + raise ValueError(message) + elif method is not None: + message = ('`method` must be an instance of `PermutationMethod`,' + '`MonteCarloMethod`, or None.') + raise ValueError(message) + + xmean = xp.mean(x, axis=axis, keepdims=True) + ymean = xp.mean(y, axis=axis, keepdims=True) + xm = x - xmean + ym = y - ymean + + # scipy.linalg.norm(xm) avoids premature overflow when xm is e.g. 
+ # [-5e210, 5e210, 3e200, -3e200] + # but not when `axis` is provided, so scale manually. scipy.linalg.norm + # also raises an error with NaN input rather than returning NaN, so + # use np.linalg.norm. + xmax = xp.max(xp.abs(xm), axis=axis, keepdims=True) + ymax = xp.max(xp.abs(ym), axis=axis, keepdims=True) + with np.errstate(invalid='ignore', divide='ignore'): + normxm = xmax * xp_vector_norm(xm/xmax, axis=axis, keepdims=True) + normym = ymax * xp_vector_norm(ym/ymax, axis=axis, keepdims=True) + + nconst_x = xp.any(normxm < threshold*xp.abs(xmean), axis=axis) + nconst_y = xp.any(normym < threshold*xp.abs(ymean), axis=axis) + nconst_xy = nconst_x | nconst_y + if xp.any(nconst_xy & (~const_xy)): + # If all the values in x (likewise y) are very close to the mean, + # the loss of precision that occurs in the subtraction xm = x - xmean + # might result in large errors in r. + msg = ("An input array is nearly constant; the computed " + "correlation coefficient may be inaccurate.") + warnings.warn(stats.NearConstantInputWarning(msg), stacklevel=2) + + with np.errstate(invalid='ignore', divide='ignore'): + r = xp.sum(xm/normxm * ym/normym, axis=axis) + + # Presumably, if abs(r) > 1, then it is only some small artifact of + # floating point arithmetic. + one = xp.asarray(1, dtype=dtype) + r = xp.asarray(xp.clip(r, -one, one)) + r[const_xy] = xp.nan + + # Make sure we return exact 1.0 or -1.0 values for n == 2 case as promised + # in the docs. + if n == 2: + r = xp.round(r) + one = xp.asarray(1, dtype=dtype) + pvalue = xp.where(xp.asarray(xp.isnan(r)), xp.nan*one, one) + else: + # As explained in the docstring, the distribution of `r` under the null + # hypothesis is the beta distribution on (-1, 1) with a = b = n/2 - 1. 
+ ab = xp.asarray(n/2 - 1) + dist = _SimpleBeta(ab, ab, loc=-1, scale=2) + pvalue = _get_pvalue(r, dist, alternative, xp=xp) + + r = r[()] if r.ndim == 0 else r + pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue + return PearsonRResult(statistic=r, pvalue=pvalue, n=n, + alternative=alternative, x=x, y=y, axis=axis) + + +def fisher_exact(table, alternative=None, *, method=None): + """Perform a Fisher exact test on a contingency table. + + For a 2x2 table, + the null hypothesis is that the true odds ratio of the populations + underlying the observations is one, and the observations were sampled + from these populations under a condition: the marginals of the + resulting table must equal those of the observed table. + The statistic is the unconditional maximum likelihood estimate of the odds + ratio, and the p-value is the probability under the null hypothesis of + obtaining a table at least as extreme as the one that was actually + observed. + + For other table sizes, or if `method` is provided, the null hypothesis + is that the rows and columns of the tables have fixed sums and are + independent; i.e., the table was sampled from a `scipy.stats.random_table` + distribution with the observed marginals. The statistic is the + probability mass of this distribution evaluated at `table`, and the + p-value is the percentage of the population of tables with statistic at + least as extreme (small) as that of `table`. There is only one alternative + hypothesis available: the rows and columns are not independent. + + There are other possible choices of statistic and two-sided + p-value definition associated with Fisher's exact test; please see the + Notes for more information. + + Parameters + ---------- + table : array_like of ints + A contingency table. Elements must be non-negative integers. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis for 2x2 tables; unused for other + table sizes. 
+ The following options are available (default is 'two-sided'): + + * 'two-sided': the odds ratio of the underlying population is not one + * 'less': the odds ratio of the underlying population is less than one + * 'greater': the odds ratio of the underlying population is greater + than one + + See the Notes for more details. + + method : ResamplingMethod, optional + Defines the method used to compute the p-value. + If `method` is an instance of `PermutationMethod`/`MonteCarloMethod`, + the p-value is computed using + `scipy.stats.permutation_test`/`scipy.stats.monte_carlo_test` with the + provided configuration options and other appropriate settings. + Note that if `method` is an instance of `MonteCarloMethod`, the ``rvs`` + attribute must be left unspecified; Monte Carlo samples are always drawn + using the ``rvs`` method of `scipy.stats.random_table`. + Otherwise, the p-value is computed as documented in the notes. + + .. versionadded:: 1.15.0 + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float + For a 2x2 table with default `method`, this is the odds ratio - the + prior odds ratio not a posterior estimate. In all other cases, this + is the probability density of obtaining the observed table under the + null hypothesis of independence with marginals fixed. + pvalue : float + The probability under the null hypothesis of obtaining a + table at least as extreme as the one that was actually observed. + + Raises + ------ + ValueError + If `table` is not two-dimensional or has negative entries. + + See Also + -------- + chi2_contingency : Chi-square test of independence of variables in a + contingency table. This can be used as an alternative to + `fisher_exact` when the numbers in the table are large. + contingency.odds_ratio : Compute the odds ratio (sample or conditional + MLE) for a 2x2 contingency table. 
+ barnard_exact : Barnard's exact test, which is a more powerful alternative + than Fisher's exact test for 2x2 contingency tables. + boschloo_exact : Boschloo's exact test, which is a more powerful + alternative than Fisher's exact test for 2x2 contingency tables. + :ref:`hypothesis_fisher_exact` : Extended example + + Notes + ----- + *Null hypothesis and p-values* + + The null hypothesis is that the true odds ratio of the populations + underlying the observations is one, and the observations were sampled at + random from these populations under a condition: the marginals of the + resulting table must equal those of the observed table. Equivalently, + the null hypothesis is that the input table is from the hypergeometric + distribution with parameters (as used in `hypergeom`) + ``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the + input table is ``[[a, b], [c, d]]``. This distribution has support + ``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values + in the input table, ``min(0, a - d) <= x <= a + min(b, c)``. ``x`` + can be interpreted as the upper-left element of a 2x2 table, so the + tables in the distribution have form:: + + [ x n - x ] + [N - x M - (n + N) + x] + + For example, if:: + + table = [6 2] + [1 4] + + then the support is ``2 <= x <= 7``, and the tables in the distribution + are:: + + [2 6] [3 5] [4 4] [5 3] [6 2] [7 1] + [5 0] [4 1] [3 2] [2 3] [1 4] [0 5] + + The probability of each table is given by the hypergeometric distribution + ``hypergeom.pmf(x, M, n, N)``. 
For this example, these are (rounded to + three significant digits):: + + x 2 3 4 5 6 7 + p 0.0163 0.163 0.408 0.326 0.0816 0.00466 + + These can be computed with:: + + >>> import numpy as np + >>> from scipy.stats import hypergeom + >>> table = np.array([[6, 2], [1, 4]]) + >>> M = table.sum() + >>> n = table[0].sum() + >>> N = table[:, 0].sum() + >>> start, end = hypergeom.support(M, n, N) + >>> hypergeom.pmf(np.arange(start, end+1), M, n, N) + array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508, + 0.004662 ]) + + The two-sided p-value is the probability that, under the null hypothesis, + a random table would have a probability equal to or less than the + probability of the input table. For our example, the probability of + the input table (where ``x = 6``) is 0.0816. The x values where the + probability does not exceed this are 2, 6 and 7, so the two-sided p-value + is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``:: + + >>> from scipy.stats import fisher_exact + >>> res = fisher_exact(table, alternative='two-sided') + >>> res.pvalue + 0.10256410256410257 + + The one-sided p-value for ``alternative='greater'`` is the probability + that a random table has ``x >= a``, which in our example is ``x >= 6``, + or ``0.0816 + 0.00466 ~= 0.08626``:: + + >>> res = fisher_exact(table, alternative='greater') + >>> res.pvalue + 0.08624708624708627 + + This is equivalent to computing the survival function of the + distribution at ``x = 5`` (one less than ``x`` from the input table, + because we want to include the probability of ``x = 6`` in the sum):: + + >>> hypergeom.sf(5, M, n, N) + 0.08624708624708627 + + For ``alternative='less'``, the one-sided p-value is the probability + that a random table has ``x <= a``, (i.e. 
``x <= 6`` in our example), + or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``:: + + >>> res = fisher_exact(table, alternative='less') + >>> res.pvalue + 0.9953379953379957 + + This is equivalent to computing the cumulative distribution function + of the distribution at ``x = 6``: + + >>> hypergeom.cdf(6, M, n, N) + 0.9953379953379957 + + *Odds ratio* + + The calculated odds ratio is different from the value computed by the + R function ``fisher.test``. This implementation returns the "sample" + or "unconditional" maximum likelihood estimate, while ``fisher.test`` + in R uses the conditional maximum likelihood estimate. To compute the + conditional maximum likelihood estimate of the odds ratio, use + `scipy.stats.contingency.odds_ratio`. + + References + ---------- + .. [1] Fisher, Sir Ronald A, "The Design of Experiments: + Mathematics of a Lady Tasting Tea." ISBN 978-0-486-41151-4, 1935. + .. [2] "Fisher's exact test", + https://en.wikipedia.org/wiki/Fisher's_exact_test + + Examples + -------- + + >>> from scipy.stats import fisher_exact + >>> res = fisher_exact([[8, 2], [1, 5]]) + >>> res.statistic + 20.0 + >>> res.pvalue + 0.034965034965034975 + + For tables with shape other than ``(2, 2)``, provide an instance of + `scipy.stats.MonteCarloMethod` or `scipy.stats.PermutationMethod` for the + `method` parameter: + + >>> import numpy as np + >>> from scipy.stats import MonteCarloMethod + >>> rng = np.random.default_rng(4507195762371367) + >>> method = MonteCarloMethod(rng=rng) + >>> fisher_exact([[8, 2, 3], [1, 5, 4]], method=method) + SignificanceResult(statistic=np.float64(0.005782), pvalue=np.float64(0.0603)) + + For a more detailed example, see :ref:`hypothesis_fisher_exact`. 
+ """ + hypergeom = distributions.hypergeom + # int32 is not enough for the algorithm + c = np.asarray(table, dtype=np.int64) + if not c.ndim == 2: + raise ValueError("The input `table` must have two dimensions.") + + if np.any(c < 0): + raise ValueError("All values in `table` must be nonnegative.") + + if not c.shape == (2, 2) or method is not None: + return _fisher_exact_rxc(c, alternative, method) + alternative = 'two-sided' if alternative is None else alternative + + if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1 and + # the odds ratio is NaN. + return SignificanceResult(np.nan, 1.0) + + if c[1, 0] > 0 and c[0, 1] > 0: + oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1]) + else: + oddsratio = np.inf + + n1 = c[0, 0] + c[0, 1] + n2 = c[1, 0] + c[1, 1] + n = c[0, 0] + c[1, 0] + + def pmf(x): + return hypergeom.pmf(x, n1 + n2, n1, n) + + if alternative == 'less': + pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) + elif alternative == 'greater': + # Same formula as the 'less' case, but with the second column. + pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1]) + elif alternative == 'two-sided': + mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2)) + pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n) + pmode = hypergeom.pmf(mode, n1 + n2, n1, n) + + epsilon = 1e-14 + gamma = 1 + epsilon + + if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= epsilon: + return SignificanceResult(oddsratio, 1.) 
+ + elif c[0, 0] < mode: + plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) + if hypergeom.pmf(n, n1 + n2, n1, n) > pexact * gamma: + return SignificanceResult(oddsratio, plower) + + guess = _binary_search(lambda x: -pmf(x), -pexact * gamma, mode, n) + pvalue = plower + hypergeom.sf(guess, n1 + n2, n1, n) + else: + pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n) + if hypergeom.pmf(0, n1 + n2, n1, n) > pexact * gamma: + return SignificanceResult(oddsratio, pupper) + + guess = _binary_search(pmf, pexact * gamma, 0, mode) + pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n) + else: + msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}" + raise ValueError(msg) + + pvalue = min(pvalue, 1.0) + + return SignificanceResult(oddsratio, pvalue) + + +def _fisher_exact_rxc(table, alternative, method): + if alternative is not None: + message = ('`alternative` must be the default (None) unless ' + '`table` has shape `(2, 2)` and `method is None`.') + raise ValueError(message) + + if table.size == 0: + raise ValueError("`table` must have at least one row and one column.") + + if table.shape[0] == 1 or table.shape[1] == 1 or np.all(table == 0): + # Only one such table with those marginals + return SignificanceResult(1.0, 1.0) + + if method is None: + method = stats.MonteCarloMethod() + + if isinstance(method, stats.PermutationMethod): + res = _fisher_exact_permutation_method(table, method) + elif isinstance(method, stats.MonteCarloMethod): + res = _fisher_exact_monte_carlo_method(table, method) + else: + message = (f'`{method=}` not recognized; if provided, `method` must be an ' + 'instance of `PermutationMethod` or `MonteCarloMethod`.') + raise ValueError(message) + + return SignificanceResult(np.clip(res.statistic, None, 1.0), res.pvalue) + + +def _fisher_exact_permutation_method(table, method): + x, y = _untabulate(table) + colsums = np.sum(table, axis=0) + rowsums = np.sum(table, axis=1) + X = stats.random_table(rowsums, colsums) + + # 
`permutation_test` with `permutation_type='pairings' permutes the order of `x`, + # which pairs observations in `x` with different observations in `y`. + def statistic(x): + # crosstab the resample and compute the statistic + table = stats.contingency.crosstab(x, y)[1] + return X.pmf(table) + + # tables with *smaller* probability mass are considered to be more extreme + return stats.permutation_test((x,), statistic, permutation_type='pairings', + alternative='less', **method._asdict()) + + +def _fisher_exact_monte_carlo_method(table, method): + method = method._asdict() + + if method.pop('rvs', None) is not None: + message = ('If the `method` argument of `fisher_exact` is an ' + 'instance of `MonteCarloMethod`, its `rvs` attribute ' + 'must be unspecified. Use the `MonteCarloMethod` `rng` argument ' + 'to control the random state.') + raise ValueError(message) + rng = np.random.default_rng(method.pop('rng', None)) + + # `random_table.rvs` produces random contingency tables with the given marginals + # under the null hypothesis of independence + shape = table.shape + colsums = np.sum(table, axis=0) + rowsums = np.sum(table, axis=1) + totsum = np.sum(table) + X = stats.random_table(rowsums, colsums, seed=rng) + + def rvs(size): + n_resamples = size[0] + return X.rvs(size=n_resamples).reshape(size) + + # axis signals to `monte_carlo_test` that statistic is vectorized, but we know + # how it will pass the table(s), so we don't need to use `axis` explicitly. 
+ def statistic(table, axis): + shape_ = (-1,) + shape if table.size > totsum else shape + return X.pmf(table.reshape(shape_)) + + # tables with *smaller* probability mass are considered to be more extreme + return stats.monte_carlo_test(table.ravel(), rvs, statistic, + alternative='less', **method) + + +def _untabulate(table): + # converts a contingency table to paired samples indicating the + # correspondence between row and column indices + r, c = table.shape + x, y = [], [] + for i in range(r): + for j in range(c): + x.append([i] * table[i, j]) + y.append([j] * table[i, j]) + return np.concatenate(x), np.concatenate(y) + + +def spearmanr(a, b=None, axis=0, nan_policy='propagate', + alternative='two-sided'): + r"""Calculate a Spearman correlation coefficient with associated p-value. + + The Spearman rank-order correlation coefficient is a nonparametric measure + of the monotonicity of the relationship between two datasets. + Like other correlation coefficients, + this one varies between -1 and +1 with 0 implying no correlation. + Correlations of -1 or +1 imply an exact monotonic relationship. Positive + correlations imply that as x increases, so does y. Negative correlations + imply that as x increases, y decreases. + + The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Spearman correlation at least as extreme + as the one computed from these datasets. Although calculation of the + p-value does not make strong assumptions about the distributions underlying + the samples, it is only accurate for very large samples (>500 + observations). For smaller sample sizes, consider a permutation test (see + Examples section below). + + Parameters + ---------- + a, b : 1D or 2D array_like, b is optional + One or two 1-D or 2-D arrays containing multiple variables and + observations. When these are 1-D, each represents a vector of + observations of a single variable. 
For the behavior in the 2-D case, + see under ``axis``, below. + Both arrays need to have the same length in the ``axis`` dimension. + axis : int or None, optional + If axis=0 (default), then each column represents a variable, with + observations in the rows. If axis=1, the relationship is transposed: + each row represents a variable, while the columns contain observations. + If axis=None, then both arrays will be raveled. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the correlation is nonzero + * 'less': the correlation is negative (less than zero) + * 'greater': the correlation is positive (greater than zero) + + .. versionadded:: 1.7.0 + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float or ndarray (2-D square) + Spearman correlation matrix or correlation coefficient (if only 2 + variables are given as parameters). Correlation matrix is square + with length equal to total number of variables (columns or rows) in + ``a`` and ``b`` combined. + pvalue : float + The p-value for a hypothesis test whose null hypothesis + is that two samples have no ordinal correlation. See + `alternative` above for alternative hypotheses. `pvalue` has the + same shape as `statistic`. + + Raises + ------ + ValueError + If `axis` is not 0, 1 or None, or if the number of dimensions of `a` + is greater than 2, or if `b` is None and the number of dimensions of + `a` is less than 2. + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if an input is a constant array. 
The correlation coefficient + is not defined in this case, so ``np.nan`` is returned. + + See Also + -------- + :ref:`hypothesis_spearmanr` : Extended example + + References + ---------- + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + Section 14.7 + .. [2] Kendall, M. G. and Stuart, A. (1973). + The Advanced Theory of Statistics, Volume 2: Inference and Relationship. + Griffin. 1973. + Section 31.18 + + Examples + -------- + + >>> import numpy as np + >>> from scipy import stats + >>> res = stats.spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7]) + >>> res.statistic + 0.8207826816681233 + >>> res.pvalue + 0.08858700531354381 + + >>> rng = np.random.default_rng() + >>> x2n = rng.standard_normal((100, 2)) + >>> y2n = rng.standard_normal((100, 2)) + >>> res = stats.spearmanr(x2n) + >>> res.statistic, res.pvalue + (-0.07960396039603959, 0.4311168705769747) + + >>> res = stats.spearmanr(x2n[:, 0], x2n[:, 1]) + >>> res.statistic, res.pvalue + (-0.07960396039603959, 0.4311168705769747) + + >>> res = stats.spearmanr(x2n, y2n) + >>> res.statistic + array([[ 1. , -0.07960396, -0.08314431, 0.09662166], + [-0.07960396, 1. , -0.14448245, 0.16738074], + [-0.08314431, -0.14448245, 1. , 0.03234323], + [ 0.09662166, 0.16738074, 0.03234323, 1. ]]) + >>> res.pvalue + array([[0. , 0.43111687, 0.41084066, 0.33891628], + [0.43111687, 0. , 0.15151618, 0.09600687], + [0.41084066, 0.15151618, 0. , 0.74938561], + [0.33891628, 0.09600687, 0.74938561, 0. ]]) + + >>> res = stats.spearmanr(x2n.T, y2n.T, axis=1) + >>> res.statistic + array([[ 1. , -0.07960396, -0.08314431, 0.09662166], + [-0.07960396, 1. , -0.14448245, 0.16738074], + [-0.08314431, -0.14448245, 1. , 0.03234323], + [ 0.09662166, 0.16738074, 0.03234323, 1. 
]]) + + >>> res = stats.spearmanr(x2n, y2n, axis=None) + >>> res.statistic, res.pvalue + (0.044981624540613524, 0.5270803651336189) + + >>> res = stats.spearmanr(x2n.ravel(), y2n.ravel()) + >>> res.statistic, res.pvalue + (0.044981624540613524, 0.5270803651336189) + + >>> rng = np.random.default_rng() + >>> xint = rng.integers(10, size=(100, 2)) + >>> res = stats.spearmanr(xint) + >>> res.statistic, res.pvalue + (0.09800224850707953, 0.3320271757932076) + + For small samples, consider performing a permutation test instead of + relying on the asymptotic p-value. Note that to calculate the null + distribution of the statistic (for all possibly pairings between + observations in sample ``x`` and ``y``), only one of the two inputs needs + to be permuted. + + >>> x = [1.76405235, 0.40015721, 0.97873798, + ... 2.2408932, 1.86755799, -0.97727788] + >>> y = [2.71414076, 0.2488, 0.87551913, + ... 2.6514917, 2.01160156, 0.47699563] + + >>> def statistic(x): # permute only `x` + ... return stats.spearmanr(x, y).statistic + >>> res_exact = stats.permutation_test((x,), statistic, + ... permutation_type='pairings') + >>> res_asymptotic = stats.spearmanr(x, y) + >>> res_exact.pvalue, res_asymptotic.pvalue # asymptotic pvalue is too low + (0.10277777777777777, 0.07239650145772594) + + For a more detailed example, see :ref:`hypothesis_spearmanr`. + """ + if axis is not None and axis > 1: + raise ValueError("spearmanr only handles 1-D or 2-D arrays, " + f"supplied axis argument {axis}, please use only " + "values 0, 1 or None for axis") + + a, axisout = _chk_asarray(a, axis) + if a.ndim > 2: + raise ValueError("spearmanr only handles 1-D or 2-D arrays") + + if b is None: + if a.ndim < 2: + raise ValueError("`spearmanr` needs at least 2 " + "variables to compare") + else: + # Concatenate a and b, so that we now only have to handle the case + # of a 2-D `a`. 
+ b, _ = _chk_asarray(b, axis) + if axisout == 0: + a = np.column_stack((a, b)) + else: + a = np.vstack((a, b)) + + n_vars = a.shape[1 - axisout] + n_obs = a.shape[axisout] + if n_obs <= 1: + # Handle empty arrays or single observations. + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + warn_msg = ("An input array is constant; the correlation coefficient " + "is not defined.") + if axisout == 0: + if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all(): + # If an input is constant, the correlation coefficient + # is not defined. + warnings.warn(stats.ConstantInputWarning(warn_msg), stacklevel=2) + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + else: # case when axisout == 1 b/c a is 2 dim only + if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all(): + # If an input is constant, the correlation coefficient + # is not defined. + warnings.warn(stats.ConstantInputWarning(warn_msg), stacklevel=2) + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + a_contains_nan, nan_policy = _contains_nan(a, nan_policy) + variable_has_nan = np.zeros(n_vars, dtype=bool) + if a_contains_nan: + if nan_policy == 'omit': + return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy, + alternative=alternative) + elif nan_policy == 'propagate': + if a.ndim == 1 or n_vars <= 2: + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + else: + # Keep track of variables with NaNs, set the outputs to NaN + # only for those variables + variable_has_nan = np.isnan(a).any(axis=axisout) + + a_ranked = np.apply_along_axis(rankdata, axisout, a) + rs = np.corrcoef(a_ranked, rowvar=axisout) + dof = n_obs - 2 # degrees of freedom + + # rs can have elements equal to 1, so avoid zero division warnings + with np.errstate(divide='ignore'): + # clip the small negative values possibly caused by rounding + # errors before taking the square root + t = 
rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0)) + + dist = _SimpleStudentT(dof) + prob = _get_pvalue(t, dist, alternative, xp=np) + + # For backwards compatibility, return scalars when comparing 2 columns + if rs.shape == (2, 2): + res = SignificanceResult(rs[1, 0], prob[1, 0]) + res.correlation = rs[1, 0] + return res + else: + rs[variable_has_nan, :] = np.nan + rs[:, variable_has_nan] = np.nan + res = SignificanceResult(rs[()], prob[()]) + res.correlation = rs + return res + + +def pointbiserialr(x, y): + r"""Calculate a point biserial correlation coefficient and its p-value. + + The point biserial correlation is used to measure the relationship + between a binary variable, x, and a continuous variable, y. Like other + correlation coefficients, this one varies between -1 and +1 with 0 + implying no correlation. Correlations of -1 or +1 imply a determinative + relationship. + + This function may be computed using a shortcut formula but produces the + same result as `pearsonr`. + + Parameters + ---------- + x : array_like of bools + Input array. + y : array_like + Input array. + + Returns + ------- + res: SignificanceResult + An object containing attributes: + + statistic : float + The R value. + pvalue : float + The two-sided p-value. + + Notes + ----- + `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom. + It is equivalent to `pearsonr`. + + The value of the point-biserial correlation can be calculated from: + + .. math:: + + r_{pb} = \frac{\overline{Y_1} - \overline{Y_0}} + {s_y} + \sqrt{\frac{N_0 N_1} + {N (N - 1)}} + + Where :math:`\overline{Y_{0}}` and :math:`\overline{Y_{1}}` are means + of the metric observations coded 0 and 1 respectively; :math:`N_{0}` and + :math:`N_{1}` are number of observations coded 0 and 1 respectively; + :math:`N` is the total number of observations and :math:`s_{y}` is the + standard deviation of all the metric observations. 
+ + A value of :math:`r_{pb}` that is significantly different from zero is + completely equivalent to a significant difference in means between the two + groups. Thus, an independent groups t Test with :math:`N-2` degrees of + freedom may be used to test whether :math:`r_{pb}` is nonzero. The + relation between the t-statistic for comparing two independent groups and + :math:`r_{pb}` is given by: + + .. math:: + + t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}} + + References + ---------- + .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math. + Statist., Vol. 20, no.1, pp. 125-126, 1949. + + .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous + Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25, + np. 3, pp. 603-607, 1954. + + .. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef: + Statistics Reference Online (eds N. Balakrishnan, et al.), 2014. + :doi:`10.1002/9781118445112.stat06227` + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) + >>> b = np.arange(7) + >>> stats.pointbiserialr(a, b) + (0.8660254037844386, 0.011724811003954652) + >>> stats.pearsonr(a, b) + (0.86602540378443871, 0.011724811003954626) + >>> np.corrcoef(a, b) + array([[ 1. , 0.8660254], + [ 0.8660254, 1. ]]) + + """ + rpb, prob = pearsonr(x, y) + # create result object with alias for backward compatibility + res = SignificanceResult(rpb, prob) + res.correlation = rpb + return res + + +def kendalltau(x, y, *, nan_policy='propagate', + method='auto', variant='b', alternative='two-sided'): + r"""Calculate Kendall's tau, a correlation measure for ordinal data. + + Kendall's tau is a measure of the correspondence between two rankings. + Values close to 1 indicate strong agreement, and values close to -1 + indicate strong disagreement. This implements two variants of Kendall's + tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). 
These + differ only in how they are normalized to lie within the range -1 to 1; + the hypothesis tests (their p-values) are identical. Kendall's original + tau-a is not implemented separately because both tau-b and tau-c reduce + to tau-a in the absence of ties. + + Parameters + ---------- + x, y : array_like + Arrays of rankings, of the same shape. If arrays are not 1-D, they + will be flattened to 1-D. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + method : {'auto', 'asymptotic', 'exact'}, optional + Defines which method is used to calculate the p-value [5]_. + The following options are available (default is 'auto'): + + * 'auto': selects the appropriate method based on a trade-off + between speed and accuracy + * 'asymptotic': uses a normal approximation valid for large samples + * 'exact': computes the exact p-value, but can only be used if no ties + are present. As the sample size increases, the 'exact' computation + time may grow and the result may lose some precision. + + variant : {'b', 'c'}, optional + Defines which variant of Kendall's tau is returned. Default is 'b'. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the rank correlation is nonzero + * 'less': the rank correlation is negative (less than zero) + * 'greater': the rank correlation is positive (greater than zero) + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float + The tau statistic. + pvalue : float + The p-value for a hypothesis test whose null hypothesis is + an absence of association, tau = 0. 
+ + Raises + ------ + ValueError + If `nan_policy` is 'omit' and `variant` is not 'b' or + if `method` is 'exact' and there are ties between `x` and `y`. + + See Also + -------- + spearmanr : Calculates a Spearman rank-order correlation coefficient. + theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). + weightedtau : Computes a weighted version of Kendall's tau. + :ref:`hypothesis_kendalltau` : Extended example + + Notes + ----- + The definition of Kendall's tau that is used is [2]_:: + + tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U)) + + tau_c = 2 (P - Q) / (n**2 * (m - 1) / m) + + where P is the number of concordant pairs, Q the number of discordant + pairs, T the number of ties only in `x`, and U the number of ties only in + `y`. If a tie occurs for the same pair in both `x` and `y`, it is not + added to either T or U. n is the total number of samples, and m is the + number of unique values in either `x` or `y`, whichever is smaller. + + References + ---------- + .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika + Vol. 30, No. 1/2, pp. 81-93, 1938. + .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems", + Biometrika Vol. 33, No. 3, pp. 239-251. 1945. + .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John + Wiley & Sons, 1967. + .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency + tables", Software: Practice and Experience, Vol. 24, No. 3, + pp. 327-336, 1994. + .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), + Charles Griffin & Co., 1970. + + Examples + -------- + + >>> from scipy import stats + >>> x1 = [12, 2, 1, 12, 2] + >>> x2 = [1, 4, 7, 1, 0] + >>> res = stats.kendalltau(x1, x2) + >>> res.statistic + -0.47140452079103173 + >>> res.pvalue + 0.2827454599327748 + + For a more detailed example, see :ref:`hypothesis_kendalltau`. 
+ """ + x = np.asarray(x).ravel() + y = np.asarray(y).ravel() + + if x.size != y.size: + raise ValueError("All inputs to `kendalltau` must be of the same " + f"size, found x-size {x.size} and y-size {y.size}") + elif not x.size or not y.size: + # Return NaN if arrays are empty + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + # check both x and y + cnx, npx = _contains_nan(x, nan_policy) + cny, npy = _contains_nan(y, nan_policy) + contains_nan = cnx or cny + if npx == 'omit' or npy == 'omit': + nan_policy = 'omit' + + if contains_nan and nan_policy == 'propagate': + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + elif contains_nan and nan_policy == 'omit': + x = ma.masked_invalid(x) + y = ma.masked_invalid(y) + if variant == 'b': + return mstats_basic.kendalltau(x, y, method=method, use_ties=True, + alternative=alternative) + else: + message = ("nan_policy='omit' is currently compatible only with " + "variant='b'.") + raise ValueError(message) + + def count_rank_tie(ranks): + cnt = np.bincount(ranks).astype('int64', copy=False) + cnt = cnt[cnt > 1] + # Python ints to avoid overflow down the line + return (int((cnt * (cnt - 1) // 2).sum()), + int((cnt * (cnt - 1.) * (cnt - 2)).sum()), + int((cnt * (cnt - 1.) 
* (2*cnt + 5)).sum())) + + size = x.size + perm = np.argsort(y) # sort on y and convert y to dense ranks + x, y = x[perm], y[perm] + y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp) + + # stable sort on x and convert x to dense ranks + perm = np.argsort(x, kind='mergesort') + x, y = x[perm], y[perm] + x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp) + + dis = _kendall_dis(x, y) # discordant pairs + + obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True] + cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False) + + ntie = int((cnt * (cnt - 1) // 2).sum()) # joint ties + xtie, x0, x1 = count_rank_tie(x) # ties in x, stats + ytie, y0, y1 = count_rank_tie(y) # ties in y, stats + + tot = (size * (size - 1)) // 2 + + if xtie == tot or ytie == tot: + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie + # = con + dis + xtie + ytie - ntie + con_minus_dis = tot - xtie - ytie + ntie - 2 * dis + if variant == 'b': + tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie) + elif variant == 'c': + minclasses = min(len(set(x)), len(set(y))) + tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses) + else: + raise ValueError(f"Unknown variant of the method chosen: {variant}. " + "variant must be 'b' or 'c'.") + + # Limit range to fix computational errors + tau = np.minimum(1., max(-1., tau)) + + # The p-value calculation is the same for all variants since the p-value + # depends only on con_minus_dis. 
def weightedtau(x, y, rank=True, weigher=None, additive=True):
    r"""Compute a weighted version of Kendall's :math:`\tau`.

    The weighted :math:`\tau` is a weighted version of Kendall's
    :math:`\tau` in which exchanges of high weight are more influential
    than exchanges of low weight.  The default parameters compute the
    additive hyperbolic version of the index, :math:`\tau_\mathrm h`.

    Note that the convention used here for ranking (lower values imply
    higher importance) is opposite to that used by other SciPy statistical
    functions.

    Parameters
    ----------
    x, y : array_like
        Arrays of scores, of the same shape. If arrays are not 1-D, they
        will be flattened to 1-D.
    rank : array_like of ints or bool, optional
        A nonnegative rank assigned to each element.  If None, the
        decreasing lexicographical rank by (`x`, `y`) is used.  If False,
        the element indices are used directly as ranks.  The default is
        True, in which case this function returns the average of the values
        obtained using the decreasing lexicographical rank by (`x`, `y`)
        and by (`y`, `x`).
    weigher : callable, optional
        The weigher function; maps a nonnegative integer rank (zero is the
        most important element) to a nonnegative weight.  The default,
        None, provides hyperbolic weighing: rank :math:`r` is mapped to
        weight :math:`1/(r+1)`.
    additive : bool, optional
        If True (default), the weight of an exchange is the sum of the
        weights of the ranks of the exchanged elements; otherwise the
        weights are multiplied.

    Returns
    -------
    res : SignificanceResult
        An object with attributes ``statistic`` (the weighted
        :math:`\tau` correlation index) and ``pvalue`` (presently
        ``np.nan``, as the null distribution of the statistic is unknown,
        even in the additive hyperbolic case).

    See Also
    --------
    kendalltau : Calculates Kendall's tau.
    spearmanr : Calculates a Spearman rank-order correlation coefficient.

    Notes
    -----
    This function uses an :math:`O(n \log n)`, mergesort-based algorithm
    [1]_ that is a weighted extension of Knight's algorithm for Kendall's
    :math:`\tau`.  NaNs are considered the smallest possible score.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
           ties", Proceedings of the 24th international conference on World
           Wide Web, pp. 1166-1176, ACM, 2015.
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()

    if x.size != y.size:
        raise ValueError("All inputs to `weightedtau` must be of the same "
                         f"size, found x-size {x.size} and y-size {y.size}")
    if not x.size:
        # Empty input: statistic and p-value are both NaN.
        empty = SignificanceResult(np.nan, np.nan)
        empty.correlation = np.nan
        return empty

    def _package(value):
        # Attach the `.correlation` alias for backward compatibility.
        out = SignificanceResult(value, np.nan)
        out.correlation = value
        return out

    # NaNs cannot be ranked directly; map to int64 ranks first.
    if np.isnan(np.sum(x)):
        x = _toint64(x)
    if np.isnan(np.sum(y)):
        y = _toint64(y)

    # Reduce unsupported dtypes to ranks.
    if x.dtype != y.dtype:
        x = x if x.dtype == np.int64 else _toint64(x)
        y = y if y.dtype == np.int64 else _toint64(y)
    elif x.dtype not in (np.int32, np.int64, np.float32, np.float64):
        x = _toint64(x)
        y = _toint64(y)

    if rank is True:
        # Average the two lexicographical-rank orientations.
        forward = _weightedrankedtau(x, y, None, weigher, additive)
        backward = _weightedrankedtau(y, x, None, weigher, additive)
        return _package((forward + backward) / 2)

    if rank is False:
        rank = np.arange(x.size, dtype=np.intp)
    elif rank is not None:
        rank = np.asarray(rank).ravel()
        if rank.size != x.size:
            raise ValueError(
                "All inputs to `weightedtau` must be of the same size, "
                f"found x-size {x.size} and rank-size {rank.size}"
            )

    return _package(_weightedrankedtau(x, y, rank, weigher, additive))
TtestResultBase = _make_tuple_bunch('TtestResultBase',
                                    ['statistic', 'pvalue'], ['df'])


class TtestResult(TtestResultBase):
    """
    Result of a t-test.

    See the documentation of the particular t-test function for more
    information about the definition of the statistic and meaning of
    the confidence interval.

    Attributes
    ----------
    statistic : float or array
        The t-statistic of the sample.
    pvalue : float or array
        The p-value associated with the given alternative.
    df : float or array
        The number of degrees of freedom used in calculation of the
        t-statistic; this is one less than the size of the sample
        (``a.shape[axis]-1`` if there are no masked elements or omitted NaNs).

    Methods
    -------
    confidence_interval
        Computes a confidence interval around the population statistic
        for the given confidence level.
        The confidence interval is returned in a ``namedtuple`` with
        fields `low` and `high`.

    """

    def __init__(self, statistic, pvalue, df,  # public
                 alternative, standard_error, estimate,  # private
                 statistic_np=None, xp=None):  # private
        super().__init__(statistic, pvalue, df=df)
        self._alternative = alternative
        # denominator of the t-statistic
        self._standard_error = standard_error
        # point estimate of the sample mean
        self._estimate = estimate
        self._statistic_np = statistic if statistic_np is None else statistic_np
        self._dtype = statistic.dtype
        self._xp = array_namespace(statistic, pvalue) if xp is None else xp

    def confidence_interval(self, confidence_level=0.95):
        """
        Compute a confidence interval around the population statistic.

        Parameters
        ----------
        confidence_level : float
            The confidence level for the calculation of the population mean
            confidence interval. Default is 0.95.

        Returns
        -------
        ci : namedtuple
            The confidence interval is returned in a ``namedtuple`` with
            fields `low` and `high`.

        """
        # Unscaled t-distribution interval, then translate/scale it to the
        # sampling distribution of the estimate.
        low, high = _t_confidence_interval(self.df, self._statistic_np,
                                           confidence_level, self._alternative,
                                           self._dtype, self._xp)
        low = low * self._standard_error + self._estimate
        high = high * self._standard_error + self._estimate
        return ConfidenceInterval(low=low, high=high)


def pack_TtestResult(statistic, pvalue, df, alternative, standard_error,
                     estimate):
    # `alternative` could have any number of dimensions (including 0d), but
    # there is at most one unique non-NaN value.
    alternative = np.atleast_1d(alternative)  # can't index 0D object
    alternative = alternative[np.isfinite(alternative)]
    alternative = alternative[0] if alternative.size else np.nan
    return TtestResult(statistic, pvalue, df=df, alternative=alternative,
                       standard_error=standard_error, estimate=estimate)


def unpack_TtestResult(res):
    # Inverse of `pack_TtestResult`; used by the `_axis_nan_policy` machinery.
    return (res.statistic, res.pvalue, res.df, res._alternative,
            res._standard_error, res._estimate)
@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
                          result_to_tuple=unpack_TtestResult, n_outputs=6)
# nan_policy handled by `_axis_nan_policy`, but needs to be left
# in signature to preserve use as a positional argument
def ttest_1samp(a, popmean, axis=0, nan_policy="propagate", alternative="two-sided"):
    """Calculate the T-test for the mean of ONE group of scores.

    This is a test for the null hypothesis that the expected value
    (mean) of a sample of independent observations `a` is equal to the given
    population mean, `popmean`.

    Parameters
    ----------
    a : array_like
        Sample observations.
    popmean : float or array_like
        Expected value in null hypothesis. If array_like, then its length
        along `axis` must equal 1, and it must otherwise be broadcastable
        with `a`.
    axis : int or None, optional
        Axis along which to compute test; default is 0. If None, compute
        over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis: the mean of the underlying
        distribution of the sample is different from, less than, or greater
        than the given population mean (`popmean`), respectively.
        Default is 'two-sided'.

    Returns
    -------
    result : `~scipy.stats._result_classes.TtestResult`
        An object with the following attributes:

        statistic : float or array
            The t-statistic.
        pvalue : float or array
            The p-value associated with the given alternative.
        df : float or array
            The number of degrees of freedom used in calculation of the
            t-statistic; this is one less than the size of the sample,
            i.e. ``a.shape[axis] - 1``.

            .. versionadded:: 1.10.0

        The object also has a ``confidence_interval(confidence_level=0.95)``
        method that computes a confidence interval around the population
        mean for the given confidence level; it is returned in a
        ``namedtuple`` with fields `low` and `high`.

            .. versionadded:: 1.10.0

    Notes
    -----
    The statistic is calculated as ``(np.mean(a) - popmean)/se``, where
    ``se`` is the standard error.  Therefore, the statistic will be positive
    when the sample mean is greater than the population mean and negative
    when the sample mean is less than the population mean.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> rvs = stats.uniform.rvs(size=50, random_state=rng)
    >>> res = stats.ttest_1samp(rvs, popmean=0.5)

    """
    xp = array_namespace(a)
    a, axis = _chk_asarray(a, axis, xp=xp)

    n = a.shape[axis]
    df = n - 1

    if n == 0:
        # This is really only needed for *testing* _axis_nan_policy decorator.
        # It won't happen when the decorator is used.
        NaN = _get_nan(a)
        return TtestResult(NaN, NaN, df=NaN, alternative=NaN,
                           standard_error=NaN, estimate=NaN)

    mean = xp.mean(a, axis=axis)
    try:
        popmean = xp.asarray(popmean)
        popmean = xp.squeeze(popmean, axis=axis) if popmean.ndim > 0 else popmean
    except ValueError as e:
        raise ValueError("`popmean.shape[axis]` must equal 1.") from e
    d = mean - popmean
    v = _var(a, axis=axis, ddof=1)
    denom = xp.sqrt(v / n)

    # Zero variance / zero-length slices produce inf/nan statistics; that is
    # the intended result, so silence the division warnings.
    with np.errstate(divide='ignore', invalid='ignore'):
        t = xp.divide(d, denom)
    t = t[()] if t.ndim == 0 else t

    dist = _SimpleStudentT(xp.asarray(df, dtype=t.dtype))
    prob = _get_pvalue(t, dist, alternative, xp=xp)
    prob = prob[()] if prob.ndim == 0 else prob

    # when nan_policy='omit', `df` can be different for different axis-slices
    df = xp.broadcast_to(xp.asarray(df), t.shape)
    df = df[()] if df.ndim == 0 else df
    # _axis_nan_policy decorator doesn't play well with strings
    alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative]
    return TtestResult(t, prob, df=df, alternative=alternative_num,
                       standard_error=denom, estimate=mean,
                       statistic_np=xp.asarray(t), xp=xp)
standard_error=denom, estimate=mean, + statistic_np=xp.asarray(t), xp=xp) + + +def _t_confidence_interval(df, t, confidence_level, alternative, dtype=None, xp=None): + # Input validation on `alternative` is already done + # We just need IV on confidence_level + dtype = t.dtype if dtype is None else dtype + xp = array_namespace(t) if xp is None else xp + + # stdtrit not dispatched yet; use NumPy + df, t = np.asarray(df), np.asarray(t) + + if confidence_level < 0 or confidence_level > 1: + message = "`confidence_level` must be a number between 0 and 1." + raise ValueError(message) + + if alternative < 0: # 'less' + p = confidence_level + low, high = np.broadcast_arrays(-np.inf, special.stdtrit(df, p)) + elif alternative > 0: # 'greater' + p = 1 - confidence_level + low, high = np.broadcast_arrays(special.stdtrit(df, p), np.inf) + elif alternative == 0: # 'two-sided' + tail_probability = (1 - confidence_level)/2 + p = tail_probability, 1-tail_probability + # axis of p must be the zeroth and orthogonal to all the rest + p = np.reshape(p, [2] + [1]*np.asarray(df).ndim) + low, high = special.stdtrit(df, p) + else: # alternative is NaN when input is empty (see _axis_nan_policy) + p, nans = np.broadcast_arrays(t, np.nan) + low, high = nans, nans + + low = xp.asarray(low, dtype=dtype) + low = low[()] if low.ndim == 0 else low + high = xp.asarray(high, dtype=dtype) + high = high[()] if high.ndim == 0 else high + return low, high + + +def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative, xp=None): + xp = array_namespace(mean1, mean2, denom) if xp is None else xp + + d = mean1 - mean2 + with np.errstate(divide='ignore', invalid='ignore'): + t = xp.divide(d, denom) + + t_np = np.asarray(t) + df_np = np.asarray(df) + prob = _get_pvalue(t_np, distributions.t(df_np), alternative, xp=np) + prob = xp.asarray(prob, dtype=t.dtype) + + t = t[()] if t.ndim == 0 else t + prob = prob[()] if prob.ndim == 0 else prob + return t, prob + + +def _unequal_var_ttest_denom(v1, n1, v2, 
n2, xp=None): + xp = array_namespace(v1, v2) if xp is None else xp + vn1 = v1 / n1 + vn2 = v2 / n2 + with np.errstate(divide='ignore', invalid='ignore'): + df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) + + # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0). + # Hence it doesn't matter what df is as long as it's not NaN. + df = xp.where(xp.isnan(df), xp.asarray(1.), df) + denom = xp.sqrt(vn1 + vn2) + return df, denom + + +def _equal_var_ttest_denom(v1, n1, v2, n2, xp=None): + xp = array_namespace(v1, v2) if xp is None else xp + + # If there is a single observation in one sample, this formula for pooled + # variance breaks down because the variance of that sample is undefined. + # The pooled variance is still defined, though, because the (n-1) in the + # numerator should cancel with the (n-1) in the denominator, leaving only + # the sum of squared differences from the mean: zero. + zero = xp.asarray(0.) + v1 = xp.where(xp.asarray(n1 == 1), zero, v1) + v2 = xp.where(xp.asarray(n2 == 1), zero, v2) + + df = n1 + n2 - 2.0 + svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df + denom = xp.sqrt(svar * (1.0 / n1 + 1.0 / n2)) + return df, denom + + +Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) + + +def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, + equal_var=True, alternative="two-sided"): + r""" + T-test for means of two independent samples from descriptive statistics. + + This is a test for the null hypothesis that two independent + samples have identical average (expected) values. + + Parameters + ---------- + mean1 : array_like + The mean(s) of sample 1. + std1 : array_like + The corrected sample standard deviation of sample 1 (i.e. ``ddof=1``). + nobs1 : array_like + The number(s) of observations of sample 1. + mean2 : array_like + The mean(s) of sample 2. + std2 : array_like + The corrected sample standard deviation of sample 2 (i.e. ``ddof=1``). 
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True, alternative="two-sided"):
    r"""
    T-test for means of two independent samples from descriptive statistics.

    This is a test for the null hypothesis that two independent
    samples have identical average (expected) values.

    Parameters
    ----------
    mean1 : array_like
        The mean(s) of sample 1.
    std1 : array_like
        The corrected sample standard deviation of sample 1 (i.e. ``ddof=1``).
    nobs1 : array_like
        The number(s) of observations of sample 1.
    mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The corrected sample standard deviation of sample 2 (i.e. ``ddof=1``).
    nobs2 : array_like
        The number(s) of observations of sample 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis: the means of the distributions
        are unequal, the first mean is less than the second, or the first
        mean is greater than the second, respectively.
        Default is 'two-sided'.

        .. versionadded:: 1.6.0

    Returns
    -------
    statistic : float or array
        The calculated t-statistics.
    pvalue : float or array
        The two-tailed p-value.

    See Also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is
    the standard error; it is positive when `mean1` is greater than `mean2`.

    This method does not check whether any of the elements of `std1` or
    `std2` are negative.  Negative values yield the same result as their
    absolute values would; no exceptions or warnings will be emitted.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test

    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import ttest_ind_from_stats
    >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
    ...                      mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
    Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)

    """
    xp = array_namespace(mean1, std1, mean2, std2)

    mean1, std1 = xp.asarray(mean1), xp.asarray(std1)
    mean2, std2 = xp.asarray(mean2), xp.asarray(std2)

    # Pooled or Welch degrees of freedom and standard error, per `equal_var`.
    denom_func = _equal_var_ttest_denom if equal_var else _unequal_var_ttest_denom
    df, denom = denom_func(std1**2, nobs1, std2**2, nobs2, xp=xp)

    statistic, pvalue = _ttest_ind_from_stats(mean1, mean2, denom, df,
                                              alternative)
    return Ttest_indResult(statistic, pvalue)
versionadded:: 0.11.0

    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

        The 'omit' option is not currently available for permutation tests or
        one-sided asymptotic tests.

    permutations : non-negative int, np.inf, or None (default), optional
        If 0 or None (default), use the t-distribution to calculate p-values.
        Otherwise, `permutations` is the number of random permutations that
        will be used to estimate p-values using a permutation test. If
        `permutations` equals or exceeds the number of distinct partitions of
        the pooled data, an exact test is performed instead (i.e. each
        distinct partition is used exactly once). See Notes for details.

        .. deprecated:: 1.17.0
            `permutations` is deprecated and will be removed in SciPy 1.17.0.
            Use the `n_resamples` argument of `PermutationMethod`, instead,
            and pass the instance as the `method` argument.

    random_state : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

        Pseudorandom number generator state used to generate permutations
        (used only when `permutations` is not None).

        .. deprecated:: 1.17.0
            `random_state` is deprecated and will be removed in SciPy 1.17.0.
            Use the `rng` argument of `PermutationMethod`, instead,
            and pass the instance as the `method` argument.

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
+ The following options are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + are unequal. + * 'less': the mean of the distribution underlying the first sample + is less than the mean of the distribution underlying the second + sample. + * 'greater': the mean of the distribution underlying the first + sample is greater than the mean of the distribution underlying + the second sample. + + trim : float, optional + If nonzero, performs a trimmed (Yuen's) t-test. + Defines the fraction of elements to be trimmed from each end of the + input samples. If 0 (default), no elements will be trimmed from either + side. The number of trimmed elements from each tail is the floor of the + trim times the number of elements. Valid range is [0, .5). + method : ResamplingMethod, optional + Defines the method used to compute the p-value. If `method` is an + instance of `PermutationMethod`/`MonteCarloMethod`, the p-value is + computed using + `scipy.stats.permutation_test`/`scipy.stats.monte_carlo_test` with the + provided configuration options and other appropriate settings. + Otherwise, the p-value is computed by comparing the test statistic + against a theoretical t-distribution. + + .. versionadded:: 1.15.0 + + Returns + ------- + result : `~scipy.stats._result_classes.TtestResult` + An object with the following attributes: + + statistic : float or ndarray + The t-statistic. + pvalue : float or ndarray + The p-value associated with the given alternative. + df : float or ndarray + The number of degrees of freedom used in calculation of the + t-statistic. This is always NaN for a permutation t-test. + + .. versionadded:: 1.11.0 + + The object also has the following method: + + confidence_interval(confidence_level=0.95) + Computes a confidence interval around the difference in + population means for the given confidence level. + The confidence interval is returned in a ``namedtuple`` with + fields ``low`` and ``high``. 
+ When a permutation t-test is performed, the confidence interval + is not computed, and fields ``low`` and ``high`` contain NaN. + + .. versionadded:: 1.11.0 + + Notes + ----- + Suppose we observe two independent samples, e.g. flower petal lengths, and + we are considering whether the two samples were drawn from the same + population (e.g. the same species of flower or two species with similar + petal characteristics) or two different populations. + + The t-test quantifies the difference between the arithmetic means + of the two samples. The p-value quantifies the probability of observing + as or more extreme values assuming the null hypothesis, that the + samples are drawn from populations with the same population means, is true. + A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that + our observation is not so unlikely to have occurred by chance. Therefore, + we do not reject the null hypothesis of equal population means. + If the p-value is smaller than our threshold, then we have evidence + against the null hypothesis of equal population means. + + By default, the p-value is determined by comparing the t-statistic of the + observed data against a theoretical t-distribution. + + (In the following, note that the argument `permutations` itself is + deprecated, but a nearly identical test may be performed by creating + an instance of `scipy.stats.PermutationMethod` with ``n_resamples=permutuations`` + and passing it as the `method` argument.) + When ``1 < permutations < binom(n, k)``, where + + * ``k`` is the number of observations in `a`, + * ``n`` is the total number of observations in `a` and `b`, and + * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), + + the data are pooled (concatenated), randomly assigned to either group `a` + or `b`, and the t-statistic is calculated. 
This process is performed + repeatedly (`permutation` times), generating a distribution of the + t-statistic under the null hypothesis, and the t-statistic of the observed + data is compared to this distribution to determine the p-value. + Specifically, the p-value reported is the "achieved significance level" + (ASL) as defined in 4.4 of [3]_. Note that there are other ways of + estimating p-values using randomized permutation tests; for other + options, see the more general `permutation_test`. + + When ``permutations >= binom(n, k)``, an exact test is performed: the data + are partitioned between the groups in each distinct way exactly once. + + The permutation test can be computationally expensive and not necessarily + more accurate than the analytical test, but it does not make strong + assumptions about the shape of the underlying distribution. + + Use of trimming is commonly referred to as the trimmed t-test. At times + called Yuen's t-test, this is an extension of Welch's t-test, with the + difference being the use of winsorized means in calculation of the variance + and the trimmed sample size in calculation of the statistic. Trimming is + recommended if the underlying distribution is long-tailed or contaminated + with outliers [4]_. + + The statistic is calculated as ``(np.mean(a) - np.mean(b))/se``, where + ``se`` is the standard error. Therefore, the statistic will be positive + when the sample mean of `a` is greater than the sample mean of `b` and + negative when the sample mean of `a` is less than the sample mean of + `b`. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test + + .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test + + .. [3] B. Efron and T. Hastie. Computer Age Statistical Inference. (2016). + + .. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population + Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR, + www.jstor.org/stable/2334299. Accessed 30 Mar. 
2021. + + .. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and + Performance of the Two-Sample Trimmed t." Biometrika, vol. 60, + no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550. + Accessed 30 Mar. 2021. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + + Test with sample with identical means: + + >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng) + >>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs2) + TtestResult(statistic=-0.4390847099199348, + pvalue=0.6606952038870015, + df=998.0) + >>> stats.ttest_ind(rvs1, rvs2, equal_var=False) + TtestResult(statistic=-0.4390847099199348, + pvalue=0.6606952553131064, + df=997.4602304121448) + + `ttest_ind` underestimates p for unequal variances: + + >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs3) + TtestResult(statistic=-1.6370984482905417, + pvalue=0.1019251574705033, + df=998.0) + >>> stats.ttest_ind(rvs1, rvs3, equal_var=False) + TtestResult(statistic=-1.637098448290542, + pvalue=0.10202110497954867, + df=765.1098655246868) + + When ``n1 != n2``, the equal variance t-statistic is no longer equal to the + unequal variance t-statistic: + + >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs4) + TtestResult(statistic=-1.9481646859513422, + pvalue=0.05186270935842703, + df=598.0) + >>> stats.ttest_ind(rvs1, rvs4, equal_var=False) + TtestResult(statistic=-1.3146566100751664, + pvalue=0.1913495266513811, + df=110.41349083985212) + + T-test with different means, variance, and n: + + >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs5) + TtestResult(statistic=-2.8415950600298774, + pvalue=0.0046418707568707885, + df=598.0) + >>> stats.ttest_ind(rvs1, rvs5, equal_var=False) + 
TtestResult(statistic=-1.8686598649188084, + pvalue=0.06434714193919686, + df=109.32167496550137) + + Take these two samples, one of which has an extreme tail. + + >>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3) + >>> b = (1.1, 2.9, 4.2) + + Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example, + using 20% trimming, ``trim=.2``, the test will reduce the impact of one + (``np.floor(trim*len(a))``) element from each tail of sample `a`. It will + have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0. + + >>> stats.ttest_ind(a, b, trim=.2) + TtestResult(statistic=3.4463884028073513, + pvalue=0.01369338726499547, + df=6.0) + """ + xp = array_namespace(a, b) + + default_float = xp.asarray(1.).dtype + if xp.isdtype(a.dtype, 'integral'): + a = xp.astype(a, default_float) + if xp.isdtype(b.dtype, 'integral'): + b = xp.astype(b, default_float) + + if not (0 <= trim < .5): + raise ValueError("Trimming percentage should be 0 <= `trim` < .5.") + + if not isinstance(method, PermutationMethod | MonteCarloMethod | None): + message = ("`method` must be an instance of `PermutationMethod`, an instance " + "of `MonteCarloMethod`, or None (default).") + raise ValueError(message) + + if not is_numpy(xp) and method is not None: + message = "Use of resampling methods is compatible only with NumPy arrays." + raise NotImplementedError(message) + + result_shape = _broadcast_array_shapes_remove_axis((a, b), axis=axis) + NaN = xp.full(result_shape, _get_nan(a, b, xp=xp)) + NaN = NaN[()] if NaN.ndim == 0 else NaN + if xp_size(a) == 0 or xp_size(b) == 0: + return TtestResult(NaN, NaN, df=NaN, alternative=NaN, + standard_error=NaN, estimate=NaN) + + alternative_nums = {"less": -1, "two-sided": 0, "greater": 1} + + # This probably should be deprecated and replaced with a `method` argument + if permutations is not None and permutations != 0: + message = "Use of `permutations` is compatible only with NumPy arrays." 
+ if not is_numpy(xp): + raise NotImplementedError(message) + + message = "Use of `permutations` is incompatible with with use of `trim`." + if trim != 0: + raise NotImplementedError(message) + + t, prob = _permutation_ttest(a, b, permutations=permutations, + axis=axis, equal_var=equal_var, + nan_policy=nan_policy, + random_state=random_state, + alternative=alternative) + df, denom, estimate = NaN, NaN, NaN + + # _axis_nan_policy decorator doesn't play well with strings + return TtestResult(t, prob, df=df, alternative=alternative_nums[alternative], + standard_error=denom, estimate=estimate) + + n1 = xp.asarray(a.shape[axis], dtype=a.dtype) + n2 = xp.asarray(b.shape[axis], dtype=b.dtype) + + if trim == 0: + with np.errstate(divide='ignore', invalid='ignore'): + v1 = _var(a, axis, ddof=1, xp=xp) + v2 = _var(b, axis, ddof=1, xp=xp) + + m1 = xp.mean(a, axis=axis) + m2 = xp.mean(b, axis=axis) + else: + message = "Use of `trim` is compatible only with NumPy arrays." + if not is_numpy(xp): + raise NotImplementedError(message) + + v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis) + v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis) + + if equal_var: + df, denom = _equal_var_ttest_denom(v1, n1, v2, n2, xp=xp) + else: + df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2, xp=xp) + + if method is None: + t, prob = _ttest_ind_from_stats(m1, m2, denom, df, alternative) + else: + # nan_policy is taken care of by axis_nan_policy decorator + ttest_kwargs = dict(equal_var=equal_var, trim=trim) + t, prob = _ttest_resampling(a, b, axis, alternative, ttest_kwargs, method) + + # when nan_policy='omit', `df` can be different for different axis-slices + df = xp.broadcast_to(df, t.shape) + df = df[()] if df.ndim ==0 else df + estimate = m1 - m2 + + return TtestResult(t, prob, df=df, alternative=alternative_nums[alternative], + standard_error=denom, estimate=estimate) + + +def _ttest_resampling(x, y, axis, alternative, ttest_kwargs, method): + def statistic(x, y, axis): + return 
def _ttest_resampling(x, y, axis, alternative, ttest_kwargs, method):
    """Compute the `ttest_ind` statistic and a resampling-based p-value.

    `method` is a `PermutationMethod` or `MonteCarloMethod` instance; its
    fields configure `permutation_test` / `monte_carlo_test` respectively.
    """
    def statistic(x, y, axis):
        return ttest_ind(x, y, axis=axis, **ttest_kwargs).statistic

    use_permutation = isinstance(method, PermutationMethod)
    options = method._asdict()

    if not use_permutation:
        # `monte_carlo_test` accepts an `rvs` tuple of callables, not an
        # `rng`; if the user specified an `rng`, replace it with the
        # default (normal) callables drawn from that generator.
        rng = options.pop('rng', None)
        if rng is not None:
            generator = np.random.default_rng(rng)
            options['rvs'] = generator.normal, generator.normal

    runner = permutation_test if use_permutation else monte_carlo_test
    res = runner((x, y), statistic=statistic, axis=axis,
                 alternative=alternative, **options)

    return res.statistic, res.pvalue


def _ttest_trim_var_mean_len(a, trim, axis):
    """Winsorized variance, trimmed mean, and trimmed length along `axis`.

    Helper for `ttest_ind` when trimming is requested.  Subsequent
    calculations assume sorted input; see [4] Section 1, "Let x_1, ...,
    x_n be n ordered observations..."
    """
    a = np.sort(a, axis=axis)

    # `g` is the number of elements replaced on each tail, converted from
    # the requested trimming fraction.
    n_obs = a.shape[axis]
    g = int(n_obs * trim)

    # Winsorized variance of the sorted input for this `g`.
    v = _calculate_winsorized_variance(a, g, axis)

    # Total number of elements remaining after trimming both tails.
    n_trimmed = n_obs - 2 * g

    # The g-times trimmed mean, as defined in [4] (1-1).
    m = trim_mean(a, trim, axis=axis)
    return v, m, n_trimmed


def _calculate_winsorized_variance(a, g, axis):
    """g-times Winsorized variance of `a` along `axis`.

    `a` is expected to be sorted along `axis` already.
    """
    if g == 0:
        return _var(a, ddof=1, axis=axis)

    # Put the target axis last for easier manipulation.  NOTE: this is a
    # view, so the winsorization below writes through to `a`.
    a_win = np.moveaxis(a, axis, -1)

    # Record which slices contain NaN before the tails are overwritten.
    had_nan = np.any(np.isnan(a_win), axis=-1)

    # Winsorization and variance calculation are done in one step in [4]
    # (1-3), but here winsorization is done first: each tail is replaced
    # by its innermost retained value, `(g + 1) * x_{g + 1}` on the left
    # and `(g + 1) * x_{n - g}` on the right in (1-3) of [4].
    # Zero-based indexing turns `g + 1` into `g` and `n - g` into `-g - 1`.
    a_win[..., :g] = a_win[..., [g]]
    a_win[..., -g:] = a_win[..., [-g - 1]]

    # In [4] the degrees of freedom are `h - 1` with `h = n - 2g`
    # (unnumbered equations in Section 1, pp. 369-370), i.e.
    # ``ddof = 2g + 1`` in NumPy's `n - ddof` convention.  Convert to an
    # array to permit the masked assignment below.
    var_win = np.asarray(_var(a_win, ddof=(2 * g + 1), axis=-1))

    # With `nan_policy='propagate'`, NaNs sort into the tails and may be
    # winsorized away entirely; restore NaN in those slices.
    var_win[had_nan] = np.nan
    return var_win
def _permutation_distribution_t(data, permutations, size_a, equal_var,
                                random_state=None):
    """Generate the permutation distribution of the t statistic.

    Parameters are the pooled data (group axis last), the requested number
    of permutations, the size of the first sample, and the equal-variance
    flag.  Returns the null-distribution statistics, the number of
    permutations actually used, and the number of distinct partitions.
    """
    random_state = check_random_state(random_state)

    # prepare permutation indices
    size = data.shape[-1]
    # number of distinct combinations
    n_max = special.comb(size, size_a)

    if permutations < n_max:
        # randomized test: draw `permutations` random orderings
        perm_generator = (random_state.permutation(size)
                          for i in range(permutations))
    else:
        # exact test: use each distinct partition exactly once
        permutations = n_max
        perm_generator = (np.concatenate(z)
                          for z in _all_partitions(size_a, size-size_a))

    t_stat = []
    for indices in _batch_generator(perm_generator, batch=50):
        # get one batch from perm_generator at a time as a list
        indices = np.array(indices)
        # generate permutations
        data_perm = data[..., indices]
        # move axis indexing permutations to position 0 to broadcast
        # nicely with t_stat_observed, which doesn't have this dimension
        data_perm = np.moveaxis(data_perm, -2, 0)

        a = data_perm[..., :size_a]
        b = data_perm[..., size_a:]
        t_stat.append(_calc_t_stat(a, b, equal_var))

    t_stat = np.concatenate(t_stat, axis=0)

    return t_stat, permutations, n_max


def _calc_t_stat(a, b, equal_var, axis=-1):
    """Calculate the t statistic along the given dimension."""
    na = a.shape[axis]
    nb = b.shape[axis]
    avg_a = np.mean(a, axis=axis)
    avg_b = np.mean(b, axis=axis)
    var_a = _var(a, axis=axis, ddof=1)
    var_b = _var(b, axis=axis, ddof=1)

    if not equal_var:
        _, denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)
    else:
        _, denom = _equal_var_ttest_denom(var_a, na, var_b, nb)

    return (avg_a-avg_b)/denom


def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
                       nan_policy='propagate', random_state=None,
                       alternative="two-sided"):
    """
    Calculates the T-test for the means of TWO INDEPENDENT samples of scores
    using permutation methods.

    This test is similar to `stats.ttest_ind`, except it doesn't rely on an
    approximate normality assumption since it uses a permutation test.
    This function is only called from ttest_ind when permutations is not None.

    Parameters
    ----------
    a, b : array_like
        The arrays must be broadcastable, except along the dimension
        corresponding to `axis` (the zeroth, by default).
    axis : int, optional
        The axis over which to operate on a and b.
    permutations : int, optional
        Number of permutations used to calculate p-value. If greater than or
        equal to the number of distinct permutations, perform an exact test.
    equal_var : bool, optional
        If True (default), perform a standard independent two-sample test
        that assumes equal population variances. If False, perform Welch's
        t-test, which does not assume equal population variance.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        If `seed` is None, the `numpy.random.RandomState` singleton is used
        (this is what `check_random_state` returns for None).
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance
        then that instance is used.
        Pseudorandom number generator state used for generating random
        permutations.

    Returns
    -------
    statistic : float or array
        The calculated t-statistic.
    pvalue : float or array
        The p-value.

    """
    if permutations < 0 or (np.isfinite(permutations) and
                            int(permutations) != permutations):
        raise ValueError("Permutations must be a non-negative integer.")

    random_state = check_random_state(random_state)

    t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis)

    na = a.shape[axis]
    mat = _broadcast_concatenate((a, b), axis=axis)
    mat = np.moveaxis(mat, axis, -1)

    t_stat, permutations, n_max = _permutation_distribution_t(
        mat, permutations, size_a=na, equal_var=equal_var,
        random_state=random_state)

    compare = {"less": np.less_equal,
               "greater": np.greater_equal,
               "two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))}

    # Calculate the p-values
    cmps = compare[alternative](t_stat, t_stat_observed)
    # Randomized test p-value calculation should use biased estimate; see e.g.
    # https://www.degruyter.com/document/doi/10.2202/1544-6115.1585/
    adjustment = 1 if n_max > permutations else 0
    pvalues = (cmps.sum(axis=0) + adjustment) / (permutations + adjustment)

    # nans propagate naturally in statistic calculation, but need to be
    # propagated manually into pvalues
    if nan_policy == 'propagate' and np.isnan(t_stat_observed).any():
        if np.ndim(pvalues) == 0:
            pvalues = np.float64(np.nan)
        else:
            pvalues[np.isnan(t_stat_observed)] = np.nan

    return (t_stat_observed, pvalues)


def _get_len(a, axis, msg):
    """Length of `a` along `axis`; raise `AxisError(msg)` if out of range."""
    try:
        n = a.shape[axis]
    except IndexError:
        raise AxisError(axis, a.ndim, msg) from None
    return n


@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
                          result_to_tuple=unpack_TtestResult, n_outputs=6,
                          paired=True)
def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
    """Calculate the t-test on TWO RELATED samples of scores, a and b.

    This is a test for the null hypothesis that two related or
    repeated samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided': the means of the distributions underlying the samples
          are unequal.
        * 'less': the mean of the distribution underlying the first sample
          is less than the mean of the distribution underlying the second
          sample.
        * 'greater': the mean of the distribution underlying the first
          sample is greater than the mean of the distribution underlying
          the second sample.

        .. versionadded:: 1.6.0

    Returns
    -------
    result : `~scipy.stats._result_classes.TtestResult`
        An object with the following attributes:

        statistic : float or array
            The t-statistic.
        pvalue : float or array
            The p-value associated with the given alternative.
        df : float or array
            The number of degrees of freedom used in calculation of the
            t-statistic; this is one less than the size of the sample
            (``a.shape[axis]``).

            .. versionadded:: 1.10.0

        The object also has the following method:

        confidence_interval(confidence_level=0.95)
            Computes a confidence interval around the difference in
            population means for the given confidence level.
            The confidence interval is returned in a ``namedtuple`` with
            fields `low` and `high`.

            .. versionadded:: 1.10.0

    Notes
    -----
    Examples for use are scores of the same set of students in
    different exams, or repeated sampling from the same units. The
    test measures whether the average score differs significantly
    across samples (e.g. exams). If we observe a large p-value, for
    example greater than 0.05 or 0.1 then we cannot reject the null
    hypothesis of identical average scores. If the p-value is smaller
    than the threshold, e.g. 1%, 5% or 10%, then we reject the null
    hypothesis of equal averages. Small p-values are associated with
    large t-statistics.

    The t-statistic is calculated as ``np.mean(a - b)/se``, where ``se`` is the
    standard error. Therefore, the t-statistic will be positive when the sample
    mean of ``a - b`` is greater than zero and negative when the sample mean of
    ``a - b`` is less than zero.

    References
    ----------
    https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()

    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
    >>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
    ...         + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
    >>> stats.ttest_rel(rvs1, rvs2)
    TtestResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672, df=499)
    >>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
    ...         + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
    >>> stats.ttest_rel(rvs1, rvs3)
    TtestResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09, df=499)

    """
    # A paired t-test is a one-sample t-test on the differences.
    return ttest_1samp(a - b, popmean=0, axis=axis, alternative=alternative,
                       _no_deco=True)


# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
    "pearson": 1,
    "log-likelihood": 0,
    "freeman-tukey": -0.5,
    "mod-log-likelihood": -1,
    "neyman": -2,
    "cressie-read": 2/3,
}
+ """ + if hasattr(a, 'count'): + num = a.count(axis=axis) + if isinstance(num, np.ndarray) and num.ndim == 0: + # In some cases, the `count` method returns a scalar array (e.g. + # np.array(3)), but we want a plain integer. + num = int(num) + else: + if axis is None: + num = xp_size(a) + else: + num = a.shape[axis] + return num + + +def _m_broadcast_to(a, shape, *, xp): + if np.ma.isMaskedArray(a): + return np.ma.masked_array(np.broadcast_to(a, shape), + mask=np.broadcast_to(a.mask, shape)) + return xp.broadcast_to(a, shape) + + +def _m_sum(a, *, axis, preserve_mask, xp): + if np.ma.isMaskedArray(a): + sum = a.sum(axis) + return sum if preserve_mask else np.asarray(sum) + return xp.sum(a, axis=axis) + + +def _m_mean(a, *, axis, keepdims, xp): + if np.ma.isMaskedArray(a): + return np.asarray(a.mean(axis=axis, keepdims=keepdims)) + return xp.mean(a, axis=axis, keepdims=keepdims) + + +Power_divergenceResult = namedtuple('Power_divergenceResult', + ('statistic', 'pvalue')) + + +def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None): + """Cressie-Read power divergence statistic and goodness of fit test. + + This function tests the null hypothesis that the categorical data + has the given frequencies, using the Cressie-Read power divergence + statistic. + + Parameters + ---------- + f_obs : array_like + Observed frequencies in each category. + + .. deprecated:: 1.14.0 + Support for masked array input was deprecated in + SciPy 1.14.0 and will be removed in version 1.16.0. + + f_exp : array_like, optional + Expected frequencies in each category. By default the categories are + assumed to be equally likely. + + .. deprecated:: 1.14.0 + Support for masked array input was deprecated in + SciPy 1.14.0 and will be removed in version 1.16.0. + + ddof : int, optional + "Delta degrees of freedom": adjustment to the degrees of freedom + for the p-value. 
The p-value is computed using a chi-squared + distribution with ``k - 1 - ddof`` degrees of freedom, where `k` + is the number of observed frequencies. The default value of `ddof` + is 0. + axis : int or None, optional + The axis of the broadcast result of `f_obs` and `f_exp` along which to + apply the test. If axis is None, all values in `f_obs` are treated + as a single data set. Default is 0. + lambda_ : float or str, optional + The power in the Cressie-Read power divergence statistic. The default + is 1. For convenience, `lambda_` may be assigned one of the following + strings, in which case the corresponding numerical value is used: + + * ``"pearson"`` (value 1) + Pearson's chi-squared statistic. In this case, the function is + equivalent to `chisquare`. + * ``"log-likelihood"`` (value 0) + Log-likelihood ratio. Also known as the G-test [3]_. + * ``"freeman-tukey"`` (value -1/2) + Freeman-Tukey statistic. + * ``"mod-log-likelihood"`` (value -1) + Modified log-likelihood ratio. + * ``"neyman"`` (value -2) + Neyman's statistic. + * ``"cressie-read"`` (value 2/3) + The power recommended in [5]_. + + Returns + ------- + res: Power_divergenceResult + An object containing attributes: + + statistic : float or ndarray + The Cressie-Read power divergence test statistic. The value is + a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D. + pvalue : float or ndarray + The p-value of the test. The value is a float if `ddof` and the + return value `stat` are scalars. + + See Also + -------- + chisquare + + Notes + ----- + This test is invalid when the observed or expected frequencies in each + category are too small. A typical rule is that all of the observed + and expected frequencies should be at least 5. 
+ + Also, the sum of the observed and expected frequencies must be the same + for the test to be valid; `power_divergence` raises an error if the sums + do not agree within a relative tolerance of ``eps**0.5``, where ``eps`` + is the precision of the input dtype. + + When `lambda_` is less than zero, the formula for the statistic involves + dividing by `f_obs`, so a warning or error may be generated if any value + in `f_obs` is 0. + + Similarly, a warning or error may be generated if any value in `f_exp` is + zero when `lambda_` >= 0. + + The default degrees of freedom, k-1, are for the case when no parameters + of the distribution are estimated. If p parameters are estimated by + efficient maximum likelihood then the correct degrees of freedom are + k-1-p. If the parameters are estimated in a different way, then the + dof can be between k-1-p and k-1. However, it is also possible that + the asymptotic distribution is not a chisquare, in which case this + test is not appropriate. + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 8. + https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html + .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test + .. [3] "G-test", https://en.wikipedia.org/wiki/G-test + .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and + practice of statistics in biological research", New York: Freeman + (1981) + .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit + Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), + pp. 440-464. + + Examples + -------- + (See `chisquare` for more examples.) + + When just `f_obs` is given, it is assumed that the expected frequencies + are uniform and given by the mean of the observed frequencies. Here we + perform a G-test (i.e. 
use the log-likelihood ratio statistic): + + >>> import numpy as np + >>> from scipy.stats import power_divergence + >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood') + (2.006573162632538, 0.84823476779463769) + + The expected frequencies can be given with the `f_exp` argument: + + >>> power_divergence([16, 18, 16, 14, 12, 12], + ... f_exp=[16, 16, 16, 16, 16, 8], + ... lambda_='log-likelihood') + (3.3281031458963746, 0.6495419288047497) + + When `f_obs` is 2-D, by default the test is applied to each column. + + >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T + >>> obs.shape + (6, 2) + >>> power_divergence(obs, lambda_="log-likelihood") + (array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225])) + + By setting ``axis=None``, the test is applied to all data in the array, + which is equivalent to applying the test to the flattened array. + + >>> power_divergence(obs, axis=None) + (23.31034482758621, 0.015975692534127565) + >>> power_divergence(obs.ravel()) + (23.31034482758621, 0.015975692534127565) + + `ddof` is the change to make to the default degrees of freedom. + + >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1) + (2.0, 0.73575888234288467) + + The calculation of the p-values is done by broadcasting the + test statistic with `ddof`. + + >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) + (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) + + `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has + shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting + `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared + statistics, we must use ``axis=1``: + + >>> power_divergence([16, 18, 16, 14, 12, 12], + ... f_exp=[[16, 16, 16, 16, 16, 8], + ... [8, 20, 20, 16, 12, 12]], + ... 
axis=1) + (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) + + """ + return _power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_=lambda_) + + +def _power_divergence(f_obs, f_exp, ddof, axis, lambda_, sum_check=True): + xp = array_namespace(f_obs) + default_float = xp.asarray(1.).dtype + + # Convert the input argument `lambda_` to a numerical value. + if isinstance(lambda_, str): + if lambda_ not in _power_div_lambda_names: + names = repr(list(_power_div_lambda_names.keys()))[1:-1] + raise ValueError(f"invalid string for lambda_: {lambda_!r}. " + f"Valid strings are {names}") + lambda_ = _power_div_lambda_names[lambda_] + elif lambda_ is None: + lambda_ = 1 + + def warn_masked(arg): + if isinstance(arg, ma.MaskedArray): + message = ( + "`power_divergence` and `chisquare` support for masked array input was " + "deprecated in SciPy 1.14.0 and will be removed in version 1.16.0.") + warnings.warn(message, DeprecationWarning, stacklevel=2) + + warn_masked(f_obs) + f_obs = f_obs if np.ma.isMaskedArray(f_obs) else xp.asarray(f_obs) + dtype = default_float if xp.isdtype(f_obs.dtype, 'integral') else f_obs.dtype + f_obs = (f_obs.astype(dtype) if np.ma.isMaskedArray(f_obs) + else xp.asarray(f_obs, dtype=dtype)) + f_obs_float = (f_obs.astype(np.float64) if hasattr(f_obs, 'mask') + else xp.asarray(f_obs, dtype=xp.float64)) + + if f_exp is not None: + warn_masked(f_exp) + f_exp = f_exp if np.ma.isMaskedArray(f_obs) else xp.asarray(f_exp) + dtype = default_float if xp.isdtype(f_exp.dtype, 'integral') else f_exp.dtype + f_exp = (f_exp.astype(dtype) if np.ma.isMaskedArray(f_exp) + else xp.asarray(f_exp, dtype=dtype)) + + bshape = _broadcast_shapes((f_obs_float.shape, f_exp.shape)) + f_obs_float = _m_broadcast_to(f_obs_float, bshape, xp=xp) + f_exp = _m_broadcast_to(f_exp, bshape, xp=xp) + + if sum_check: + dtype_res = xp.result_type(f_obs.dtype, f_exp.dtype) + rtol = xp.finfo(dtype_res).eps**0.5 # to pass existing tests + with np.errstate(invalid='ignore'): + 
f_obs_sum = _m_sum(f_obs_float, axis=axis, preserve_mask=False, xp=xp) + f_exp_sum = _m_sum(f_exp, axis=axis, preserve_mask=False, xp=xp) + relative_diff = (xp.abs(f_obs_sum - f_exp_sum) / + xp.minimum(f_obs_sum, f_exp_sum)) + diff_gt_tol = xp.any(relative_diff > rtol, axis=None) + if diff_gt_tol: + msg = (f"For each axis slice, the sum of the observed " + f"frequencies must agree with the sum of the " + f"expected frequencies to a relative tolerance " + f"of {rtol}, but the percent differences are:\n" + f"{relative_diff}") + raise ValueError(msg) + + else: + # Ignore 'invalid' errors so the edge case of a data set with length 0 + # is handled without spurious warnings. + with np.errstate(invalid='ignore'): + f_exp = _m_mean(f_obs, axis=axis, keepdims=True, xp=xp) + + # `terms` is the array of terms that are summed along `axis` to create + # the test statistic. We use some specialized code for a few special + # cases of lambda_. + if lambda_ == 1: + # Pearson's chi-squared statistic + terms = (f_obs - f_exp)**2 / f_exp + elif lambda_ == 0: + # Log-likelihood ratio (i.e. G-test) + terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp) + elif lambda_ == -1: + # Modified log-likelihood ratio + terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs) + else: + # General Cressie-Read power divergence. + terms = f_obs * ((f_obs / f_exp)**lambda_ - 1) + terms /= 0.5 * lambda_ * (lambda_ + 1) + + stat = _m_sum(terms, axis=axis, preserve_mask=True, xp=xp) + + num_obs = _m_count(terms, axis=axis, xp=xp) + ddof = xp.asarray(ddof) + + df = xp.asarray(num_obs - 1 - ddof) + chi2 = _SimpleChi2(df) + pvalue = _get_pvalue(stat, chi2 , alternative='greater', symmetric=False, xp=xp) + + stat = stat[()] if stat.ndim == 0 else stat + pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue + + return Power_divergenceResult(stat, pvalue) + + +def chisquare(f_obs, f_exp=None, ddof=0, axis=0, *, sum_check=True): + """Perform Pearson's chi-squared test. 

    Pearson's chi-squared test [1]_ is a goodness-of-fit test for a multinomial
    distribution with given probabilities; that is, it assesses the null hypothesis
    that the observed frequencies (counts) are obtained by independent
    sampling of *N* observations from a categorical distribution with given
    expected frequencies.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category. By default, the categories are
        assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value. The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where ``k``
        is the number of categories. The default value of `ddof` is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along which to
        apply the test. If axis is None, all values in `f_obs` are treated
        as a single data set. Default is 0.
    sum_check : bool, optional
        Whether to perform a check that ``sum(f_obs) - sum(f_exp) == 0``. If True,
        (default) raise an error when the relative difference exceeds the square root
        of the precision of the data type. See Notes for rationale and possible
        exceptions.

    Returns
    -------
    res: Power_divergenceResult
        An object containing attributes:

        statistic : float or ndarray
            The chi-squared test statistic.  The value is a float if `axis` is
            None or `f_obs` and `f_exp` are 1-D.
        pvalue : float or ndarray
            The p-value of the test.  The value is a float if `ddof` and the
            result attribute `statistic` are scalars.

    See Also
    --------
    scipy.stats.power_divergence
    scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table.
    scipy.stats.barnard_exact : An unconditional exact test. An alternative
        to chi-squared test for small sample sizes.
    :ref:`hypothesis_chisquare` : Extended example

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5. According to [2]_, the
    total number of observations is recommended to be greater than 13,
    otherwise exact tests (such as Barnard's Exact test) should be used
    because they do not overreject.

    The default degrees of freedom, k-1, are for the case when no parameters
    of the distribution are estimated. If p parameters are estimated by
    efficient maximum likelihood then the correct degrees of freedom are
    k-1-p. If the parameters are estimated in a different way, then the
    dof can be between k-1-p and k-1. However, it is also possible that
    the asymptotic distribution is not chi-square, in which case this test
    is not appropriate.

    For Pearson's chi-squared test, the total observed and expected counts must match
    for the p-value to accurately reflect the probability of observing such an extreme
    value of the statistic under the null hypothesis.
    This function may be used to perform other statistical tests that do not require
    the total counts to be equal. For instance, to test the null hypothesis that
    ``f_obs[i]`` is Poisson-distributed with expectation ``f_exp[i]``, set ``ddof=-1``
    and ``sum_check=False``. This test follows from the fact that a Poisson random
    variable with mean and variance ``f_exp[i]`` is approximately normal with the
    same mean and variance; the chi-squared statistic standardizes, squares, and sums
    the observations; and the sum of ``n`` squared standard normal variables follows
    the chi-squared distribution with ``n`` degrees of freedom.

    References
    ----------
    .. [1] "Pearson's chi-squared test".
           *Wikipedia*. https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
    .. [2] Pearson, Karl. "On the criterion that a given system of deviations from the probable
           in the case of a correlated system of variables is such that it can be reasonably
           supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
           (1900), pp. 157-175.

    Examples
    --------
    When only the mandatory `f_obs` argument is given, it is assumed that the
    expected frequencies are uniform and given by the mean of the observed
    frequencies:

    >>> import numpy as np
    >>> from scipy.stats import chisquare
    >>> chisquare([16, 18, 16, 14, 12, 12])
    Power_divergenceResult(statistic=2.0, pvalue=0.84914503608460956)

    The optional `f_exp` argument gives the expected frequencies.

    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
    Power_divergenceResult(statistic=3.5, pvalue=0.62338762774958223)

    When `f_obs` is 2-D, by default the test is applied to each column.

    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> obs.shape
    (6, 2)
    >>> chisquare(obs)
    Power_divergenceResult(statistic=array([2.        , 6.66666667]), pvalue=array([0.84914504, 0.24663415]))

    By setting ``axis=None``, the test is applied to all data in the array,
    which is equivalent to applying the test to the flattened array.

    >>> chisquare(obs, axis=None)
    Power_divergenceResult(statistic=23.31034482758621, pvalue=0.015975692534127565)
    >>> chisquare(obs.ravel())
    Power_divergenceResult(statistic=23.310344827586206, pvalue=0.01597569253412758)

    `ddof` is the change to make to the default degrees of freedom.

    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
    Power_divergenceResult(statistic=2.0, pvalue=0.7357588823428847)

    The calculation of the p-values is done by broadcasting the
    chi-squared statistic with `ddof`.

    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0, 1, 2])
    Power_divergenceResult(statistic=2.0, pvalue=array([0.84914504, 0.73575888, 0.5724067 ]))

    `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
    shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
    `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
    statistics, we use ``axis=1``:

    >>> chisquare([16, 18, 16, 14, 12, 12],
    ...           f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
    ...           axis=1)
    Power_divergenceResult(statistic=array([3.5 , 9.25]), pvalue=array([0.62338763, 0.09949846]))

    For a more detailed example, see :ref:`hypothesis_chisquare`.
    """  # noqa: E501
    return _power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
                             lambda_="pearson", sum_check=sum_check)


# Result container shared by ks_1samp, ks_2samp, and kstest: a named-tuple-like
# bunch with positional (statistic, pvalue) and extra keyword-only attributes.
KstestResult = _make_tuple_bunch('KstestResult', ['statistic', 'pvalue'],
                                 ['statistic_location', 'statistic_sign'])


def _compute_dplus(cdfvals, x):
    """Computes D+ as used in the Kolmogorov-Smirnov test.

    Parameters
    ----------
    cdfvals : array_like
        Sorted array of CDF values between 0 and 1
    x: array_like
        Sorted array of the stochastic variable itself

    Returns
    -------
    res: Pair with the following elements:
        - The maximum distance of the CDF values below Uniform(0, 1).
        - The location at which the maximum is reached.

    """
    n = len(cdfvals)
    # The ECDF just after the i-th sorted observation is (i+1)/n; D+ is the
    # largest amount by which the ECDF exceeds the hypothesized CDF.
    dplus = (np.arange(1.0, n + 1) / n - cdfvals)
    amax = dplus.argmax()
    loc_max = x[amax]
    return (dplus[amax], loc_max)


def _compute_dminus(cdfvals, x):
    """Computes D- as used in the Kolmogorov-Smirnov test.

    Parameters
    ----------
    cdfvals : array_like
        Sorted array of CDF values between 0 and 1
    x: array_like
        Sorted array of the stochastic variable itself

    Returns
    -------
    res: Pair with the following elements:
        - Maximum distance of the CDF values above Uniform(0, 1)
        - The location at which the maximum is reached.

    """
    n = len(cdfvals)
    # The ECDF just before the i-th sorted observation is i/n; D- is the
    # largest amount by which the hypothesized CDF exceeds the ECDF.
    dminus = (cdfvals - np.arange(0.0, n)/n)
    amax = dminus.argmax()
    loc_max = x[amax]
    return (dminus[amax], loc_max)


def _tuple_to_KstestResult(statistic, pvalue,
                           statistic_location, statistic_sign):
    """Re-pack a 4-tuple of fields as a `KstestResult` bunch."""
    return KstestResult(statistic, pvalue,
                        statistic_location=statistic_location,
                        statistic_sign=statistic_sign)


def _KstestResult_to_tuple(res):
    """Flatten a `KstestResult` bunch back into a 4-tuple of fields."""
    return *res, res.statistic_location, res.statistic_sign


@_axis_nan_policy_factory(_tuple_to_KstestResult, n_samples=1, n_outputs=4,
                          result_to_tuple=_KstestResult_to_tuple)
@_rename_parameter("mode", "method")
def ks_1samp(x, cdf, args=(), alternative='two-sided', method='auto'):
    """
    Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.

    This test compares the underlying distribution F(x) of a sample
    against a given continuous distribution G(x). See Notes for a description
    of the available null and alternative hypotheses.

    Parameters
    ----------
    x : array_like
        a 1-D array of observations of iid random variables.
    cdf : callable
        callable used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used with `cdf`.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes below.
    method : {'auto', 'exact', 'approx', 'asymp'}, optional
        Defines the distribution used for calculating the p-value.
        The following options are available (default is 'auto'):

          * 'auto' : selects one of the other options.
          * 'exact' : uses the exact distribution of test statistic.
          * 'approx' : approximates the two-sided probability with twice
            the one-sided probability
          * 'asymp': uses asymptotic distribution of test statistic

    Returns
    -------
    res: KstestResult
        An object containing attributes:

        statistic : float
            KS test statistic, either D+, D-, or D (the maximum of the two)
        pvalue : float
            One-tailed or two-tailed p-value.
        statistic_location : float
            Value of `x` corresponding with the KS statistic; i.e., the
            distance between the empirical distribution function and the
            hypothesized cumulative distribution function is measured at this
            observation.
        statistic_sign : int
            +1 if the KS statistic is the maximum positive difference between
            the empirical distribution function and the hypothesized cumulative
            distribution function (D+); -1 if the KS statistic is the maximum
            negative difference (D-).


    See Also
    --------
    ks_2samp, kstest

    Notes
    -----
    There are three options for the null and corresponding alternative
    hypothesis that can be selected using the `alternative` parameter.

    - `two-sided`: The null hypothesis is that the two distributions are
      identical, F(x)=G(x) for all x; the alternative is that they are not
      identical.

    - `less`: The null hypothesis is that F(x) >= G(x) for all x; the
      alternative is that F(x) < G(x) for at least one x.

    - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
      alternative is that F(x) > G(x) for at least one x.

    Note that the alternative hypotheses describe the *CDFs* of the
    underlying distributions, not the observed values. For example,
    suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
    x1 tend to be less than those in x2.

    Examples
    --------
    Suppose we wish to test the null hypothesis that a sample is distributed
    according to the standard normal.
    We choose a confidence level of 95%; that is, we will reject the null
    hypothesis in favor of the alternative if the p-value is less than 0.05.

    When testing uniformly distributed data, we would expect the
    null hypothesis to be rejected.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> stats.ks_1samp(stats.uniform.rvs(size=100, random_state=rng),
    ...                stats.norm.cdf)
    KstestResult(statistic=0.5001899973268688,
                 pvalue=1.1616392184763533e-23,
                 statistic_location=0.00047625268963724654,
                 statistic_sign=-1)

    Indeed, the p-value is lower than our threshold of 0.05, so we reject the
    null hypothesis in favor of the default "two-sided" alternative: the data
    are *not* distributed according to the standard normal.

    When testing random variates from the standard normal distribution, we
    expect the data to be consistent with the null hypothesis most of the time.

    >>> x = stats.norm.rvs(size=100, random_state=rng)
    >>> stats.ks_1samp(x, stats.norm.cdf)
    KstestResult(statistic=0.05345882212970396,
                 pvalue=0.9227159037744717,
                 statistic_location=-1.2451343873745018,
                 statistic_sign=1)

    As expected, the p-value of 0.92 is not below our threshold of 0.05, so
    we cannot reject the null hypothesis.

    Suppose, however, that the random variates are distributed according to
    a normal distribution that is shifted toward greater values. In this case,
    the cumulative density function (CDF) of the underlying distribution tends
    to be *less* than the CDF of the standard normal. Therefore, we would
    expect the null hypothesis to be rejected with ``alternative='less'``:

    >>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng)
    >>> stats.ks_1samp(x, stats.norm.cdf, alternative='less')
    KstestResult(statistic=0.17482387821055168,
                 pvalue=0.001913921057766743,
                 statistic_location=0.3713830565352756,
                 statistic_sign=-1)

    and indeed, with p-value smaller than our threshold, we reject the null
    hypothesis in favor of the alternative.
+ + """ + mode = method + + alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get( + alternative.lower()[0], alternative) + if alternative not in ['two-sided', 'greater', 'less']: + raise ValueError(f"Unexpected value {alternative=}") + + N = len(x) + x = np.sort(x) + cdfvals = cdf(x, *args) + np_one = np.int8(1) + + if alternative == 'greater': + Dplus, d_location = _compute_dplus(cdfvals, x) + return KstestResult(Dplus, distributions.ksone.sf(Dplus, N), + statistic_location=d_location, + statistic_sign=np_one) + + if alternative == 'less': + Dminus, d_location = _compute_dminus(cdfvals, x) + return KstestResult(Dminus, distributions.ksone.sf(Dminus, N), + statistic_location=d_location, + statistic_sign=-np_one) + + # alternative == 'two-sided': + Dplus, dplus_location = _compute_dplus(cdfvals, x) + Dminus, dminus_location = _compute_dminus(cdfvals, x) + if Dplus > Dminus: + D = Dplus + d_location = dplus_location + d_sign = np_one + else: + D = Dminus + d_location = dminus_location + d_sign = -np_one + + if mode == 'auto': # Always select exact + mode = 'exact' + if mode == 'exact': + prob = distributions.kstwo.sf(D, N) + elif mode == 'asymp': + prob = distributions.kstwobign.sf(D * np.sqrt(N)) + else: + # mode == 'approx' + prob = 2 * distributions.ksone.sf(D, N) + prob = np.clip(prob, 0, 1) + return KstestResult(D, prob, + statistic_location=d_location, + statistic_sign=d_sign) + + +Ks_2sampResult = KstestResult + + +def _compute_prob_outside_square(n, h): + """ + Compute the proportion of paths that pass outside the two diagonal lines. + + Parameters + ---------- + n : integer + n > 0 + h : integer + 0 <= h <= n + + Returns + ------- + p : float + The proportion of paths that pass outside the lines x-y = +/-h. + + """ + # Compute Pr(D_{n,n} >= h/n) + # Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... ) + # / binom(2n, n) + # This formulation exhibits subtractive cancellation. 
+ # Instead divide each term by binom(2n, n), then factor common terms + # and use a Horner-like algorithm + # P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...))))) + + P = 0.0 + k = int(np.floor(n / h)) + while k >= 0: + p1 = 1.0 + # Each of the Ai terms has numerator and denominator with + # h simple terms. + for j in range(h): + p1 = (n - k * h - j) * p1 / (n + k * h + j + 1) + P = p1 * (1.0 - P) + k -= 1 + return 2 * P + + +def _count_paths_outside_method(m, n, g, h): + """Count the number of paths that pass outside the specified diagonal. + + Parameters + ---------- + m : integer + m > 0 + n : integer + n > 0 + g : integer + g is greatest common divisor of m and n + h : integer + 0 <= h <= lcm(m,n) + + Returns + ------- + p : float + The number of paths that go low. + The calculation may overflow - check for a finite answer. + + Notes + ----- + Count the integer lattice paths from (0, 0) to (m, n), which at some + point (x, y) along the path, satisfy: + m*y <= n*x - h*g + The paths make steps of size +1 in either positive x or positive y + directions. + + We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk. + Hodges, J.L. Jr., + "The Significance Probability of the Smirnov Two-Sample Test," + Arkiv fiur Matematik, 3, No. 43 (1958), 469-86. + + """ + # Compute #paths which stay lower than x/m-y/n = h/lcm(m,n) + # B(x, y) = #{paths from (0,0) to (x,y) without + # previously crossing the boundary} + # = binom(x, y) - #{paths which already reached the boundary} + # Multiply by the number of path extensions going from (x, y) to (m, n) + # Sum. + + # Probability is symmetrical in m, n. Computation below assumes m >= n. + if m < n: + m, n = n, m + mg = m // g + ng = n // g + + # Not every x needs to be considered. + # xj holds the list of x values to be checked. + # Wherever n*x/m + ng*h crosses an integer + lxj = n + (mg-h)//mg + xj = [(h + mg * j + ng-1)//ng for j in range(lxj)] + # B is an array just holding a few values of B(x,y), the ones needed. 
+ # B[j] == B(x_j, j) + if lxj == 0: + return special.binom(m + n, n) + B = np.zeros(lxj) + B[0] = 1 + # Compute the B(x, y) terms + for j in range(1, lxj): + Bj = special.binom(xj[j] + j, j) + for i in range(j): + bin = special.binom(xj[j] - xj[i] + j - i, j-i) + Bj -= bin * B[i] + B[j] = Bj + # Compute the number of path extensions... + num_paths = 0 + for j in range(lxj): + bin = special.binom((m-xj[j]) + (n - j), n-j) + term = B[j] * bin + num_paths += term + return num_paths + + +def _attempt_exact_2kssamp(n1, n2, g, d, alternative): + """Attempts to compute the exact 2sample probability. + + n1, n2 are the sample sizes + g is the gcd(n1, n2) + d is the computed max difference in ECDFs + + Returns (success, d, probability) + """ + lcm = (n1 // g) * n2 + h = int(np.round(d * lcm)) + d = h * 1.0 / lcm + if h == 0: + return True, d, 1.0 + saw_fp_error, prob = False, np.nan + try: + with np.errstate(invalid="raise", over="raise"): + if alternative == 'two-sided': + if n1 == n2: + prob = _compute_prob_outside_square(n1, h) + else: + prob = _compute_outer_prob_inside_method(n1, n2, g, h) + else: + if n1 == n2: + # prob = binom(2n, n-h) / binom(2n, n) + # Evaluating in that form incurs roundoff errors + # from special.binom. 
                    # Instead calculate directly
                    jrange = np.arange(h)
                    prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
                else:
                    with np.errstate(over='raise'):
                        num_paths = _count_paths_outside_method(n1, n2, g, h)
                    bin = special.binom(n1 + n2, n1)
                    if num_paths > bin or np.isinf(bin):
                        saw_fp_error = True
                    else:
                        prob = num_paths / bin

    except (FloatingPointError, OverflowError):
        saw_fp_error = True

    if saw_fp_error:
        return False, d, np.nan
    if not (0 <= prob <= 1):
        # Numerically invalid probability; report failure with the raw value.
        return False, d, prob
    return True, d, prob


@_axis_nan_policy_factory(_tuple_to_KstestResult, n_samples=2, n_outputs=4,
                          result_to_tuple=_KstestResult_to_tuple)
@_rename_parameter("mode", "method")
def ks_2samp(data1, data2, alternative='two-sided', method='auto'):
    """
    Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.

    This test compares the underlying continuous distributions F(x) and G(x)
    of two independent samples.  See Notes for a description of the available
    null and alternative hypotheses.

    Parameters
    ----------
    data1, data2 : array_like, 1-Dimensional
        Two arrays of sample observations assumed to be drawn from a continuous
        distribution, sample sizes can be different.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes below.
    method : {'auto', 'exact', 'asymp'}, optional
        Defines the method used for calculating the p-value.
        The following options are available (default is 'auto'):

          * 'auto' : use 'exact' for small size arrays, 'asymp' for large
          * 'exact' : use exact distribution of test statistic
          * 'asymp' : use asymptotic distribution of test statistic

    Returns
    -------
    res: KstestResult
        An object containing attributes:

        statistic : float
            KS test statistic.
        pvalue : float
            One-tailed or two-tailed p-value.
        statistic_location : float
            Value from `data1` or `data2` corresponding with the KS statistic;
            i.e., the distance between the empirical distribution functions is
            measured at this observation.
        statistic_sign : int
            +1 if the empirical distribution function of `data1` exceeds
            the empirical distribution function of `data2` at
            `statistic_location`, otherwise -1.

    See Also
    --------
    kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp

    Notes
    -----
    There are three options for the null and corresponding alternative
    hypothesis that can be selected using the `alternative` parameter.

    - `less`: The null hypothesis is that F(x) >= G(x) for all x; the
      alternative is that F(x) < G(x) for at least one x. The statistic
      is the magnitude of the minimum (most negative) difference between the
      empirical distribution functions of the samples.

    - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
      alternative is that F(x) > G(x) for at least one x. The statistic
      is the maximum (most positive) difference between the empirical
      distribution functions of the samples.

    - `two-sided`: The null hypothesis is that the two distributions are
      identical, F(x)=G(x) for all x; the alternative is that they are not
      identical. The statistic is the maximum absolute difference between the
      empirical distribution functions of the samples.

    Note that the alternative hypotheses describe the *CDFs* of the
    underlying distributions, not the observed values of the data. For example,
    suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
    x1 tend to be less than those in x2.

    If the KS statistic is large, then the p-value will be small, and this may
    be taken as evidence against the null hypothesis in favor of the
    alternative.

    If ``method='exact'``, `ks_2samp` attempts to compute an exact p-value,
    that is, the probability under the null hypothesis of obtaining a test
    statistic value as extreme as the value computed from the data.
    If ``method='asymp'``, the asymptotic Kolmogorov-Smirnov distribution is
    used to compute an approximate p-value.
    If ``method='auto'``, an exact p-value computation is attempted if both
    sample sizes are less than 10000; otherwise, the asymptotic method is used.
    In any case, if an exact p-value calculation is attempted and fails, a
    warning will be emitted, and the asymptotic p-value will be returned.

    The 'two-sided' 'exact' computation computes the complementary probability
    and then subtracts from 1.  As such, the minimum probability it can return
    is about 1e-16.  While the algorithm itself is exact, numerical
    errors may accumulate for large sample sizes.  It is most suited to
    situations in which one of the sample sizes is only a few thousand.

    We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.

    References
    ----------
    .. [1] Hodges, J.L. Jr.,  "The Significance Probability of the Smirnov
           Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-486.

    Examples
    --------
    Suppose we wish to test the null hypothesis that two samples were drawn
    from the same distribution.
    We choose a confidence level of 95%; that is, we will reject the null
    hypothesis in favor of the alternative if the p-value is less than 0.05.

    If the first sample were drawn from a uniform distribution and the second
    were drawn from the standard normal, we would expect the null hypothesis
    to be rejected.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> sample1 = stats.uniform.rvs(size=100, random_state=rng)
    >>> sample2 = stats.norm.rvs(size=110, random_state=rng)
    >>> stats.ks_2samp(sample1, sample2)
    KstestResult(statistic=0.5454545454545454,
                 pvalue=7.37417839555191e-15,
                 statistic_location=-0.014071496412861274,
                 statistic_sign=-1)


    Indeed, the p-value is lower than our threshold of 0.05, so we reject the
    null hypothesis in favor of the default "two-sided" alternative: the data
    were *not* drawn from the same distribution.

    When both samples are drawn from the same distribution, we expect the data
    to be consistent with the null hypothesis most of the time.

    >>> sample1 = stats.norm.rvs(size=105, random_state=rng)
    >>> sample2 = stats.norm.rvs(size=95, random_state=rng)
    >>> stats.ks_2samp(sample1, sample2)
    KstestResult(statistic=0.10927318295739348,
                 pvalue=0.5438289009927495,
                 statistic_location=-0.1670157701848795,
                 statistic_sign=-1)

    As expected, the p-value of 0.54 is not below our threshold of 0.05, so
    we cannot reject the null hypothesis.

    Suppose, however, that the first sample were drawn from
    a normal distribution shifted toward greater values. In this case,
    the cumulative density function (CDF) of the underlying distribution tends
    to be *less* than the CDF underlying the second sample. Therefore, we would
    expect the null hypothesis to be rejected with ``alternative='less'``:

    >>> sample1 = stats.norm.rvs(size=105, loc=0.5, random_state=rng)
    >>> stats.ks_2samp(sample1, sample2, alternative='less')
    KstestResult(statistic=0.4055137844611529,
                 pvalue=3.5474563068855554e-08,
                 statistic_location=-0.13249370614972575,
                 statistic_sign=-1)

    and indeed, with p-value smaller than our threshold, we reject the null
    hypothesis in favor of the alternative.

    """
    mode = method

    if mode not in ['auto', 'exact', 'asymp']:
        raise ValueError(f'Invalid value for mode: {mode}')
    # Accept abbreviated alternatives ('t', 'g', 'l' prefixes).
    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
        alternative.lower()[0], alternative)
    if alternative not in ['two-sided', 'less', 'greater']:
        raise ValueError(f'Invalid value for alternative: {alternative}')
    MAX_AUTO_N = 10000  # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
    if np.ma.is_masked(data1):
        data1 = data1.compressed()
    if np.ma.is_masked(data2):
        data2 = data2.compressed()
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    if min(n1, n2) == 0:
        raise ValueError('Data passed to ks_2samp must not be empty')

    data_all = np.concatenate([data1, data2])
    # using searchsorted solves equal data problem
    cdf1 = np.searchsorted(data1, data_all, side='right') / n1
    cdf2 = np.searchsorted(data2, data_all, side='right') / n2
    cddiffs = cdf1 - cdf2

    # Identify the location of the statistic
    argminS = np.argmin(cddiffs)
    argmaxS = np.argmax(cddiffs)
    loc_minS = data_all[argminS]
    loc_maxS = data_all[argmaxS]

    # Ensure sign of minS is not negative.
    minS = np.clip(-cddiffs[argminS], 0, 1)
    maxS = cddiffs[argmaxS]

    if alternative == 'less' or (alternative == 'two-sided' and minS > maxS):
        d = minS
        d_location = loc_minS
        d_sign = -1
    else:
        d = maxS
        d_location = loc_maxS
        d_sign = 1
    g = gcd(n1, n2)
    n1g = n1 // g
    n2g = n2 // g
    # Sentinel; replaced by either the exact or the asymptotic computation.
    prob = -np.inf
    if mode == 'auto':
        mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
    elif mode == 'exact':
        # If lcm(n1, n2) is too big, switch from exact to asymp
        if n1g >= np.iinfo(np.int32).max / n2g:
            mode = 'asymp'
            warnings.warn(
                f"Exact ks_2samp calculation not possible with samples sizes "
                f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning,
                stacklevel=3)

    if mode == 'exact':
        success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
        if not success:
            mode = 'asymp'
            warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
                          f"Switching to method={mode}.", RuntimeWarning,
                          stacklevel=3)

    if mode == 'asymp':
        # The product n1*n2 is large.  Use Smirnov's asymptotic formula.
        # Ensure float to avoid overflow in multiplication
        # sorted because the one-sided formula is not symmetric in n1, n2
        m, n = sorted([float(n1), float(n2)], reverse=True)
        en = m * n / (m + n)
        if alternative == 'two-sided':
            prob = distributions.kstwo.sf(d, np.round(en))
        else:
            z = np.sqrt(en) * d
            # Use Hodges' suggested approximation Eqn 5.3
            # Requires m to be the larger of (n1, n2)
            expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
            prob = np.exp(expt)

    prob = np.clip(prob, 0, 1)
    # Currently, `d` is a Python float. We want it to be a NumPy type, so
    # float64 is appropriate. An enhancement would be for `d` to respect the
    # dtype of the input.
    return KstestResult(np.float64(d), prob, statistic_location=d_location,
                        statistic_sign=np.int8(d_sign))


def _parse_kstest_args(data1, data2, args, N):
    # kstest allows many different variations of arguments.
    # Pull out the parsing into a separate function
    # (xvals, yvals, )  # 2sample
    # (xvals, cdf function,..)
    # (xvals, name of distribution, ...)
    # (name of distribution, name of distribution, ...)

    # Returns xvals, yvals, cdf
    # where cdf is a cdf function, or None
    # and yvals is either an array_like of values, or None
    # and xvals is array_like.
    rvsfunc, cdf = None, None
    # `data1` may be a distribution name, a sampler callable, or the sample
    # itself; a name/callable means we must generate the sample below.
    if isinstance(data1, str):
        rvsfunc = getattr(distributions, data1).rvs
    elif callable(data1):
        rvsfunc = data1

    # `data2` may be a distribution name or CDF callable (one-sample test) or
    # observed values (two-sample test, left in `data2`).
    if isinstance(data2, str):
        cdf = getattr(distributions, data2).cdf
        data2 = None
    elif callable(data2):
        cdf = data2
        data2 = None

    data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
    return data1, data2, cdf


def _kstest_n_samples(kwargs):
    # One sample when `cdf` names/implements a distribution; otherwise `cdf`
    # holds a second data sample and the test is two-sample.
    cdf = kwargs['cdf']
    return 1 if (isinstance(cdf, str) or callable(cdf)) else 2


@_axis_nan_policy_factory(_tuple_to_KstestResult, n_samples=_kstest_n_samples,
                          n_outputs=4, result_to_tuple=_KstestResult_to_tuple)
@_rename_parameter("mode", "method")
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', method='auto'):
    """
    Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for
    goodness of fit.

    The one-sample test compares the underlying distribution F(x) of a sample
    against a given distribution G(x). The two-sample test compares the
    underlying distributions of two independent samples. Both tests are valid
    only for continuous distributions.

    Parameters
    ----------
    rvs : str, array_like, or callable
        If an array, it should be a 1-D array of observations of random
        variables.
        If a callable, it should be a function to generate random variables;
        it is required to have a keyword argument `size`.
        If a string, it should be the name of a distribution in `scipy.stats`,
        which will be used to generate random variables.
    cdf : str, array_like or callable
        If array_like, it should be a 1-D array of observations of random
        variables, and the two-sample test is performed
        (and rvs must be array_like).
        If a callable, that callable is used to calculate the cdf.
        If a string, it should be the name of a distribution in `scipy.stats`,
        which will be used as the cdf function.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings or
        callables.
    N : int, optional
        Sample size if `rvs` is string or callable. Default is 20.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes below.
    method : {'auto', 'exact', 'approx', 'asymp'}, optional
        Defines the distribution used for calculating the p-value.
        The following options are available (default is 'auto'):

        * 'auto' : selects one of the other options.
        * 'exact' : uses the exact distribution of test statistic.
        * 'approx' : approximates the two-sided probability with twice the
          one-sided probability
        * 'asymp': uses asymptotic distribution of test statistic

    Returns
    -------
    res: KstestResult
        An object containing attributes:

        statistic : float
            KS test statistic, either D+, D-, or D (the maximum of the two)
        pvalue : float
            One-tailed or two-tailed p-value.
        statistic_location : float
            In a one-sample test, this is the value of `rvs`
            corresponding with the KS statistic; i.e., the distance between
            the empirical distribution function and the hypothesized cumulative
            distribution function is measured at this observation.

            In a two-sample test, this is the value from `rvs` or `cdf`
            corresponding with the KS statistic; i.e., the distance between
            the empirical distribution functions is measured at this
            observation.
        statistic_sign : int
            In a one-sample test, this is +1 if the KS statistic is the
            maximum positive difference between the empirical distribution
            function and the hypothesized cumulative distribution function
            (D+); it is -1 if the KS statistic is the maximum negative
            difference (D-).

            In a two-sample test, this is +1 if the empirical distribution
            function of `rvs` exceeds the empirical distribution
            function of `cdf` at `statistic_location`, otherwise -1.

    See Also
    --------
    ks_1samp, ks_2samp

    Notes
    -----
    There are three options for the null and corresponding alternative
    hypothesis that can be selected using the `alternative` parameter.

    - `two-sided`: The null hypothesis is that the two distributions are
      identical, F(x)=G(x) for all x; the alternative is that they are not
      identical.

    - `less`: The null hypothesis is that F(x) >= G(x) for all x; the
      alternative is that F(x) < G(x) for at least one x.

    - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
      alternative is that F(x) > G(x) for at least one x.

    Note that the alternative hypotheses describe the *CDFs* of the
    underlying distributions, not the observed values. For example,
    suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
    x1 tend to be less than those in x2.


    Examples
    --------
    Suppose we wish to test the null hypothesis that a sample is distributed
    according to the standard normal.
    We choose a confidence level of 95%; that is, we will reject the null
    hypothesis in favor of the alternative if the p-value is less than 0.05.

    When testing uniformly distributed data, we would expect the
    null hypothesis to be rejected.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> stats.kstest(stats.uniform.rvs(size=100, random_state=rng),
    ...              stats.norm.cdf)
    KstestResult(statistic=0.5001899973268688,
                 pvalue=1.1616392184763533e-23,
                 statistic_location=0.00047625268963724654,
                 statistic_sign=-1)

    Indeed, the p-value is lower than our threshold of 0.05, so we reject the
    null hypothesis in favor of the default "two-sided" alternative: the data
    are *not* distributed according to the standard normal.

    When testing random variates from the standard normal distribution, we
    expect the data to be consistent with the null hypothesis most of the time.

    >>> x = stats.norm.rvs(size=100, random_state=rng)
    >>> stats.kstest(x, stats.norm.cdf)
    KstestResult(statistic=0.05345882212970396,
                 pvalue=0.9227159037744717,
                 statistic_location=-1.2451343873745018,
                 statistic_sign=1)


    As expected, the p-value of 0.92 is not below our threshold of 0.05, so
    we cannot reject the null hypothesis.

    Suppose, however, that the random variates are distributed according to
    a normal distribution that is shifted toward greater values. In this case,
    the cumulative distribution function (CDF) of the underlying distribution
    tends to be *less* than the CDF of the standard normal. Therefore, we would
    expect the null hypothesis to be rejected with ``alternative='less'``:

    >>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng)
    >>> stats.kstest(x, stats.norm.cdf, alternative='less')
    KstestResult(statistic=0.17482387821055168,
                 pvalue=0.001913921057766743,
                 statistic_location=0.3713830565352756,
                 statistic_sign=-1)

    and indeed, with p-value smaller than our threshold, we reject the null
    hypothesis in favor of the alternative.

    For convenience, the previous test can be performed using the name of the
    distribution as the second argument.

    >>> stats.kstest(x, "norm", alternative='less')
    KstestResult(statistic=0.17482387821055168,
                 pvalue=0.001913921057766743,
                 statistic_location=0.3713830565352756,
                 statistic_sign=-1)

    The examples above have all been one-sample tests identical to those
    performed by `ks_1samp`. Note that `kstest` can also perform two-sample
    tests identical to those performed by `ks_2samp`. For example, when two
    samples are drawn from the same distribution, we expect the data to be
    consistent with the null hypothesis most of the time.

    >>> sample1 = stats.laplace.rvs(size=105, random_state=rng)
    >>> sample2 = stats.laplace.rvs(size=95, random_state=rng)
    >>> stats.kstest(sample1, sample2)
    KstestResult(statistic=0.11779448621553884,
                 pvalue=0.4494256912629795,
                 statistic_location=0.6138814275424155,
                 statistic_sign=1)

    As expected, the p-value of 0.45 is not below our threshold of 0.05, so
    we cannot reject the null hypothesis.

    """
    # to not break compatibility with existing code
    if alternative == 'two_sided':
        alternative = 'two-sided'
    if alternative not in ['two-sided', 'greater', 'less']:
        raise ValueError(f"Unexpected alternative: {alternative}")
    # Normalize the many argument variations into (sample, optional second
    # sample, optional CDF callable).
    xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
    if cdf:
        # One-sample test against the supplied/looked-up CDF.
        return ks_1samp(xvals, cdf, args=args, alternative=alternative,
                        method=method, _no_deco=True)
    # Otherwise `cdf` carried data: two-sample test.
    return ks_2samp(xvals, yvals, alternative=alternative, method=method,
                    _no_deco=True)


def tiecorrect(rankvals):
    """Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.

    Parameters
    ----------
    rankvals : array_like
        A 1-D sequence of ranks. Typically this will be the array
        returned by `~scipy.stats.rankdata`.

    Returns
    -------
    factor : float
        Correction factor for U or H.

    See Also
    --------
    rankdata : Assign ranks to the data
    mannwhitneyu : Mann-Whitney rank test
    kruskal : Kruskal-Wallis H test

    References
    ----------
    .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
           Sciences. New York: McGraw-Hill.

    Examples
    --------
    >>> from scipy.stats import tiecorrect, rankdata
    >>> tiecorrect([1, 2.5, 2.5, 4])
    0.9
    >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
    >>> ranks
    array([ 1. ,  4. ,  2.5,  5.5,  7. ,  8. ,  2.5,  9. ,  5.5])
    >>> tiecorrect(ranks)
    0.9833333333333333

    """
    arr = np.sort(rankvals)
    # Indices where each run of equal ranks starts (sentinels at both ends),
    # so consecutive differences give the size of every tie group.
    idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
    cnt = np.diff(idx).astype(np.float64)

    size = np.float64(arr.size)
    # 1 - sum(t^3 - t) / (n^3 - n); fewer than two ranks needs no correction.
    return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)


RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))


@_axis_nan_policy_factory(RanksumsResult, n_samples=2)
def ranksums(x, y, alternative='two-sided'):
    """Compute the Wilcoxon rank-sum statistic for two samples.

    The Wilcoxon rank-sum test tests the null hypothesis that two sets
    of measurements are drawn from the same distribution. The alternative
    hypothesis is that values in one sample are more likely to be
    larger than the values in the other sample.

    This test should be used to compare two samples from continuous
    distributions. It does not handle ties between measurements
    in x and y. For tie-handling and an optional continuity correction
    see `scipy.stats.mannwhitneyu`.

    Parameters
    ----------
    x,y : array_like
        The data from the two samples.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': one of the distributions (underlying `x` or `y`) is
          stochastically greater than the other.
        * 'less': the distribution underlying `x` is stochastically less
          than the distribution underlying `y`.
        * 'greater': the distribution underlying `x` is stochastically greater
          than the distribution underlying `y`.

        .. versionadded:: 1.7.0

    Returns
    -------
    statistic : float
        The test statistic under the large-sample approximation that the
        rank sum statistic is normally distributed.
    pvalue : float
        The p-value of the test.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test

    Examples
    --------
    We can test the hypothesis that two independent unequal-sized samples are
    drawn from the same distribution with computing the Wilcoxon rank-sum
    statistic.

    >>> import numpy as np
    >>> from scipy.stats import ranksums
    >>> rng = np.random.default_rng()
    >>> sample1 = rng.uniform(-1, 1, 200)
    >>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
    >>> ranksums(sample1, sample2)
    RanksumsResult(statistic=-7.887059,
                   pvalue=3.09390448e-15) # may vary
    >>> ranksums(sample1, sample2, alternative='less')
    RanksumsResult(statistic=-7.750585297581713,
                   pvalue=4.573497606342543e-15) # may vary
    >>> ranksums(sample1, sample2, alternative='greater')
    RanksumsResult(statistic=-7.750585297581713,
                   pvalue=0.9999999999999954) # may vary

    The p-value of less than ``0.05`` indicates that this test rejects the
    hypothesis at the 5% significance level.

    """
    x, y = map(np.asarray, (x, y))
    n1 = len(x)
    n2 = len(y)
    # Rank the pooled data; the statistic uses only the ranks assigned to `x`.
    alldata = np.concatenate((x, y))
    ranked = rankdata(alldata)
    x = ranked[:n1]
    s = np.sum(x, axis=0)
    # Normal approximation: mean and standard deviation of the rank sum of
    # `x` under the null hypothesis.
    expected = n1 * (n1+n2+1) / 2.0
    z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
    pvalue = _get_pvalue(z, _SimpleNormal(), alternative, xp=np)

    return RanksumsResult(z[()], pvalue[()])


KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))


@_axis_nan_policy_factory(KruskalResult, n_samples=None)
def kruskal(*samples, nan_policy='propagate'):
    """Compute the Kruskal-Wallis H-test for independent samples.

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal. It is a non-parametric version of
    ANOVA. The test works on 2 or more independent samples, which may have
    different sizes. Note that rejecting the null hypothesis does not
    indicate which of the groups differs.
    Post hoc comparisons between
    groups are required to determine which groups are different.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments. Samples must be one-dimensional.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float
        The Kruskal-Wallis H statistic, corrected for ties.
    pvalue : float
        The p-value for the test using the assumption that H has a chi
        square distribution. The p-value returned is the survival function of
        the chi square distribution evaluated at H.

    See Also
    --------
    f_oneway : 1-way ANOVA.
    mannwhitneyu : Mann-Whitney rank test on two samples.
    friedmanchisquare : Friedman test for repeated measurements.

    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small. A typical rule is
    that each sample must have at least 5 measurements.

    References
    ----------
    .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
       One-Criterion Variance Analysis", Journal of the American Statistical
       Association, Vol. 47, Issue 260, pp. 583-621, 1952.
    .. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 3, 5, 7, 9]
    >>> y = [2, 4, 6, 8, 10]
    >>> stats.kruskal(x, y)
    KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)

    >>> x = [1, 1, 1]
    >>> y = [2, 2, 2]
    >>> z = [2, 2]
    >>> stats.kruskal(x, y, z)
    KruskalResult(statistic=7.0, pvalue=0.0301973834223185)

    """
    samples = list(map(np.asarray, samples))

    num_groups = len(samples)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")

    n = np.asarray(list(map(len, samples)))

    # Rank the pooled data once; the statistic is built from per-group
    # rank sums.
    alldata = np.concatenate(samples)
    ranked = rankdata(alldata)
    ties = tiecorrect(ranked)
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')

    # Compute sum^2/n for each group and sum
    j = np.insert(np.cumsum(n), 0, 0)
    ssbn = 0
    for i in range(num_groups):
        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]

    totaln = np.sum(n, dtype=float)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = num_groups - 1
    # Divide by the tie-correction factor so H stays comparable to the
    # chi-square reference distribution when ranks are tied.
    h /= ties

    chi2 = _SimpleChi2(df)
    pvalue = _get_pvalue(h, chi2, alternative='greater', symmetric=False, xp=np)
    return KruskalResult(h, pvalue)


FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                     ('statistic', 'pvalue'))


@_axis_nan_policy_factory(FriedmanchisquareResult, n_samples=None, paired=True)
def friedmanchisquare(*samples):
    """Compute the Friedman test for repeated samples.

    The Friedman test tests the null hypothesis that repeated samples of
    the same individuals have the same distribution. It is often used
    to test for consistency among samples obtained in different ways.
    For example, if two sampling techniques are used on the same set of
    individuals, the Friedman test can be used to determine if the two
    sampling techniques are consistent.

    Parameters
    ----------
    sample1, sample2, sample3... : array_like
        Arrays of observations. All of the arrays must have the same number
        of elements. At least three samples must be given.

    Returns
    -------
    statistic : float
        The test statistic, correcting for ties.
    pvalue : float
        The associated p-value assuming that the test statistic has a chi
        squared distribution.

    See Also
    --------
    :ref:`hypothesis_friedmanchisquare` : Extended example

    Notes
    -----
    Due to the assumption that the test statistic has a chi squared
    distribution, the p-value is only reliable for n > 10 and more than
    6 repeated samples.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Friedman_test
    .. [2] Demsar, J. (2006). Statistical comparisons of classifiers over
           multiple data sets. Journal of Machine Learning Research, 7, 1-30.

    Examples
    --------

    >>> import numpy as np
    >>> rng = np.random.default_rng(seed=18)
    >>> x = rng.random((6, 10))
    >>> from scipy.stats import friedmanchisquare
    >>> res = friedmanchisquare(x[0], x[1], x[2], x[3], x[4], x[5])
    >>> res.statistic, res.pvalue
    (11.428571428571416, 0.043514520866727614)

    The p-value is less than 0.05; however, as noted above, the results may not
    be reliable since we have a small number of repeated samples.

    For a more detailed example, see :ref:`hypothesis_friedmanchisquare`.
    """
    k = len(samples)
    if k < 3:
        raise ValueError('At least 3 sets of samples must be given '
                         f'for Friedman test, got {k}.')

    n = len(samples[0])
    for i in range(1, k):
        if len(samples[i]) != n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')

    # Rank data
    data = np.vstack(samples).T
    data = data.astype(float)
    for i in range(len(data)):
        data[i] = rankdata(data[i])

    # Handle ties
    ties = 0
    for d in data:
        _, repnum = _find_repeats(np.array(d, dtype=np.float64))
        for t in repnum:
            ties += t * (t*t - 1)
    c = 1 - ties / (k*(k*k - 1)*n)

    # Friedman chi-square statistic from squared column rank sums, with the
    # tie correction factor `c` applied.
    ssbn = np.sum(data.sum(axis=0)**2)
    statistic = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c

    chi2 = _SimpleChi2(k - 1)
    pvalue = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=np)
    return FriedmanchisquareResult(statistic, pvalue)


BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
                                 ('statistic', 'pvalue'))


@_axis_nan_policy_factory(BrunnerMunzelResult, n_samples=2)
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
                  nan_policy='propagate'):
    """Compute the Brunner-Munzel test on samples x and y.

    The Brunner-Munzel test is a nonparametric test of the null hypothesis that
    when values are taken one by one from each group, the probabilities of
    getting large values in both groups are equal.
    Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the
    assumption of equivariance of two groups. Note that this does not assume
    the distributions are same. This test works on two independent samples,
    which may have different sizes.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided'
        * 'less': one-sided
        * 'greater': one-sided
    distribution : {'t', 'normal'}, optional
        Defines how to get the p-value.
        The following options are available (default is 't'):

        * 't': get the p-value by t-distribution
        * 'normal': get the p-value by standard normal distribution.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float
        The Brunner-Munzer W statistic.
    pvalue : float
        p-value assuming an t distribution. One-sided or
        two-sided, depending on the choice of `alternative` and `distribution`.

    See Also
    --------
    mannwhitneyu : Mann-Whitney rank test on two samples.

    Notes
    -----
    Brunner and Munzel recommended to estimate the p-value by t-distribution
    when the size of data is 50 or less. If the size is lower than 10, it would
    be better to use permuted Brunner Munzel test (see [2]_).

    References
    ----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Benhrens-Fisher
           problem: Asymptotic theory and a small-sample approximation".
           Biometrical Journal. Vol. 42(2000): 17-25.
    .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
           non-parametric Behrens-Fisher problem". Computational Statistics and
           Data Analysis. Vol. 51(2007): 5192-5204.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
    >>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
    >>> w, p_value = stats.brunnermunzel(x1, x2)
    >>> w
    3.1374674823029505
    >>> p_value
    0.0057862086661515377

    """
    nx = len(x)
    ny = len(y)

    # Ranks within the pooled sample and within each sample; the statistic is
    # driven by the difference of mean pooled ranks between the groups.
    rankc = rankdata(np.concatenate((x, y)))
    rankcx = rankc[0:nx]
    rankcy = rankc[nx:nx+ny]
    rankcx_mean = np.mean(rankcx)
    rankcy_mean = np.mean(rankcy)
    rankx = rankdata(x)
    ranky = rankdata(y)
    rankx_mean = np.mean(rankx)
    ranky_mean = np.mean(ranky)

    # Variance estimates of the centered placement values (pooled rank minus
    # within-sample rank) for each group.
    Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
    Sx /= nx - 1
    Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
    Sy /= ny - 1

    wbfn = nx * ny * (rankcy_mean - rankcx_mean)
    wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)

    if distribution == "t":
        # Degrees of freedom estimated from the two variance components.
        df_numer = np.power(nx * Sx + ny * Sy, 2.0)
        df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
        df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
        df = df_numer / df_denom

        if (df_numer == 0) and (df_denom == 0):
            message = ("p-value cannot be estimated with `distribution='t' "
                       "because degrees of freedom parameter is undefined "
                       "(0/0). Try using `distribution='normal'")
            warnings.warn(message, RuntimeWarning, stacklevel=2)

        distribution = _SimpleStudentT(df)
    elif distribution == "normal":
        distribution = _SimpleNormal()
    else:
        raise ValueError(
            "distribution should be 't' or 'normal'")

    p = _get_pvalue(-wbfn, distribution, alternative, xp=np)

    return BrunnerMunzelResult(wbfn, p)


@_axis_nan_policy_factory(SignificanceResult, kwd_samples=['weights'], paired=True)
def combine_pvalues(pvalues, method='fisher', weights=None, *, axis=0):
    """
    Combine p-values from independent tests that bear upon the same hypothesis.

    These methods are intended only for combining p-values from hypothesis
    tests based upon continuous distributions.

    Each method assumes that under the null hypothesis, the p-values are
    sampled independently and uniformly from the interval [0, 1]. A test
    statistic (different for each method) is computed and a combined
    p-value is calculated based upon the distribution of this test statistic
    under the null hypothesis.

    Parameters
    ----------
    pvalues : array_like
        Array of p-values assumed to come from independent tests based on
        continuous distributions.
    method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}

        Name of method to use to combine p-values.

        The available methods are (see Notes for details):

        * 'fisher': Fisher's method (Fisher's combined probability test)
        * 'pearson': Pearson's method
        * 'mudholkar_george': Mudholkar's and George's method
        * 'tippett': Tippett's method
        * 'stouffer': Stouffer's Z-score method
    weights : array_like, optional
        Optional array of weights used only for Stouffer's Z-score method.
        Ignored by other methods.

    Returns
    -------
    res : SignificanceResult
        An object containing attributes:

        statistic : float
            The statistic calculated by the specified method.
        pvalue : float
            The combined p-value.

    Examples
    --------
    Suppose we wish to combine p-values from four independent tests
    of the same null hypothesis using Fisher's method (default).

    >>> from scipy.stats import combine_pvalues
    >>> pvalues = [0.1, 0.05, 0.02, 0.3]
    >>> combine_pvalues(pvalues)
    SignificanceResult(statistic=20.828626352604235, pvalue=0.007616871850449092)

    When the individual p-values carry different weights, consider Stouffer's
    method.

    >>> weights = [1, 2, 3, 4]
    >>> res = combine_pvalues(pvalues, method='stouffer', weights=weights)
    >>> res.pvalue
    0.009578891494533616

    Notes
    -----
    If this function is applied to tests with a discrete statistics such as
    any rank test or contingency-table test, it will yield systematically
    wrong results, e.g. Fisher's method will systematically overestimate the
    p-value [1]_. This problem becomes less severe for large sample sizes
    when the discrete distributions become approximately continuous.

    The differences between the methods can be best illustrated by their
    statistics and what aspects of a combination of p-values they emphasise
    when considering significance [2]_. For example, methods emphasising large
    p-values are more sensitive to strong false and true negatives; conversely
    methods focussing on small p-values are sensitive to positives.

    * The statistics of Fisher's method (also known as Fisher's combined
      probability test) [3]_ is :math:`-2\\sum_i \\log(p_i)`, which is
      equivalent (as a test statistics) to the product of individual p-values:
      :math:`\\prod_i p_i`. Under the null hypothesis, this statistics follows
      a :math:`\\chi^2` distribution. This method emphasises small p-values.
    * Pearson's method uses :math:`-2\\sum_i\\log(1-p_i)`, which is equivalent
      to :math:`\\prod_i \\frac{1}{1-p_i}` [2]_.
      It thus emphasises large p-values.
    * Mudholkar and George compromise between Fisher's and Pearson's method by
      averaging their statistics [4]_. Their method emphasises extreme
      p-values, both close to 1 and 0.
    * Stouffer's method [5]_ uses Z-scores and the statistic:
      :math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the
      standard normal distribution. The advantage of this method is that it is
      straightforward to introduce weights, which can make Stouffer's method
      more powerful than Fisher's method when the p-values are from studies
      of different size [6]_ [7]_.
    * Tippett's method uses the smallest p-value as a statistic.
      (Mind that this minimum is not the combined p-value.)

    Fisher's method may be extended to combine p-values from dependent tests
    [8]_. Extensions such as Brown's method and Kost's method are not currently
    implemented.

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete
           Distributions." Journal of the American Statistical Association 57,
           no. 297 (1962), 10-19.
    .. [2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
           combining p-values." Biometrika 105.1 (2018): 239-246.
    .. [3] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
           random variables." Metrika 30.1 (1983): 1-13.
    .. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
    .. [6] Whitlock, M. C. "Combining probability from independent tests: the
           weighted Z-method is superior to Fisher's approach." Journal of
           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
    .. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
           for combining probabilities in meta-analysis." Journal of
           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
    .. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method

    """
    xp = array_namespace(pvalues)
    pvalues = xp.asarray(pvalues)
    if xp_size(pvalues) == 0:
        # This is really only needed for *testing* _axis_nan_policy decorator
        # It won't happen when the decorator is used.
        NaN = _get_nan(pvalues)
        return SignificanceResult(NaN, NaN)

    n = pvalues.shape[axis]
    # used to convert Python scalar to the right dtype
    one = xp.asarray(1, dtype=pvalues.dtype)

    if method == 'fisher':
        # -2*sum(log p) ~ chi-square with 2n degrees of freedom.
        statistic = -2 * xp.sum(xp.log(pvalues), axis=axis)
        chi2 = _SimpleChi2(2*n*one)
        pval = _get_pvalue(statistic, chi2, alternative='greater',
                           symmetric=False, xp=xp)
    elif method == 'pearson':
        # Mirror image of Fisher: uses log(1 - p) and the lower chi2 tail.
        statistic = 2 * xp.sum(xp.log1p(-pvalues), axis=axis)
        chi2 = _SimpleChi2(2*n*one)
        pval = _get_pvalue(-statistic, chi2, alternative='less', symmetric=False, xp=xp)
    elif method == 'mudholkar_george':
        # Average of the Fisher and Pearson statistics, referred to a scaled
        # Student t distribution.
        normalizing_factor = math.sqrt(3/n)/xp.pi
        statistic = (-xp.sum(xp.log(pvalues), axis=axis)
                     + xp.sum(xp.log1p(-pvalues), axis=axis))
        nu = 5*n + 4
        approx_factor = math.sqrt(nu / (nu - 2))
        t = _SimpleStudentT(nu*one)
        pval = _get_pvalue(statistic * normalizing_factor * approx_factor, t,
                           alternative="greater", xp=xp)
    elif method == 'tippett':
        # Minimum p-value, referred to a Beta(1, n) distribution.
        statistic = xp.min(pvalues, axis=axis)
        beta = _SimpleBeta(one, n*one)
        pval = _get_pvalue(statistic, beta, alternative='less', symmetric=False, xp=xp)
    elif method == 'stouffer':
        if weights is None:
            weights = xp.ones_like(pvalues, dtype=pvalues.dtype)
        elif weights.shape[axis] != n:
            raise ValueError("pvalues and weights must be of the same "
                             "length along `axis`.")

        norm = _SimpleNormal()
        Zi = norm.isf(pvalues)
        # could use `einsum` or clever `matmul` for performance,
        # but this is the most readable
        statistic = (xp.sum(weights * Zi, axis=axis)
                     / xp_vector_norm(weights, axis=axis))
        pval = _get_pvalue(statistic, norm, alternative="greater", xp=xp)

    else:
        raise ValueError(
            f"Invalid method {method!r}. Valid methods are 'fisher', "
            "'pearson', 'mudholkar_george', 'tippett', and 'stouffer'"
        )

    return SignificanceResult(statistic, pval)


@dataclass
class QuantileTestResult:
    r"""
    Result of `scipy.stats.quantile_test`.

    Attributes
    ----------
    statistic: float
        The statistic used to calculate the p-value; either ``T1``, the
        number of observations less than or equal to the hypothesized quantile,
        or ``T2``, the number of observations strictly less than the
        hypothesized quantile. Two test statistics are required to handle the
        possibility the data was generated from a discrete or mixed
        distribution.

    statistic_type : int
        ``1`` or ``2`` depending on which of ``T1`` or ``T2`` was used to
        calculate the p-value respectively. ``T1`` corresponds to the
        ``"greater"`` alternative hypothesis and ``T2`` to the ``"less"``. For
        the ``"two-sided"`` case, the statistic type that leads to smallest
        p-value is used. For significant tests, ``statistic_type = 1`` means
        there is evidence that the population quantile is significantly greater
        than the hypothesized value and ``statistic_type = 2`` means there is
        evidence that it is significantly less than the hypothesized value.

    pvalue : float
        The p-value of the hypothesis test.
    """
    statistic: float
    statistic_type: int
    pvalue: float
    # Private fields consumed by `confidence_interval` (hidden from repr):
    # requested alternative, the original sample, and the probability `p`.
    _alternative: list[str] = field(repr=False)
    _x : np.ndarray = field(repr=False)
    _p : float = field(repr=False)

    def confidence_interval(self, confidence_level=0.95):
        """
        Compute the confidence interval of the quantile.

        Parameters
        ----------
        confidence_level : float, default: 0.95
            Confidence level for the computed confidence interval
            of the quantile. Default is 0.95.

        Returns
        -------
        ci : ``ConfidenceInterval`` object
            The object has attributes ``low`` and ``high`` that hold the
            lower and upper bounds of the confidence interval.

        Examples
        --------
        >>> import numpy as np
        >>> import scipy.stats as stats
        >>> p = 0.75  # quantile of interest
        >>> q = 0  # hypothesized value of the quantile
        >>> x = np.exp(np.arange(0, 1.01, 0.01))
        >>> res = stats.quantile_test(x, q=q, p=p, alternative='less')
        >>> lb, ub = res.confidence_interval()
        >>> lb, ub
        (-inf, 2.293318740264183)
        >>> res = stats.quantile_test(x, q=q, p=p, alternative='two-sided')
        >>> lb, ub = res.confidence_interval(0.9)
        >>> lb, ub
        (1.9542373206359396, 2.293318740264183)
        """

        alternative = self._alternative
        p = self._p
        x = np.sort(self._x)
        n = len(x)
        # Order-statistic bounds come from the binomial distribution of the
        # count of observations below the true quantile.
        bd = stats.binom(n, p)

        if confidence_level <= 0 or confidence_level >= 1:
            message = "`confidence_level` must be a number between 0 and 1."
            raise ValueError(message)

        low_index = np.nan
        high_index = np.nan

        if alternative == 'less':
            # One-sided: all tail mass on the upper bound.
            p = 1 - confidence_level
            low = -np.inf
            high_index = int(bd.isf(p))
            high = x[high_index] if high_index < n else np.nan
        elif alternative == 'greater':
            # One-sided: all tail mass on the lower bound.
            p = 1 - confidence_level
            low_index = int(bd.ppf(p)) - 1
            low = x[low_index] if low_index >= 0 else np.nan
            high = np.inf
        elif alternative == 'two-sided':
            # Split the tail mass evenly between the two bounds.
            p = (1 - confidence_level) / 2
            low_index = int(bd.ppf(p)) - 1
            low = x[low_index] if low_index >= 0 else np.nan
            high_index = int(bd.isf(p))
            high = x[high_index] if high_index < n else np.nan

        return ConfidenceInterval(low, high)


def quantile_test_iv(x, q, p, alternative):
    # Input validation for `quantile_test`; returns the validated arguments.

    x = np.atleast_1d(x)
    message = '`x` must be a one-dimensional array of numbers.'
    if x.ndim != 1 or not np.issubdtype(x.dtype, np.number):
        raise ValueError(message)

    q = np.array(q)[()]
    message = "`q` must be a scalar."
    if q.ndim != 0 or not np.issubdtype(q.dtype, np.number):
        raise ValueError(message)

    p = np.array(p)[()]
    message = "`p` must be a float strictly between 0 and 1."
    if p.ndim != 0 or p >= 1 or p <= 0:
        raise ValueError(message)

    alternatives = {'two-sided', 'less', 'greater'}
    message = f"`alternative` must be one of {alternatives}"
    if alternative not in alternatives:
        raise ValueError(message)

    return x, q, p, alternative


def quantile_test(x, *, q=0, p=0.5, alternative='two-sided'):
    r"""
    Perform a quantile test and compute a confidence interval of the quantile.

    This function tests the null hypothesis that `q` is the value of the
    quantile associated with probability `p` of the population underlying
    sample `x`. For example, with default parameters, it tests that the
    median of the population underlying `x` is zero. The function returns an
    object including the test statistic, a p-value, and a method for computing
    the confidence interval around the quantile.

    Parameters
    ----------
    x : array_like
        A one-dimensional sample.
    q : float, default: 0
        The hypothesized value of the quantile.
    p : float, default: 0.5
        The probability associated with the quantile; i.e. the proportion of
        the population less than `q` is `p`. Must be strictly between 0 and
        1.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided': the quantile associated with the probability `p`
          is not `q`.
        * 'less': the quantile associated with the probability `p` is less
          than `q`.
        * 'greater': the quantile associated with the probability `p` is
          greater than `q`.

    Returns
    -------
    result : QuantileTestResult
        An object with the following attributes:

        statistic : float
            One of two test statistics that may be used in the quantile test.
            The first test statistic, ``T1``, is the proportion of samples in
            `x` that are less than or equal to the hypothesized quantile
            `q`.
The second test statistic, ``T2``, is the proportion of + samples in `x` that are strictly less than the hypothesized + quantile `q`. + + When ``alternative = 'greater'``, ``T1`` is used to calculate the + p-value and ``statistic`` is set to ``T1``. + + When ``alternative = 'less'``, ``T2`` is used to calculate the + p-value and ``statistic`` is set to ``T2``. + + When ``alternative = 'two-sided'``, both ``T1`` and ``T2`` are + considered, and the one that leads to the smallest p-value is used. + + statistic_type : int + Either `1` or `2` depending on which of ``T1`` or ``T2`` was + used to calculate the p-value. + + pvalue : float + The p-value associated with the given alternative. + + The object also has the following method: + + confidence_interval(confidence_level=0.95) + Computes a confidence interval around the the + population quantile associated with the probability `p`. The + confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. Values are `nan` when there are + not enough observations to compute the confidence interval at + the desired confidence. + + Notes + ----- + This test and its method for computing confidence intervals are + non-parametric. They are valid if and only if the observations are i.i.d. + + The implementation of the test follows Conover [1]_. Two test statistics + are considered. + + ``T1``: The number of observations in `x` less than or equal to `q`. + + ``T1 = (x <= q).sum()`` + + ``T2``: The number of observations in `x` strictly less than `q`. + + ``T2 = (x < q).sum()`` + + The use of two test statistics is necessary to handle the possibility that + `x` was generated from a discrete or mixed distribution. + + The null hypothesis for the test is: + + H0: The :math:`p^{\mathrm{th}}` population quantile is `q`. + + and the null distribution for each test statistic is + :math:`\mathrm{binom}\left(n, p\right)`. 
When ``alternative='less'``, + the alternative hypothesis is: + + H1: The :math:`p^{\mathrm{th}}` population quantile is less than `q`. + + and the p-value is the probability that the binomial random variable + + .. math:: + Y \sim \mathrm{binom}\left(n, p\right) + + is greater than or equal to the observed value ``T2``. + + When ``alternative='greater'``, the alternative hypothesis is: + + H1: The :math:`p^{\mathrm{th}}` population quantile is greater than `q` + + and the p-value is the probability that the binomial random variable Y + is less than or equal to the observed value ``T1``. + + When ``alternative='two-sided'``, the alternative hypothesis is + + H1: `q` is not the :math:`p^{\mathrm{th}}` population quantile. + + and the p-value is twice the smaller of the p-values for the ``'less'`` + and ``'greater'`` cases. Both of these p-values can exceed 0.5 for the same + data, so the value is clipped into the interval :math:`[0, 1]`. + + The approach for confidence intervals is attributed to Thompson [2]_ and + later proven to be applicable to any set of i.i.d. samples [3]_. The + computation is based on the observation that the probability of a quantile + :math:`q` to be larger than any observations :math:`x_m (1\leq m \leq N)` + can be computed as + + .. math:: + + \mathbb{P}(x_m \leq q) = 1 - \sum_{k=0}^{m-1} \binom{N}{k} + q^k(1-q)^{N-k} + + By default, confidence intervals are computed for a 95% confidence level. + A common interpretation of a 95% confidence intervals is that if i.i.d. + samples are drawn repeatedly from the same population and confidence + intervals are formed each time, the confidence interval will contain the + true value of the specified quantile in approximately 95% of trials. + + A similar function is available in the QuantileNPCI R package [4]_. The + foundation is the same, but it computes the confidence interval bounds by + doing interpolations between the sample values, whereas this function uses + only sample values as bounds. 
Thus, ``quantile_test.confidence_interval`` + returns more conservative intervals (i.e., larger). + + The same computation of confidence intervals for quantiles is included in + the confintr package [5]_. + + Two-sided confidence intervals are not guaranteed to be optimal; i.e., + there may exist a tighter interval that may contain the quantile of + interest with probability larger than the confidence level. + Without further assumption on the samples (e.g., the nature of the + underlying distribution), the one-sided intervals are optimally tight. + + References + ---------- + .. [1] W. J. Conover. Practical Nonparametric Statistics, 3rd Ed. 1999. + .. [2] W. R. Thompson, "On Confidence Ranges for the Median and Other + Expectation Distributions for Populations of Unknown Distribution + Form," The Annals of Mathematical Statistics, vol. 7, no. 3, + pp. 122-128, 1936, Accessed: Sep. 18, 2019. [Online]. Available: + https://www.jstor.org/stable/2957563. + .. [3] H. A. David and H. N. Nagaraja, "Order Statistics in Nonparametric + Inference" in Order Statistics, John Wiley & Sons, Ltd, 2005, pp. + 159-170. Available: + https://onlinelibrary.wiley.com/doi/10.1002/0471722162.ch7. + .. [4] N. Hutson, A. Hutson, L. Yan, "QuantileNPCI: Nonparametric + Confidence Intervals for Quantiles," R package, + https://cran.r-project.org/package=QuantileNPCI + .. [5] M. Mayer, "confintr: Confidence Intervals," R package, + https://cran.r-project.org/package=confintr + + + Examples + -------- + + Suppose we wish to test the null hypothesis that the median of a population + is equal to 0.5. We choose a confidence level of 99%; that is, we will + reject the null hypothesis in favor of the alternative if the p-value is + less than 0.01. + + When testing random variates from the standard uniform distribution, which + has a median of 0.5, we expect the data to be consistent with the null + hypothesis most of the time. 
+ + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng(6981396440634228121) + >>> rvs = stats.uniform.rvs(size=100, random_state=rng) + >>> stats.quantile_test(rvs, q=0.5, p=0.5) + QuantileTestResult(statistic=45, statistic_type=1, pvalue=0.36820161732669576) + + As expected, the p-value is not below our threshold of 0.01, so + we cannot reject the null hypothesis. + + When testing data from the standard *normal* distribution, which has a + median of 0, we would expect the null hypothesis to be rejected. + + >>> rvs = stats.norm.rvs(size=100, random_state=rng) + >>> stats.quantile_test(rvs, q=0.5, p=0.5) + QuantileTestResult(statistic=67, statistic_type=2, pvalue=0.0008737198369123724) + + Indeed, the p-value is lower than our threshold of 0.01, so we reject the + null hypothesis in favor of the default "two-sided" alternative: the median + of the population is *not* equal to 0.5. + + However, suppose we were to test the null hypothesis against the + one-sided alternative that the median of the population is *greater* than + 0.5. Since the median of the standard normal is less than 0.5, we would not + expect the null hypothesis to be rejected. + + >>> stats.quantile_test(rvs, q=0.5, p=0.5, alternative='greater') + QuantileTestResult(statistic=67, statistic_type=1, pvalue=0.9997956114162866) + + Unsurprisingly, with a p-value greater than our threshold, we would not + reject the null hypothesis in favor of the chosen alternative. + + The quantile test can be used for any quantile, not only the median. For + example, we can test whether the third quartile of the distribution + underlying the sample is greater than 0.6. + + >>> rvs = stats.uniform.rvs(size=100, random_state=rng) + >>> stats.quantile_test(rvs, q=0.6, p=0.75, alternative='greater') + QuantileTestResult(statistic=64, statistic_type=1, pvalue=0.00940696592998271) + + The p-value is lower than the threshold. 
We reject the null hypothesis in + favor of the alternative: the third quartile of the distribution underlying + our sample is greater than 0.6. + + `quantile_test` can also compute confidence intervals for any quantile. + + >>> rvs = stats.norm.rvs(size=100, random_state=rng) + >>> res = stats.quantile_test(rvs, q=0.6, p=0.75) + >>> ci = res.confidence_interval(confidence_level=0.95) + >>> ci + ConfidenceInterval(low=0.284491604437432, high=0.8912531024914844) + + When testing a one-sided alternative, the confidence interval contains + all observations such that if passed as `q`, the p-value of the + test would be greater than 0.05, and therefore the null hypothesis + would not be rejected. For example: + + >>> rvs.sort() + >>> q, p, alpha = 0.6, 0.75, 0.95 + >>> res = stats.quantile_test(rvs, q=q, p=p, alternative='less') + >>> ci = res.confidence_interval(confidence_level=alpha) + >>> for x in rvs[rvs <= ci.high]: + ... res = stats.quantile_test(rvs, q=x, p=p, alternative='less') + ... assert res.pvalue > 1-alpha + >>> for x in rvs[rvs > ci.high]: + ... res = stats.quantile_test(rvs, q=x, p=p, alternative='less') + ... assert res.pvalue < 1-alpha + + Also, if a 95% confidence interval is repeatedly generated for random + samples, the confidence interval will contain the true quantile value in + approximately 95% of replications. + + >>> dist = stats.rayleigh() # our "unknown" distribution + >>> p = 0.2 + >>> true_stat = dist.ppf(p) # the true value of the statistic + >>> n_trials = 1000 + >>> quantile_ci_contains_true_stat = 0 + >>> for i in range(n_trials): + ... data = dist.rvs(size=100, random_state=rng) + ... res = stats.quantile_test(data, p=p) + ... ci = res.confidence_interval(0.95) + ... if ci[0] < true_stat < ci[1]: + ... quantile_ci_contains_true_stat += 1 + >>> quantile_ci_contains_true_stat >= 950 + True + + This works with any distribution and any quantile, as long as the samples + are i.i.d. 
+ """ + # Implementation carefully follows [1] 3.2 + # "H0: the p*th quantile of X is x*" + # To facilitate comparison with [1], we'll use variable names that + # best match Conover's notation + X, x_star, p_star, H1 = quantile_test_iv(x, q, p, alternative) + + # "We will use two test statistics in this test. Let T1 equal " + # "the number of observations less than or equal to x*, and " + # "let T2 equal the number of observations less than x*." + T1 = (X <= x_star).sum() + T2 = (X < x_star).sum() + + # "The null distribution of the test statistics T1 and T2 is " + # "the binomial distribution, with parameters n = sample size, and " + # "p = p* as given in the null hypothesis.... Y has the binomial " + # "distribution with parameters n and p*." + n = len(X) + Y = stats.binom(n=n, p=p_star) + + # "H1: the p* population quantile is less than x*" + if H1 == 'less': + # "The p-value is the probability that a binomial random variable Y " + # "is greater than *or equal to* the observed value of T2...using p=p*" + pvalue = Y.sf(T2-1) # Y.pmf(T2) + Y.sf(T2) + statistic = T2 + statistic_type = 2 + # "H1: the p* population quantile is greater than x*" + elif H1 == 'greater': + # "The p-value is the probability that a binomial random variable Y " + # "is less than or equal to the observed value of T1... using p = p*" + pvalue = Y.cdf(T1) + statistic = T1 + statistic_type = 1 + # "H1: x* is not the p*th population quantile" + elif H1 == 'two-sided': + # "The p-value is twice the smaller of the probabilities that a + # binomial random variable Y is less than or equal to the observed + # value of T1 or greater than or equal to the observed value of T2 + # using p=p*." 
+ # Note: both one-sided p-values can exceed 0.5 for the same data, so + # `clip` + pvalues = [Y.cdf(T1), Y.sf(T2 - 1)] # [greater, less] + sorted_idx = np.argsort(pvalues) + pvalue = np.clip(2*pvalues[sorted_idx[0]], 0, 1) + if sorted_idx[0]: + statistic, statistic_type = T2, 2 + else: + statistic, statistic_type = T1, 1 + + return QuantileTestResult( + statistic=statistic, + statistic_type=statistic_type, + pvalue=pvalue, + _alternative=H1, + _x=X, + _p=p_star + ) + + +##################################### +# STATISTICAL DISTANCES # +##################################### + + +def wasserstein_distance_nd(u_values, v_values, u_weights=None, v_weights=None): + r""" + Compute the Wasserstein-1 distance between two N-D discrete distributions. + + The Wasserstein distance, also called the Earth mover's distance or the + optimal transport distance, is a similarity metric between two probability + distributions [1]_. In the discrete case, the Wasserstein distance can be + understood as the cost of an optimal transport plan to convert one + distribution into the other. The cost is calculated as the product of the + amount of probability mass being moved and the distance it is being moved. + A brief and intuitive introduction can be found at [2]_. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + u_values : 2d array_like + A sample from a probability distribution or the support (set of all + possible values) of a probability distribution. Each element along + axis 0 is an observation or possible value, and axis 1 represents the + dimensionality of the distribution; i.e., each row is a vector + observation or possible value. + + v_values : 2d array_like + A sample from or the support of a second distribution. + + u_weights, v_weights : 1d array_like, optional + Weights or counts corresponding with the sample or probability masses + corresponding with the support values. Sum of elements must be positive + and finite. 
If unspecified, each value is assigned the same weight. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + Given two probability mass functions, :math:`u` + and :math:`v`, the first Wasserstein distance between the distributions + using the Euclidean norm is: + + .. math:: + + l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int \| x-y \|_2 \mathrm{d} \pi (x, y) + + where :math:`\Gamma (u, v)` is the set of (probability) distributions on + :math:`\mathbb{R}^n \times \mathbb{R}^n` whose marginals are :math:`u` and + :math:`v` on the first and second factors respectively. For a given value + :math:`x`, :math:`u(x)` gives the probability of :math:`u` at position + :math:`x`, and the same for :math:`v(x)`. + + This is also called the optimal transport problem or the Monge problem. + Let the finite point sets :math:`\{x_i\}` and :math:`\{y_j\}` denote + the support set of probability mass function :math:`u` and :math:`v` + respectively. The Monge problem can be expressed as follows, + + Let :math:`\Gamma` denote the transport plan, :math:`D` denote the + distance matrix and, + + .. math:: + + x = \text{vec}(\Gamma) \\ + c = \text{vec}(D) \\ + b = \begin{bmatrix} + u\\ + v\\ + \end{bmatrix} + + The :math:`\text{vec}()` function denotes the Vectorization function + that transforms a matrix into a column vector by vertically stacking + the columns of the matrix. + The transport plan :math:`\Gamma` is a matrix :math:`[\gamma_{ij}]` in + which :math:`\gamma_{ij}` is a positive value representing the amount of + probability mass transported from :math:`u(x_i)` to :math:`v(y_i)`. + Summing over the rows of :math:`\Gamma` should give the source distribution + :math:`u` : :math:`\sum_j \gamma_{ij} = u(x_i)` holds for all :math:`i` + and summing over the columns of :math:`\Gamma` should give the target + distribution :math:`v`: :math:`\sum_i \gamma_{ij} = v(y_j)` holds for all + :math:`j`. 
+ The distance matrix :math:`D` is a matrix :math:`[d_{ij}]`, in which + :math:`d_{ij} = d(x_i, y_j)`. + + Given :math:`\Gamma`, :math:`D`, :math:`b`, the Monge problem can be + transformed into a linear programming problem by + taking :math:`A x = b` as constraints and :math:`z = c^T x` as minimization + target (sum of costs) , where matrix :math:`A` has the form + + .. math:: + + \begin{array} {rrrr|rrrr|r|rrrr} + 1 & 1 & \dots & 1 & 0 & 0 & \dots & 0 & \dots & 0 & 0 & \dots & + 0 \cr + 0 & 0 & \dots & 0 & 1 & 1 & \dots & 1 & \dots & 0 & 0 &\dots & + 0 \cr + \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots + & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr + 0 & 0 & \dots & 0 & 0 & 0 & \dots & 0 & \dots & 1 & 1 & \dots & + 1 \cr \hline + + 1 & 0 & \dots & 0 & 1 & 0 & \dots & \dots & \dots & 1 & 0 & \dots & + 0 \cr + 0 & 1 & \dots & 0 & 0 & 1 & \dots & \dots & \dots & 0 & 1 & \dots & + 0 \cr + \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & + \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr + 0 & 0 & \dots & 1 & 0 & 0 & \dots & 1 & \dots & 0 & 0 & \dots & 1 + \end{array} + + By solving the dual form of the above linear programming problem (with + solution :math:`y^*`), the Wasserstein distance :math:`l_1 (u, v)` can + be computed as :math:`b^T y^*`. + + The above solution is inspired by Vincent Herrmann's blog [3]_ . For a + more thorough explanation, see [4]_ . + + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] "Wasserstein metric", + https://en.wikipedia.org/wiki/Wasserstein_metric + .. [2] Lili Weng, "What is Wasserstein distance?", Lil'log, + https://lilianweng.github.io/posts/2017-08-20-gan/#what-is-wasserstein-distance. + .. [3] Hermann, Vincent. 
"Wasserstein GAN and the Kantorovich-Rubinstein + Duality". https://vincentherrmann.github.io/blog/wasserstein/. + .. [4] Peyré, Gabriel, and Marco Cuturi. "Computational optimal + transport." Center for Research in Economics and Statistics + Working Papers 2017-86 (2017). + + See Also + -------- + wasserstein_distance: Compute the Wasserstein-1 distance between two + 1D discrete distributions. + + Examples + -------- + Compute the Wasserstein distance between two three-dimensional samples, + each with two observations. + + >>> from scipy.stats import wasserstein_distance_nd + >>> wasserstein_distance_nd([[0, 2, 3], [1, 2, 5]], [[3, 2, 3], [4, 2, 5]]) + 3.0 + + Compute the Wasserstein distance between two two-dimensional distributions + with three and two weighted observations, respectively. + + >>> wasserstein_distance_nd([[0, 2.75], [2, 209.3], [0, 0]], + ... [[0.2, 0.322], [4.5, 25.1808]], + ... [0.4, 5.2, 0.114], [0.8, 1.5]) + 174.15840245217169 + """ + m, n = len(u_values), len(v_values) + u_values = asarray(u_values) + v_values = asarray(v_values) + + if u_values.ndim > 2 or v_values.ndim > 2: + raise ValueError('Invalid input values. The inputs must have either ' + 'one or two dimensions.') + # if dimensions are not equal throw error + if u_values.ndim != v_values.ndim: + raise ValueError('Invalid input values. Dimensions of inputs must be ' + 'equal.') + # if data is 1D then call the cdf_distance function + if u_values.ndim == 1 and v_values.ndim == 1: + return _cdf_distance(1, u_values, v_values, u_weights, v_weights) + + u_values, u_weights = _validate_distribution(u_values, u_weights) + v_values, v_weights = _validate_distribution(v_values, v_weights) + # if number of columns is not equal throw error + if u_values.shape[1] != v_values.shape[1]: + raise ValueError('Invalid input values. 
If two-dimensional, ' + '`u_values` and `v_values` must have the same ' + 'number of columns.') + + # if data contains np.inf then return inf or nan + if np.any(np.isinf(u_values)) ^ np.any(np.isinf(v_values)): + return np.inf + elif np.any(np.isinf(u_values)) and np.any(np.isinf(v_values)): + return np.nan + + # create constraints + A_upper_part = sparse.block_diag((np.ones((1, n)), ) * m) + A_lower_part = sparse.hstack((sparse.eye(n), ) * m) + # sparse constraint matrix of size (m + n)*(m * n) + A = sparse.vstack((A_upper_part, A_lower_part)) + A = sparse.coo_array(A) + + # get cost matrix + D = distance_matrix(u_values, v_values, p=2) + cost = D.ravel() + + # create the minimization target + p_u = np.full(m, 1/m) if u_weights is None else u_weights/np.sum(u_weights) + p_v = np.full(n, 1/n) if v_weights is None else v_weights/np.sum(v_weights) + b = np.concatenate((p_u, p_v), axis=0) + + # solving LP + constraints = LinearConstraint(A=A.T, ub=cost) + opt_res = milp(c=-b, constraints=constraints, bounds=(-np.inf, np.inf)) + return -opt_res.fun + + +def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None): + r""" + Compute the Wasserstein-1 distance between two 1D discrete distributions. + + The Wasserstein distance, also called the Earth mover's distance or the + optimal transport distance, is a similarity metric between two probability + distributions [1]_. In the discrete case, the Wasserstein distance can be + understood as the cost of an optimal transport plan to convert one + distribution into the other. The cost is calculated as the product of the + amount of probability mass being moved and the distance it is being moved. + A brief and intuitive introduction can be found at [2]_. + + .. versionadded:: 1.0.0 + + Parameters + ---------- + u_values : 1d array_like + A sample from a probability distribution or the support (set of all + possible values) of a probability distribution. Each element is an + observation or possible value. 
+ + v_values : 1d array_like + A sample from or the support of a second distribution. + + u_weights, v_weights : 1d array_like, optional + Weights or counts corresponding with the sample or probability masses + corresponding with the support values. Sum of elements must be positive + and finite. If unspecified, each value is assigned the same weight. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + Given two 1D probability mass functions, :math:`u` and :math:`v`, the first + Wasserstein distance between the distributions is: + + .. math:: + + l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times + \mathbb{R}} |x-y| \mathrm{d} \pi (x, y) + + where :math:`\Gamma (u, v)` is the set of (probability) distributions on + :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and + :math:`v` on the first and second factors respectively. For a given value + :math:`x`, :math:`u(x)` gives the probability of :math:`u` at position + :math:`x`, and the same for :math:`v(x)`. + + If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and + :math:`v`, this distance also equals to: + + .. math:: + + l_1(u, v) = \int_{-\infty}^{+\infty} |U-V| + + See [3]_ for a proof of the equivalence of both definitions. + + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric + .. [2] Lili Weng, "What is Wasserstein distance?", Lil'log, + https://lilianweng.github.io/posts/2017-08-20-gan/#what-is-wasserstein-distance. + .. [3] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related + Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`. 
+ + See Also + -------- + wasserstein_distance_nd: Compute the Wasserstein-1 distance between two N-D + discrete distributions. + + Examples + -------- + >>> from scipy.stats import wasserstein_distance + >>> wasserstein_distance([0, 1, 3], [5, 6, 8]) + 5.0 + >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2]) + 0.25 + >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4], + ... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5]) + 4.0781331438047861 + + """ + return _cdf_distance(1, u_values, v_values, u_weights, v_weights) + + +def energy_distance(u_values, v_values, u_weights=None, v_weights=None): + r"""Compute the energy distance between two 1D distributions. + + .. versionadded:: 1.0.0 + + Parameters + ---------- + u_values, v_values : array_like + Values observed in the (empirical) distribution. + u_weights, v_weights : array_like, optional + Weight for each value. If unspecified, each value is assigned the same + weight. + `u_weights` (resp. `v_weights`) must have the same length as + `u_values` (resp. `v_values`). If the weight sum differs from 1, it + must still be positive and finite so that the weights can be normalized + to sum to 1. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + The energy distance between two distributions :math:`u` and :math:`v`, whose + respective CDFs are :math:`U` and :math:`V`, equals to: + + .. math:: + + D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| - + \mathbb E|Y - Y'| \right)^{1/2} + + where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are + independent random variables whose probability distribution is :math:`u` + (resp. :math:`v`). + + Sometimes the square of this quantity is referred to as the "energy + distance" (e.g. in [2]_, [4]_), but as noted in [1]_ and [3]_, only the + definition above satisfies the axioms of a distance function (metric). 
+ + As shown in [2]_, for one-dimensional real-valued variables, the energy + distance is linked to the non-distribution-free version of the Cramér-von + Mises distance: + + .. math:: + + D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2 + \right)^{1/2} + + Note that the common Cramér-von Mises criterion uses the distribution-free + version of the distance. See [2]_ (section 2), for more details about both + versions of the distance. + + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews: + Computational Statistics, 8(1):27-38 (2015). + .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling + Green State University, Department of Mathematics and Statistics, + Technical Report 02-16 (2002). + .. [3] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance + .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, + Munos "The Cramer Distance as a Solution to Biased Wasserstein + Gradients" (2017). :arXiv:`1705.10743`. + + Examples + -------- + >>> from scipy.stats import energy_distance + >>> energy_distance([0], [2]) + 2.0000000000000004 + >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2]) + 1.0000000000000002 + >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ], + ... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8]) + 0.88003340976158217 + + """ + return np.sqrt(2) * _cdf_distance(2, u_values, v_values, + u_weights, v_weights) + + +def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None): + r""" + Compute, between two one-dimensional distributions :math:`u` and + :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the + statistical distance that is defined as: + + .. 
math:: + + l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p} + + p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2 + gives the energy distance. + + Parameters + ---------- + u_values, v_values : array_like + Values observed in the (empirical) distribution. + u_weights, v_weights : array_like, optional + Weight for each value. If unspecified, each value is assigned the same + weight. + `u_weights` (resp. `v_weights`) must have the same length as + `u_values` (resp. `v_values`). If the weight sum differs from 1, it + must still be positive and finite so that the weights can be normalized + to sum to 1. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, + Munos "The Cramer Distance as a Solution to Biased Wasserstein + Gradients" (2017). :arXiv:`1705.10743`. + + """ + u_values, u_weights = _validate_distribution(u_values, u_weights) + v_values, v_weights = _validate_distribution(v_values, v_weights) + + u_sorter = np.argsort(u_values) + v_sorter = np.argsort(v_values) + + all_values = np.concatenate((u_values, v_values)) + all_values.sort(kind='mergesort') + + # Compute the differences between pairs of successive values of u and v. + deltas = np.diff(all_values) + + # Get the respective positions of the values of u and v among the values of + # both distributions. + u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right') + v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right') + + # Calculate the CDFs of u and v using their weights, if specified. 
+ if u_weights is None: + u_cdf = u_cdf_indices / u_values.size + else: + u_sorted_cumweights = np.concatenate(([0], + np.cumsum(u_weights[u_sorter]))) + u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1] + + if v_weights is None: + v_cdf = v_cdf_indices / v_values.size + else: + v_sorted_cumweights = np.concatenate(([0], + np.cumsum(v_weights[v_sorter]))) + v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1] + + # Compute the value of the integral based on the CDFs. + # If p = 1 or p = 2, we avoid using np.power, which introduces an overhead + # of about 15%. + if p == 1: + return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas)) + if p == 2: + return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas))) + return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p), + deltas)), 1/p) + + +def _validate_distribution(values, weights): + """ + Validate the values and weights from a distribution input of `cdf_distance` + and return them as ndarray objects. + + Parameters + ---------- + values : array_like + Values observed in the (empirical) distribution. + weights : array_like + Weight for each value. + + Returns + ------- + values : ndarray + Values as ndarray. + weights : ndarray + Weights as ndarray. + + """ + # Validate the value array. + values = np.asarray(values, dtype=float) + if len(values) == 0: + raise ValueError("Distribution can't be empty.") + + # Validate the weight array, if specified. + if weights is not None: + weights = np.asarray(weights, dtype=float) + if len(weights) != len(values): + raise ValueError('Value and weight array-likes for the same ' + 'empirical distribution must be of the same size.') + if np.any(weights < 0): + raise ValueError('All weights must be non-negative.') + if not 0 < np.sum(weights) < np.inf: + raise ValueError('Weight array-like sum must be positive and ' + 'finite. 
Set as None for an equal distribution of ' + 'weight.') + + return values, weights + + return values, None + + +##################################### +# SUPPORT FUNCTIONS # +##################################### + +RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts')) + + +@_deprecated("`scipy.stats.find_repeats` is deprecated as of SciPy 1.15.0 " + "and will be removed in SciPy 1.17.0. Please use " + "`numpy.unique`/`numpy.unique_counts` instead.") +def find_repeats(arr): + """Find repeats and repeat counts. + + .. deprecated:: 1.15.0 + + This function is deprecated as of SciPy 1.15.0 and will be removed + in SciPy 1.17.0. Please use `numpy.unique` / `numpy.unique_counts` instead. + + Parameters + ---------- + arr : array_like + Input array. This is cast to float64. + + Returns + ------- + values : ndarray + The unique values from the (flattened) input that are repeated. + + counts : ndarray + Number of times the corresponding 'value' is repeated. + + Notes + ----- + In numpy >= 1.9 `numpy.unique` provides similar functionality. The main + difference is that `find_repeats` only returns repeated values. + + Examples + -------- + >>> from scipy import stats + >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5]) + RepeatedResults(values=array([2.]), counts=array([4])) + + >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]]) + RepeatedResults(values=array([4., 5.]), counts=array([2, 2])) + + """ + # Note: always copies. + return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64))) + + +def _sum_of_squares(a, axis=0): + """Square each element of the input array, and return the sum(s) of that. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + sum_of_squares : ndarray + The sum along the given axis for (a**2). 
+ + See Also + -------- + _square_of_sums : The square(s) of the sum(s) (the opposite of + `_sum_of_squares`). + + """ + a, axis = _chk_asarray(a, axis) + return np.sum(a*a, axis) + + +def _square_of_sums(a, axis=0): + """Sum elements of the input array, and return the square(s) of that sum. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + square_of_sums : float or ndarray + The square of the sum over `axis`. + + See Also + -------- + _sum_of_squares : The sum of squares (the opposite of `square_of_sums`). + + """ + a, axis = _chk_asarray(a, axis) + s = np.sum(a, axis) + if not np.isscalar(s): + return s.astype(float) * s + else: + return float(s) * s + + +def rankdata(a, method='average', *, axis=None, nan_policy='propagate'): + """Assign ranks to data, dealing with ties appropriately. + + By default (``axis=None``), the data array is first flattened, and a flat + array of ranks is returned. Separately reshape the rank array to the + shape of the data array if desired (see Examples). + + Ranks begin at 1. The `method` argument controls how ranks are assigned + to equal values. See [1]_ for further discussion of ranking methods. + + Parameters + ---------- + a : array_like + The array of values to be ranked. + method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional + The method used to assign ranks to tied elements. + The following methods are available (default is 'average'): + + * 'average': The average of the ranks that would have been assigned to + all the tied values is assigned to each value. + * 'min': The minimum of the ranks that would have been assigned to all + the tied values is assigned to each value. (This is also + referred to as "competition" ranking.) + * 'max': The maximum of the ranks that would have been assigned to all + the tied values is assigned to each value. 
+ * 'dense': Like 'min', but the rank of the next highest element is + assigned the rank immediately after those assigned to the tied + elements. + * 'ordinal': All values are given a distinct rank, corresponding to + the order that the values occur in `a`. + axis : {None, int}, optional + Axis along which to perform the ranking. If ``None``, the data array + is first flattened. + nan_policy : {'propagate', 'omit', 'raise'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': propagates nans through the rank calculation + * 'omit': performs the calculations ignoring nan values + * 'raise': raises an error + + .. note:: + + When `nan_policy` is 'propagate', the output is an array of *all* + nans because ranks relative to nans in the input are undefined. + When `nan_policy` is 'omit', nans in `a` are ignored when ranking + the other values, and the corresponding locations of the output + are nan. + + .. versionadded:: 1.10 + + Returns + ------- + ranks : ndarray + An array of size equal to the size of `a`, containing rank + scores. + + References + ---------- + .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import rankdata + >>> rankdata([0, 2, 3, 2]) + array([ 1. , 2.5, 4. , 2.5]) + >>> rankdata([0, 2, 3, 2], method='min') + array([ 1, 2, 4, 2]) + >>> rankdata([0, 2, 3, 2], method='max') + array([ 1, 3, 4, 3]) + >>> rankdata([0, 2, 3, 2], method='dense') + array([ 1, 2, 3, 2]) + >>> rankdata([0, 2, 3, 2], method='ordinal') + array([ 1, 2, 4, 3]) + >>> rankdata([[0, 2], [3, 2]]).reshape(2,2) + array([[1. , 2.5], + [4. , 2.5]]) + >>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1) + array([[1. , 2.5, 2.5], + [2. , 1. , 3. 
]]) + >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="propagate") + array([nan, nan, nan, nan, nan, nan]) + >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="omit") + array([ 2., 3., 4., nan, 1., nan]) + + """ + methods = ('average', 'min', 'max', 'dense', 'ordinal') + if method not in methods: + raise ValueError(f'unknown method "{method}"') + + x = np.asarray(a) + + if axis is None: + x = x.ravel() + axis = -1 + + if x.size == 0: + dtype = float if method == 'average' else np.dtype("long") + return np.empty(x.shape, dtype=dtype) + + contains_nan, nan_policy = _contains_nan(x, nan_policy) + + x = np.swapaxes(x, axis, -1) + ranks = _rankdata(x, method) + + if contains_nan: + i_nan = (np.isnan(x) if nan_policy == 'omit' + else np.isnan(x).any(axis=-1)) + ranks = ranks.astype(float, copy=False) + ranks[i_nan] = np.nan + + ranks = np.swapaxes(ranks, axis, -1) + return ranks + + +def _order_ranks(ranks, j): + # Reorder ascending order `ranks` according to `j` + ordered_ranks = np.empty(j.shape, dtype=ranks.dtype) + np.put_along_axis(ordered_ranks, j, ranks, axis=-1) + return ordered_ranks + + +def _rankdata(x, method, return_ties=False): + # Rank data `x` by desired `method`; `return_ties` if desired + shape = x.shape + + # Get sort order + kind = 'mergesort' if method == 'ordinal' else 'quicksort' + j = np.argsort(x, axis=-1, kind=kind) + ordinal_ranks = np.broadcast_to(np.arange(1, shape[-1]+1, dtype=int), shape) + + # Ordinal ranks is very easy because ties don't matter. We're done. 
    if method == 'ordinal':
        return _order_ranks(ordinal_ranks, j)  # never return ties

    # Sort array
    y = np.take_along_axis(x, j, axis=-1)
    # Logical indices of unique elements
    i = np.concatenate([np.ones(shape[:-1] + (1,), dtype=np.bool_),
                        y[..., :-1] != y[..., 1:]], axis=-1)

    # Integer indices of unique elements
    indices = np.arange(y.size)[i.ravel()]
    # Counts of unique elements
    counts = np.diff(indices, append=y.size)

    # Compute `'min'`, `'max'`, `'average'`, and `'dense'` ranks of unique elements
    if method == 'min':
        ranks = ordinal_ranks[i]
    elif method == 'max':
        ranks = ordinal_ranks[i] + counts - 1
    elif method == 'average':
        ranks = ordinal_ranks[i] + (counts - 1)/2
    elif method == 'dense':
        ranks = np.cumsum(i, axis=-1)[i]

    # Broadcast each unique element's rank across its ties, then restore the
    # original (pre-sort) order of the input.
    ranks = np.repeat(ranks, counts).reshape(shape)
    ranks = _order_ranks(ranks, j)

    if return_ties:
        # Tie information is returned in a format that is useful to functions that
        # rely on this (private) function. Example:
        # >>> x = np.asarray([3, 2, 1, 2, 2, 2, 1])
        # >>> _, t = _rankdata(x, 'average', return_ties=True)
        # >>> t  # array([2., 0., 4., 0., 0., 0., 1.])  # two 1s, four 2s, and one 3
        # Unlike ranks, tie counts are *not* reordered to correspond with the order of
        # the input; e.g. the number of appearances of the lowest rank element comes
        # first. This is a useful format because:
        # - The shape of the result is the shape of the input. Different slices can
        #   have different numbers of tied elements but not result in a ragged array.
        # - Functions that use `t` usually don't need to know which element of the
        #   original array is associated with each tie count; they perform a reduction
        #   over the tie counts only. The tie counts are naturally computed in a
        #   sorted order, so this does not unnecessarily reorder them.
        # - One exception is `wilcoxon`, which needs the number of zeros. Zeros always
        #   have the lowest rank, so it is easy to find them at the zeroth index.
+ t = np.zeros(shape, dtype=float) + t[i] = counts + return ranks, t + return ranks + + +def expectile(a, alpha=0.5, *, weights=None): + r"""Compute the expectile at the specified level. + + Expectiles are a generalization of the expectation in the same way as + quantiles are a generalization of the median. The expectile at level + `alpha = 0.5` is the mean (average). See Notes for more details. + + Parameters + ---------- + a : array_like + Array containing numbers whose expectile is desired. + alpha : float, default: 0.5 + The level of the expectile; e.g., ``alpha=0.5`` gives the mean. + weights : array_like, optional + An array of weights associated with the values in `a`. + The `weights` must be broadcastable to the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + An integer valued weight element acts like repeating the corresponding + observation in `a` that many times. See Notes for more details. + + Returns + ------- + expectile : ndarray + The empirical expectile at level `alpha`. + + See Also + -------- + numpy.mean : Arithmetic average + numpy.quantile : Quantile + + Notes + ----- + In general, the expectile at level :math:`\alpha` of a random variable + :math:`X` with cumulative distribution function (CDF) :math:`F` is given + by the unique solution :math:`t` of: + + .. math:: + + \alpha E((X - t)_+) = (1 - \alpha) E((t - X)_+) \,. + + Here, :math:`(x)_+ = \max(0, x)` is the positive part of :math:`x`. + This equation can be equivalently written as: + + .. math:: + + \alpha \int_t^\infty (x - t)\mathrm{d}F(x) + = (1 - \alpha) \int_{-\infty}^t (t - x)\mathrm{d}F(x) \,. + + The empirical expectile at level :math:`\alpha` (`alpha`) of a sample + :math:`a_i` (the array `a`) is defined by plugging in the empirical CDF of + `a`. Given sample or case weights :math:`w` (the array `weights`), it + reads :math:`F_a(x) = \frac{1}{\sum_i w_i} \sum_i w_i 1_{a_i \leq x}` + with indicator function :math:`1_{A}`. 
This leads to the definition of the + empirical expectile at level `alpha` as the unique solution :math:`t` of: + + .. math:: + + \alpha \sum_{i=1}^n w_i (a_i - t)_+ = + (1 - \alpha) \sum_{i=1}^n w_i (t - a_i)_+ \,. + + For :math:`\alpha=0.5`, this simplifies to the weighted average. + Furthermore, the larger :math:`\alpha`, the larger the value of the + expectile. + + As a final remark, the expectile at level :math:`\alpha` can also be + written as a minimization problem. One often used choice is + + .. math:: + + \operatorname{argmin}_t + E(\lvert 1_{t\geq X} - \alpha\rvert(t - X)^2) \,. + + References + ---------- + .. [1] W. K. Newey and J. L. Powell (1987), "Asymmetric Least Squares + Estimation and Testing," Econometrica, 55, 819-847. + .. [2] T. Gneiting (2009). "Making and Evaluating Point Forecasts," + Journal of the American Statistical Association, 106, 746 - 762. + :doi:`10.48550/arXiv.0912.0902` + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import expectile + >>> a = [1, 4, 2, -1] + >>> expectile(a, alpha=0.5) == np.mean(a) + True + >>> expectile(a, alpha=0.2) + 0.42857142857142855 + >>> expectile(a, alpha=0.8) + 2.5714285714285716 + >>> weights = [1, 3, 1, 1] + + """ + if alpha < 0 or alpha > 1: + raise ValueError( + "The expectile level alpha must be in the range [0, 1]." + ) + a = np.asarray(a) + + if weights is not None: + weights = np.broadcast_to(weights, a.shape) + + # This is the empirical equivalent of Eq. 
(13) with identification + # function from Table 9 (omitting a factor of 2) in [2] (their y is our + # data a, their x is our t) + def first_order(t): + return np.average(np.abs((a <= t) - alpha) * (t - a), weights=weights) + + if alpha >= 0.5: + x0 = np.average(a, weights=weights) + x1 = np.amax(a) + else: + x1 = np.average(a, weights=weights) + x0 = np.amin(a) + + if x0 == x1: + # a has a single unique element + return x0 + + # Note that the expectile is the unique solution, so no worries about + # finding a wrong root. + res = root_scalar(first_order, x0=x0, x1=x1) + return res.root + + +def _lmoment_iv(sample, order, axis, sorted, standardize): + # input validation/standardization for `lmoment` + sample = np.asarray(sample) + message = "`sample` must be an array of real numbers." + if np.issubdtype(sample.dtype, np.integer): + sample = sample.astype(np.float64) + if not np.issubdtype(sample.dtype, np.floating): + raise ValueError(message) + + message = "`order` must be a scalar or a non-empty array of positive integers." + order = np.arange(1, 5) if order is None else np.asarray(order) + if not np.issubdtype(order.dtype, np.integer) or np.any(order <= 0): + raise ValueError(message) + + axis = np.asarray(axis)[()] + message = "`axis` must be an integer." + if not np.issubdtype(axis.dtype, np.integer) or axis.ndim != 0: + raise ValueError(message) + + sorted = np.asarray(sorted)[()] + message = "`sorted` must be True or False." + if not np.issubdtype(sorted.dtype, np.bool_) or sorted.ndim != 0: + raise ValueError(message) + + standardize = np.asarray(standardize)[()] + message = "`standardize` must be True or False." 
    if not np.issubdtype(standardize.dtype, np.bool_) or standardize.ndim != 0:
        raise ValueError(message)

    # Move the working axis last and ensure ascending order, as required by
    # the b_r computation in `_br`.
    sample = np.moveaxis(sample, axis, -1)
    sample = np.sort(sample, axis=-1) if not sorted else sample

    return sample, order, axis, sorted, standardize


def _br(x, *, r=0):
    # Sample b_r statistics of the sorted sample `x`, one row per order in the
    # array `r` (despite the scalar default, callers pass an array).
    # NOTE(review): presumably these are the probability-weighted-moment
    # estimates used to assemble L-moments — confirm against `lmoment` ref [1].
    n = x.shape[-1]
    x = np.expand_dims(x, axis=-2)
    x = np.broadcast_to(x, x.shape[:-2] + (len(r), n))
    x = np.triu(x)
    j = np.arange(n, dtype=x.dtype)
    n = np.asarray(n, dtype=x.dtype)[()]
    return (np.sum(special.binom(j, r[:, np.newaxis])*x, axis=-1)
            / special.binom(n-1, r) / n)


def _prk(r, k):
    # Written to match [1] Equation 27 closely to facilitate review.
    # This does not protect against overflow, so improvements to
    # robustness would be a welcome follow-up.
    return (-1)**(r-k)*special.binom(r, k)*special.binom(r+k, k)


@_axis_nan_policy_factory(  # noqa: E302
    _moment_result_object, n_samples=1, result_to_tuple=_moment_tuple,
    n_outputs=lambda kwds: _moment_outputs(kwds, [1, 2, 3, 4])
)
def lmoment(sample, order=None, *, axis=0, sorted=False, standardize=True):
    r"""Compute L-moments of a sample from a continuous distribution

    The L-moments of a probability distribution are summary statistics with
    uses similar to those of conventional moments, but they are defined in
    terms of the expected values of order statistics.
    Sample L-moments are defined analogously to population L-moments, and
    they can serve as estimators of population L-moments. They tend to be less
    sensitive to extreme observations than conventional moments.

    Parameters
    ----------
    sample : array_like
        The real-valued sample whose L-moments are desired.
    order : array_like, optional
        The (positive integer) orders of the desired L-moments.
        Must be a scalar or non-empty 1D array. Default is [1, 2, 3, 4].
    axis : int or None, default=0
        If an int, the axis of the input along which to compute the statistic.
        The statistic of each axis-slice (e.g.
row) of the input will appear + in a corresponding element of the output. If None, the input will be + raveled before computing the statistic. + sorted : bool, default=False + Whether `sample` is already sorted in increasing order along `axis`. + If False (default), `sample` will be sorted. + standardize : bool, default=True + Whether to return L-moment ratios for orders 3 and higher. + L-moment ratios are analogous to standardized conventional + moments: they are the non-standardized L-moments divided + by the L-moment of order 2. + + Returns + ------- + lmoments : ndarray + The sample L-moments of order `order`. + + See Also + -------- + moment + + References + ---------- + .. [1] D. Bilkova. "L-Moments and TL-Moments as an Alternative Tool of + Statistical Data Analysis". Journal of Applied Mathematics and + Physics. 2014. :doi:`10.4236/jamp.2014.210104` + .. [2] J. R. M. Hosking. "L-Moments: Analysis and Estimation of Distributions + Using Linear Combinations of Order Statistics". Journal of the Royal + Statistical Society. 1990. :doi:`10.1111/j.2517-6161.1990.tb01775.x` + .. [3] "L-moment". *Wikipedia*. https://en.wikipedia.org/wiki/L-moment. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng(328458568356392) + >>> sample = rng.exponential(size=100000) + >>> stats.lmoment(sample) + array([1.00124272, 0.50111437, 0.3340092 , 0.16755338]) + + Note that the first four standardized population L-moments of the standard + exponential distribution are 1, 1/2, 1/3, and 1/6; the sample L-moments + provide reasonable estimates. 

    """
    args = _lmoment_iv(sample, order, axis, sorted, standardize)
    sample, order, axis, sorted, standardize = args

    n_moments = np.max(order)
    k = np.arange(n_moments, dtype=sample.dtype)
    prk = _prk(np.expand_dims(k, tuple(range(1, sample.ndim+1))), k)
    bk = _br(sample, r=k)

    n = sample.shape[-1]
    bk[..., n:] = 0  # remove NaNs due to n_moments > n

    lmoms = np.sum(prk * bk, axis=-1)
    if standardize and n_moments > 2:
        # Convert orders >= 3 to L-moment *ratios* by dividing by the
        # second L-moment (see `standardize` in the docstring).
        lmoms[2:] /= lmoms[1]

    lmoms[n:] = np.nan  # add NaNs where appropriate
    return lmoms[order-1]


LinregressResult = _make_tuple_bunch('LinregressResult',
                                     ['slope', 'intercept', 'rvalue',
                                      'pvalue', 'stderr'],
                                     extra_field_names=['intercept_stderr'])


def linregress(x, y=None, alternative='two-sided'):
    """
    Calculate a linear least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements. Both arrays should have the same length N. If
        only `x` is given (and ``y=None``), then it must be a two-dimensional
        array where one dimension has length 2. The two sets of measurements
        are then found by splitting the array along the length-2 dimension. In
        the case where ``y=None`` and `x` is a 2xN array, ``linregress(x)`` is
        equivalent to ``linregress(x[0], x[1])``.

        .. deprecated:: 1.14.0
            Inference of the two sets of measurements from a single argument `x`
            is deprecated and will result in an error in SciPy 1.16.0; the sets
            must be specified separately as `x` and `y`.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the slope of the regression line is nonzero
        * 'less': the slope of the regression line is less than zero
        * 'greater': the slope of the regression line is greater than zero

        ..
versionadded:: 1.7.0 + + Returns + ------- + result : ``LinregressResult`` instance + The return value is an object with the following attributes: + + slope : float + Slope of the regression line. + intercept : float + Intercept of the regression line. + rvalue : float + The Pearson correlation coefficient. The square of ``rvalue`` + is equal to the coefficient of determination. + pvalue : float + The p-value for a hypothesis test whose null hypothesis is + that the slope is zero, using Wald Test with t-distribution of + the test statistic. See `alternative` above for alternative + hypotheses. + stderr : float + Standard error of the estimated slope (gradient), under the + assumption of residual normality. + intercept_stderr : float + Standard error of the estimated intercept, under the assumption + of residual normality. + + See Also + -------- + scipy.optimize.curve_fit : + Use non-linear least squares to fit a function to data. + scipy.optimize.leastsq : + Minimize the sum of squares of a set of equations. + + Notes + ----- + For compatibility with older versions of SciPy, the return value acts + like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``, + ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write:: + + slope, intercept, r, p, se = linregress(x, y) + + With that style, however, the standard error of the intercept is not + available. 
To have access to all the computed values, including the + standard error of the intercept, use the return value as an object + with attributes, e.g.:: + + result = linregress(x, y) + print(result.intercept, result.intercept_stderr) + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + >>> rng = np.random.default_rng() + + Generate some data: + + >>> x = rng.random(10) + >>> y = 1.6*x + rng.random(10) + + Perform the linear regression: + + >>> res = stats.linregress(x, y) + + Coefficient of determination (R-squared): + + >>> print(f"R-squared: {res.rvalue**2:.6f}") + R-squared: 0.717533 + + Plot the data along with the fitted line: + + >>> plt.plot(x, y, 'o', label='original data') + >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line') + >>> plt.legend() + >>> plt.show() + + Calculate 95% confidence interval on slope and intercept: + + >>> # Two-sided inverse Students t-distribution + >>> # p - probability, df - degrees of freedom + >>> from scipy.stats import t + >>> tinv = lambda p, df: abs(t.ppf(p/2, df)) + + >>> ts = tinv(0.05, len(x)-2) + >>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}") + slope (95%): 1.453392 +/- 0.743465 + >>> print(f"intercept (95%): {res.intercept:.6f}" + ... 
f" +/- {ts*res.intercept_stderr:.6f}") + intercept (95%): 0.616950 +/- 0.544475 + + """ + TINY = 1.0e-20 + if y is None: # x is a (2, N) or (N, 2) shaped array_like + message = ('Inference of the two sets of measurements from a single "' + 'argument `x` is deprecated will result in an error in "' + 'SciPy 1.16.0; the sets must be specified separately as "' + '`x` and `y`.') + warnings.warn(message, DeprecationWarning, stacklevel=2) + x = np.asarray(x) + if x.shape[0] == 2: + x, y = x + elif x.shape[1] == 2: + x, y = x.T + else: + raise ValueError("If only `x` is given as input, it has to " + "be of shape (2, N) or (N, 2); provided shape " + f"was {x.shape}.") + else: + x = np.asarray(x) + y = np.asarray(y) + + if x.size == 0 or y.size == 0: + raise ValueError("Inputs must not be empty.") + + if np.amax(x) == np.amin(x) and len(x) > 1: + raise ValueError("Cannot calculate a linear regression " + "if all x values are identical") + + n = len(x) + xmean = np.mean(x, None) + ymean = np.mean(y, None) + + # Average sums of square differences from the mean + # ssxm = mean( (x-mean(x))^2 ) + # ssxym = mean( (x-mean(x)) * (y-mean(y)) ) + ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat + + # R-value + # r = ssxym / sqrt( ssxm * ssym ) + if ssxm == 0.0 or ssym == 0.0: + # If the denominator was going to be 0 + r = 0.0 + else: + r = ssxym / np.sqrt(ssxm * ssym) + # Test for numerical error propagation (make sure -1 < r < 1) + if r > 1.0: + r = 1.0 + elif r < -1.0: + r = -1.0 + + slope = ssxym / ssxm + intercept = ymean - slope*xmean + if n == 2: + # handle case when only two points are passed in + if y[0] == y[1]: + prob = 1.0 + else: + prob = 0.0 + slope_stderr = 0.0 + intercept_stderr = 0.0 + else: + df = n - 2 # Number of degrees of freedom + # n-2 degrees of freedom because 2 has been used up + # to estimate the mean and standard deviation + t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY))) + + dist = _SimpleStudentT(df) + prob = _get_pvalue(t, dist, alternative, 
xp=np) + prob = prob[()] if prob.ndim == 0 else prob + + slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df) + + # Also calculate the standard error of the intercept + # The following relationship is used: + # ssxm = mean( (x-mean(x))^2 ) + # = ssx - sx*sx + # = mean( x^2 ) - mean(x)^2 + intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2) + + return LinregressResult(slope=slope, intercept=intercept, rvalue=r, + pvalue=prob, stderr=slope_stderr, + intercept_stderr=intercept_stderr) + + +def _xp_mean(x, /, *, axis=None, weights=None, keepdims=False, nan_policy='propagate', + dtype=None, xp=None): + r"""Compute the arithmetic mean along the specified axis. + + Parameters + ---------- + x : real array + Array containing real numbers whose mean is desired. + axis : int or tuple of ints, default: None + If an int or tuple of ints, the axis or axes of the input along which + to compute the statistic. The statistic of each axis-slice (e.g. row) + of the input will appear in a corresponding element of the output. + If ``None``, the input will be raveled before computing the statistic. + weights : real array, optional + If specified, an array of weights associated with the values in `x`; + otherwise ``1``. If `weights` and `x` do not have the same shape, the + arrays will be broadcasted before performing the calculation. See + Notes for details. + keepdims : boolean, optional + If this is set to ``True``, the axes which are reduced are left + in the result as dimensions with length one. With this option, + the result will broadcast correctly against the input array. + nan_policy : {'propagate', 'omit', 'raise'}, default: 'propagate' + Defines how to handle input NaNs. + + - ``propagate``: if a NaN is present in the axis slice (e.g. row) along + which the statistic is computed, the corresponding entry of the output + will be NaN. + - ``omit``: NaNs will be omitted when performing the calculation. 
+ If insufficient data remains in the axis slice along which the + statistic is computed, the corresponding entry of the output will be + NaN. + - ``raise``: if a NaN is present, a ``ValueError`` will be raised. + + dtype : dtype, optional + Type to use in computing the mean. For integer inputs, the default is + the default float type of the array library; for floating point inputs, + the dtype is that of the input. + + Returns + ------- + out : array + The mean of each slice + + Notes + ----- + Let :math:`x_i` represent element :math:`i` of data `x` and let :math:`w_i` + represent the corresponding element of `weights` after broadcasting. Then the + (weighted) mean :math:`\bar{x}_w` is given by: + + .. math:: + + \bar{x}_w = \frac{ \sum_{i=0}^{n-1} w_i x_i } + { \sum_{i=0}^{n-1} w_i } + + where :math:`n` is the number of elements along a slice. Note that this simplifies + to the familiar :math:`(\sum_i x_i) / n` when the weights are all ``1`` (default). + + The behavior of this function with respect to weights is somewhat different + from that of `np.average`. For instance, + `np.average` raises an error when `axis` is not specified and the shapes of `x` + and the `weights` array are not the same; `xp_mean` simply broadcasts the two. + Also, `np.average` raises an error when weights sum to zero along a slice; + `xp_mean` computes the appropriate result. The intent is for this function's + interface to be consistent with the rest of `scipy.stats`. + + Note that according to the formula, including NaNs with zero weights is not + the same as *omitting* NaNs with ``nan_policy='omit'``; in the former case, + the NaNs will continue to propagate through the calculation whereas in the + latter case, the NaNs are excluded entirely. 
+ + """ + # ensure that `x` and `weights` are array-API compatible arrays of identical shape + xp = array_namespace(x) if xp is None else xp + x = _asarray(x, dtype=dtype, subok=True) + weights = xp.asarray(weights, dtype=dtype) if weights is not None else weights + + # to ensure that this matches the behavior of decorated functions when one of the + # arguments has size zero, it's easiest to call a similar decorated function. + if is_numpy(xp) and (xp_size(x) == 0 + or (weights is not None and xp_size(weights) == 0)): + return gmean(x, weights=weights, axis=axis, keepdims=keepdims) + + x, weights = xp_broadcast_promote(x, weights, force_floating=True) + + # handle the special case of zero-sized arrays + message = (too_small_1d_not_omit if (x.ndim == 1 or axis is None) + else too_small_nd_not_omit) + if xp_size(x) == 0: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + res = xp.mean(x, axis=axis, keepdims=keepdims) + if xp_size(res) != 0: + warnings.warn(message, SmallSampleWarning, stacklevel=2) + return res + + contains_nan, _ = _contains_nan(x, nan_policy, xp_omit_okay=True, xp=xp) + if weights is not None: + contains_nan_w, _ = _contains_nan(weights, nan_policy, xp_omit_okay=True, xp=xp) + contains_nan = contains_nan | contains_nan_w + + # Handle `nan_policy='omit'` by giving zero weight to NaNs, whether they + # appear in `x` or `weights`. Emit warning if there is an all-NaN slice. 
+ message = (too_small_1d_omit if (x.ndim == 1 or axis is None) + else too_small_nd_omit) + if contains_nan and nan_policy == 'omit': + nan_mask = xp.isnan(x) + if weights is not None: + nan_mask |= xp.isnan(weights) + if xp.any(xp.all(nan_mask, axis=axis)): + warnings.warn(message, SmallSampleWarning, stacklevel=2) + weights = xp.ones_like(x) if weights is None else weights + x = xp.where(nan_mask, xp.asarray(0, dtype=x.dtype), x) + weights = xp.where(nan_mask, xp.asarray(0, dtype=x.dtype), weights) + + # Perform the mean calculation itself + if weights is None: + return xp.mean(x, axis=axis, keepdims=keepdims) + + norm = xp.sum(weights, axis=axis) + wsum = xp.sum(x * weights, axis=axis) + with np.errstate(divide='ignore', invalid='ignore'): + res = wsum/norm + + # Respect `keepdims` and convert NumPy 0-D arrays to scalars + if keepdims: + + if axis is None: + final_shape = (1,) * len(x.shape) + else: + # axis can be a scalar or sequence + axes = (axis,) if not isinstance(axis, Sequence) else axis + final_shape = list(x.shape) + for i in axes: + final_shape[i] = 1 + + res = xp.reshape(res, final_shape) + + return res[()] if res.ndim == 0 else res + + +def _xp_var(x, /, *, axis=None, correction=0, keepdims=False, nan_policy='propagate', + dtype=None, xp=None): + # an array-api compatible function for variance with scipy.stats interface + # and features (e.g. `nan_policy`). + xp = array_namespace(x) if xp is None else xp + x = _asarray(x, subok=True) + + # use `_xp_mean` instead of `xp.var` for desired warning behavior + # it would be nice to combine this with `_var`, which uses `_moment` + # and therefore warns when precision is lost, but that does not support + # `axis` tuples or keepdims. Eventually, `_axis_nan_policy` will simplify + # `axis` tuples and implement `keepdims` for non-NumPy arrays; then it will + # be easy. 
+ kwargs = dict(axis=axis, nan_policy=nan_policy, dtype=dtype, xp=xp) + mean = _xp_mean(x, keepdims=True, **kwargs) + x = _asarray(x, dtype=mean.dtype, subok=True) + x_mean = _demean(x, mean, axis, xp=xp) + x_mean_conj = (xp.conj(x_mean) if xp.isdtype(x_mean.dtype, 'complex floating') + else x_mean) # crossref data-apis/array-api#824 + var = _xp_mean(x_mean * x_mean_conj, keepdims=keepdims, **kwargs) + + if correction != 0: + if axis is None: + n = xp_size(x) + elif np.iterable(axis): # note: using NumPy on `axis` is OK + n = math.prod(x.shape[i] for i in axis) + else: + n = x.shape[axis] + # Or two lines with ternaries : ) + # axis = range(x.ndim) if axis is None else axis + # n = math.prod(x.shape[i] for i in axis) if iterable(axis) else x.shape[axis] + + n = xp.asarray(n, dtype=var.dtype) + + if nan_policy == 'omit': + nan_mask = xp.astype(xp.isnan(x), var.dtype) + n = n - xp.sum(nan_mask, axis=axis, keepdims=keepdims) + + # Produce NaNs silently when n - correction <= 0 + factor = _lazywhere(n-correction > 0, (n, n-correction), xp.divide, xp.nan) + var *= factor + + return var[()] if var.ndim == 0 else var + + +class _SimpleNormal: + # A very simple, array-API compatible normal distribution for use in + # hypothesis tests. May be replaced by new infrastructure Normal + # distribution in due time. + + def cdf(self, x): + return special.ndtr(x) + + def sf(self, x): + return special.ndtr(-x) + + def isf(self, x): + return -special.ndtri(x) + + +class _SimpleChi2: + # A very simple, array-API compatible chi-squared distribution for use in + # hypothesis tests. May be replaced by new infrastructure chi-squared + # distribution in due time. + def __init__(self, df): + self.df = df + + def cdf(self, x): + return special.chdtr(self.df, x) + + def sf(self, x): + return special.chdtrc(self.df, x) + + +class _SimpleBeta: + # A very simple, array-API compatible beta distribution for use in + # hypothesis tests. 
May be replaced by new infrastructure beta + # distribution in due time. + def __init__(self, a, b, *, loc=None, scale=None): + self.a = a + self.b = b + self.loc = loc + self.scale = scale + + def cdf(self, x): + if self.loc is not None or self.scale is not None: + loc = 0 if self.loc is None else self.loc + scale = 1 if self.scale is None else self.scale + return special.betainc(self.a, self.b, (x - loc)/scale) + return special.betainc(self.a, self.b, x) + + def sf(self, x): + if self.loc is not None or self.scale is not None: + loc = 0 if self.loc is None else self.loc + scale = 1 if self.scale is None else self.scale + return special.betaincc(self.a, self.b, (x - loc)/scale) + return special.betaincc(self.a, self.b, x) + + +class _SimpleStudentT: + # A very simple, array-API compatible t distribution for use in + # hypothesis tests. May be replaced by new infrastructure t + # distribution in due time. + def __init__(self, df): + self.df = df + + def cdf(self, t): + return special.stdtr(self.df, t) + + def sf(self, t): + return special.stdtr(self.df, -t) diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py b/phi4/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py new file mode 100644 index 0000000000000000000000000000000000000000..bdb475614d7d28edaf3ff04bfbda3f5a18242bef --- /dev/null +++ b/phi4/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py @@ -0,0 +1,259 @@ +import numpy as np + +from scipy import stats +from ._stats_py import _get_pvalue, _rankdata, _SimpleNormal +from . 
import _morestats +from ._axis_nan_policy import _broadcast_arrays +from ._hypotests import _get_wilcoxon_distr +from scipy._lib._util import _lazywhere, _get_nan + + +class WilcoxonDistribution: + + def __init__(self, n): + n = np.asarray(n).astype(int, copy=False) + self.n = n + self._dists = {ni: _get_wilcoxon_distr(ni) for ni in np.unique(n)} + + def _cdf1(self, k, n): + pmfs = self._dists[n] + return pmfs[:k + 1].sum() + + def _cdf(self, k, n): + return np.vectorize(self._cdf1, otypes=[float])(k, n) + + def _sf1(self, k, n): + pmfs = self._dists[n] + return pmfs[k:].sum() + + def _sf(self, k, n): + return np.vectorize(self._sf1, otypes=[float])(k, n) + + def mean(self): + return self.n * (self.n + 1) / 4 + + def _prep(self, k): + k = np.asarray(k).astype(int, copy=False) + mn = self.mean() + out = np.empty(k.shape, dtype=np.float64) + return k, mn, out + + def cdf(self, k): + k, mn, out = self._prep(k) + return _lazywhere(k <= mn, (k, self.n), self._cdf, + f2=lambda k, n: 1 - self._sf(k+1, n))[()] + + def sf(self, k): + k, mn, out = self._prep(k) + return _lazywhere(k <= mn, (k, self.n), self._sf, + f2=lambda k, n: 1 - self._cdf(k-1, n))[()] + + +def _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis): + + axis = np.asarray(axis)[()] + message = "`axis` must be an integer." + if not np.issubdtype(axis.dtype, np.integer) or axis.ndim != 0: + raise ValueError(message) + + message = '`axis` must be compatible with the shape(s) of `x` (and `y`)' + try: + if y is None: + x = np.asarray(x) + d = x + else: + x, y = _broadcast_arrays((x, y), axis=axis) + d = x - y + d = np.moveaxis(d, axis, -1) + except np.AxisError as e: + raise ValueError(message) from e + + message = "`x` and `y` must have the same length along `axis`." + if y is not None and x.shape[axis] != y.shape[axis]: + raise ValueError(message) + + message = "`x` (and `y`, if provided) must be an array of real numbers." 
+ if np.issubdtype(d.dtype, np.integer): + d = d.astype(np.float64) + if not np.issubdtype(d.dtype, np.floating): + raise ValueError(message) + + zero_method = str(zero_method).lower() + zero_methods = {"wilcox", "pratt", "zsplit"} + message = f"`zero_method` must be one of {zero_methods}." + if zero_method not in zero_methods: + raise ValueError(message) + + corrections = {True, False} + message = f"`correction` must be one of {corrections}." + if correction not in corrections: + raise ValueError(message) + + alternative = str(alternative).lower() + alternatives = {"two-sided", "less", "greater"} + message = f"`alternative` must be one of {alternatives}." + if alternative not in alternatives: + raise ValueError(message) + + if not isinstance(method, stats.PermutationMethod): + methods = {"auto", "asymptotic", "exact"} + message = (f"`method` must be one of {methods} or " + "an instance of `stats.PermutationMethod`.") + if method not in methods: + raise ValueError(message) + output_z = True if method == 'asymptotic' else False + + # For small samples, we decide later whether to perform an exact test or a + # permutation test. The reason is that the presence of ties is not + # known at the input validation stage. + n_zero = np.sum(d == 0) + if method == "auto" and d.shape[-1] > 50: + method = "asymptotic" + + return d, zero_method, correction, alternative, method, axis, output_z, n_zero + + +def _wilcoxon_statistic(d, method, zero_method='wilcox'): + + i_zeros = (d == 0) + + if zero_method == 'wilcox': + # Wilcoxon's method for treating zeros was to remove them from + # the calculation. We do this by replacing 0s with NaNs, which + # are ignored anyway. 
        if not d.flags['WRITEABLE']:
            d = d.copy()
        d[i_zeros] = np.nan

    i_nan = np.isnan(d)
    n_nan = np.sum(i_nan, axis=-1)
    count = d.shape[-1] - n_nan

    r, t = _rankdata(abs(d), 'average', return_ties=True)

    r_plus = np.sum((d > 0) * r, axis=-1)
    r_minus = np.sum((d < 0) * r, axis=-1)

    has_ties = (t == 0).any()

    if zero_method == "zsplit":
        # The "zero-split" method for treating zeros is to add half their contribution
        # to r_plus and half to r_minus.
        # See gh-2263 for the origin of this method.
        r_zero_2 = np.sum(i_zeros * r, axis=-1) / 2
        r_plus += r_zero_2
        r_minus += r_zero_2

    mn = count * (count + 1.) * 0.25
    se = count * (count + 1.) * (2. * count + 1.)

    if zero_method == "pratt":
        # Pratt's method for treating zeros was just to modify the z-statistic.

        # normal approximation needs to be adjusted, see Cureton (1967)
        n_zero = i_zeros.sum(axis=-1)
        mn -= n_zero * (n_zero + 1.) * 0.25
        se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)

        # zeros are not to be included in tie-correction.
        # any tie counts corresponding with zeros are in the 0th column
        t[i_zeros.any(axis=-1), 0] = 0

    tie_correct = (t**3 - t).sum(axis=-1)
    se -= tie_correct/2
    se = np.sqrt(se / 24)

    # se = 0 means that no non-zero values are left in d. we only need z
    # if method is asymptotic. however, if method="auto", the switch to
    # asymptotic might only happen after the statistic is calculated, so z
    # needs to be computed. in all other cases, avoid division by zero warning
    # (z is not needed anyways)
    if method in ["asymptotic", "auto"]:
        z = (r_plus - mn) / se
    else:
        z = np.nan

    return r_plus, r_minus, se, z, count, has_ties


def _correction_sign(z, alternative):
    # Direction of the continuity correction for the given alternative.
    if alternative == 'greater':
        return 1
    elif alternative == 'less':
        return -1
    else:
        return np.sign(z)


def _wilcoxon_nd(x, y=None, zero_method='wilcox', correction=True,
                 alternative='two-sided', method='auto', axis=0):
    # N-dimensional implementation of the Wilcoxon signed-rank test:
    # validates inputs, computes the statistic, and dispatches the p-value
    # computation to the asymptotic, exact, or permutation backend.

    temp = _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis)
    d, zero_method, correction, alternative, method, axis, output_z, n_zero = temp

    if d.size == 0:
        NaN = _get_nan(d)
        res = _morestats.WilcoxonResult(statistic=NaN, pvalue=NaN)
        if method == 'asymptotic':
            res.zstatistic = NaN
        return res

    r_plus, r_minus, se, z, count, has_ties = _wilcoxon_statistic(
        d, method, zero_method
    )

    # we only know if there are ties after computing the statistic and not
    # at the input validation stage. if the original method was auto and
    # the decision was to use an exact test, we override this to
    # a permutation test now (since method='exact' is not exact in the
    # presence of ties)
    if method == "auto":
        if not (has_ties or n_zero > 0):
            method = "exact"
        elif d.shape[-1] <= 13:
            # the possible outcomes to be simulated by the permutation test
            # are 2**n, where n is the sample size.
            # if n <= 13, the p-value is deterministic since 2**13 is less
            # than 9999, the default number of n_resamples
            method = stats.PermutationMethod()
        else:
            # if there are ties and the sample size is too large to
            # run a deterministic permutation test, fall back to asymptotic
            method = "asymptotic"

    if method == 'asymptotic':
        if correction:
            sign = _correction_sign(z, alternative)
            z -= sign * 0.5 / se
        p = _get_pvalue(z, _SimpleNormal(), alternative, xp=np)
    elif method == 'exact':
        dist = WilcoxonDistribution(count)
        # The null distribution in `dist` is exact only if there are no ties
        # or zeros. If there are ties or zeros, the statistic can be non-
        # integral, but the null distribution is only defined for integral
        # values of the statistic. Therefore, we're conservative: round
        # non-integral statistic up before computing CDF and down before
        # computing SF. This preserves symmetry w.r.t. alternatives and
        # order of the input arguments. See gh-19872.
        if alternative == 'less':
            p = dist.cdf(np.ceil(r_plus))
        elif alternative == 'greater':
            p = dist.sf(np.floor(r_plus))
        else:
            p = 2 * np.minimum(dist.sf(np.floor(r_plus)),
                               dist.cdf(np.ceil(r_plus)))
        p = np.clip(p, 0, 1)
    else:  # `PermutationMethod` instance (already validated)
        p = stats.permutation_test(
            (d,), lambda d: _wilcoxon_statistic(d, method, zero_method)[0],
            permutation_type='samples', **method._asdict(),
            alternative=alternative, axis=-1).pvalue

    # for backward compatibility...
    statistic = np.minimum(r_plus, r_minus) if alternative=='two-sided' else r_plus
    z = -np.abs(z) if (alternative == 'two-sided' and method == 'asymptotic') else z

    res = _morestats.WilcoxonResult(statistic=statistic, pvalue=p[()])
    if output_z:
        res.zstatistic = z[()]
    return res
diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/contingency.py b/phi4/lib/python3.10/site-packages/scipy/stats/contingency.py
new file mode 100644
index 0000000000000000000000000000000000000000..809df62e3cdaa2887fab2164afbe01e496ac6315
--- /dev/null
+++ b/phi4/lib/python3.10/site-packages/scipy/stats/contingency.py
@@ -0,0 +1,521 @@
"""
Contingency table functions (:mod:`scipy.stats.contingency`)
============================================================

Functions for creating and analyzing contingency tables.

.. currentmodule:: scipy.stats.contingency

.. autosummary::
   :toctree: generated/

   chi2_contingency
   relative_risk
   odds_ratio
   crosstab
   association

   expected_freq
   margins

"""


from functools import reduce
import math
import numpy as np
from ._stats_py import power_divergence, _untabulate
from ._relative_risk import relative_risk
from ._crosstab import crosstab
from ._odds_ratio import odds_ratio
from scipy._lib._bunch import _make_tuple_bunch
from scipy import stats


__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
           'association', 'relative_risk', 'odds_ratio']


def margins(a):
    """Return a list of the marginal sums of the array `a`.

    Parameters
    ----------
    a : ndarray
        The array for which to compute the marginal sums.

    Returns
    -------
    margsums : list of ndarrays
        A list of length `a.ndim`. `margsums[k]` is the result
        of summing `a` over all axes except `k`; it has the same
        number of dimensions as `a`, but the length of each axis
        except axis `k` will be 1.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.contingency import margins

    >>> a = np.arange(12).reshape(2, 6)
    >>> a
    array([[ 0,  1,  2,  3,  4,  5],
           [ 6,  7,  8,  9, 10, 11]])
    >>> m0, m1 = margins(a)
    >>> m0
    array([[15],
           [51]])
    >>> m1
    array([[ 6,  8, 10, 12, 14, 16]])

    >>> b = np.arange(24).reshape(2,3,4)
    >>> m0, m1, m2 = margins(b)
    >>> m0
    array([[[ 66]],
           [[210]]])
    >>> m1
    array([[[ 60],
            [ 92],
            [124]]])
    >>> m2
    array([[[60, 66, 72, 78]]])
    """
    margsums = []
    ranged = list(range(a.ndim))
    for k in ranged:
        # sum over every axis except `k`, keeping dimensions for broadcasting
        marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k])
        margsums.append(marg)
    return margsums


def expected_freq(observed):
    """
    Compute the expected frequencies from a contingency table.

    Given an n-dimensional contingency table of observed frequencies,
    compute the expected frequencies for the table based on the marginal
    sums under the assumption that the groups associated with each
    dimension are independent.

    Parameters
    ----------
    observed : array_like
        The table of observed frequencies.  (While this function can handle
        a 1-D array, that case is trivial.  Generally `observed` is at
        least 2-D.)

    Returns
    -------
    expected : ndarray of float64
        The expected frequencies, based on the marginal sums of the table.
        Same shape as `observed`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.contingency import expected_freq
    >>> observed = np.array([[10, 10, 20],[20, 20, 20]])
    >>> expected_freq(observed)
    array([[ 12.,  12.,  16.],
           [ 18.,  18.,  24.]])

    """
    # Typically `observed` is an integer array. If `observed` has a large
    # number of dimensions or holds large values, some of the following
    # computations may overflow, so we first switch to floating point.
    observed = np.asarray(observed, dtype=np.float64)

    # Create a list of the marginal sums.
    margsums = margins(observed)

    # Create the array of expected frequencies.  The shapes of the
    # marginal sums returned by apply_over_axes() are just what we
    # need for broadcasting in the following product.
    d = observed.ndim
    expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
    return expected


Chi2ContingencyResult = _make_tuple_bunch(
    'Chi2ContingencyResult',
    ['statistic', 'pvalue', 'dof', 'expected_freq'], []
)


def chi2_contingency(observed, correction=True, lambda_=None, *, method=None):
    """Chi-square test of independence of variables in a contingency table.

    This function computes the chi-square statistic and p-value for the
    hypothesis test of independence of the observed frequencies in the
    contingency table [1]_ `observed`.  The expected frequencies are computed
    based on the marginal sums under the assumption of independence; see
    `scipy.stats.contingency.expected_freq`.  The number of degrees of
    freedom is (expressed using numpy functions and attributes)::

        dof = observed.size - sum(observed.shape) + observed.ndim - 1


    Parameters
    ----------
    observed : array_like
        The contingency table. The table contains the observed frequencies
        (i.e. number of occurrences) in each category.  In the two-dimensional
        case, the table is often described as an "R x C table".
    correction : bool, optional
        If True, *and* the degrees of freedom is 1, apply Yates' correction
        for continuity.  The effect of the correction is to adjust each
        observed value by 0.5 towards the corresponding expected value.
    lambda_ : float or str, optional
        By default, the statistic computed in this test is Pearson's
        chi-squared statistic [2]_.  `lambda_` allows a statistic from the
        Cressie-Read power divergence family [3]_ to be used instead.  See
        `scipy.stats.power_divergence` for details.
    method : ResamplingMethod, optional
        Defines the method used to compute the p-value. Compatible only with
        `correction=False`, default `lambda_`, and two-way tables.
        If `method` is an instance of `PermutationMethod`/`MonteCarloMethod`,
        the p-value is computed using
        `scipy.stats.permutation_test`/`scipy.stats.monte_carlo_test` with the
        provided configuration options and other appropriate settings.
        Otherwise, the p-value is computed as documented in the notes.
        Note that if `method` is an instance of `MonteCarloMethod`, the ``rvs``
        attribute must be left unspecified; Monte Carlo samples are always drawn
        using the ``rvs`` method of `scipy.stats.random_table`.

        .. versionadded:: 1.15.0


    Returns
    -------
    res : Chi2ContingencyResult
        An object containing attributes:

        statistic : float
            The test statistic.
        pvalue : float
            The p-value of the test.
        dof : int
            The degrees of freedom. NaN if `method` is not ``None``.
        expected_freq : ndarray, same shape as `observed`
            The expected frequencies, based on the marginal sums of the table.

    See Also
    --------
    scipy.stats.contingency.expected_freq
    scipy.stats.fisher_exact
    scipy.stats.chisquare
    scipy.stats.power_divergence
    scipy.stats.barnard_exact
    scipy.stats.boschloo_exact
    :ref:`hypothesis_chi2_contingency` : Extended example

    Notes
    -----
    An often quoted guideline for the validity of this calculation is that
    the test should be used only if the observed and expected frequencies
    in each cell are at least 5.

    This is a test for the independence of different categories of a
    population. The test is only meaningful when the dimension of
    `observed` is two or more.  Applying the test to a one-dimensional
    table will always result in `expected` equal to `observed` and a
    chi-square statistic equal to 0.

    This function does not handle masked arrays, because the calculation
    does not make sense with missing values.

    Like `scipy.stats.chisquare`, this function computes a chi-square
    statistic; the convenience this function provides is to figure out the
    expected frequencies and degrees of freedom from the given contingency
    table. If these were already known, and if the Yates' correction was not
    required, one could use `scipy.stats.chisquare`.  That is, if one calls::

        res = chi2_contingency(obs, correction=False)

    then the following is true::

        (res.statistic, res.pvalue) == stats.chisquare(obs.ravel(),
                                                       f_exp=ex.ravel(),
                                                       ddof=obs.size - 1 - dof)

    The `lambda_` argument was added in version 0.13.0 of scipy.

    References
    ----------
    .. [1] "Contingency table",
           https://en.wikipedia.org/wiki/Contingency_table
    .. [2] "Pearson's chi-squared test",
           https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
    .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.

    Examples
    --------
    A two-way example (2 x 3):

    >>> import numpy as np
    >>> from scipy.stats import chi2_contingency
    >>> obs = np.array([[10, 10, 20], [20, 20, 20]])
    >>> res = chi2_contingency(obs)
    >>> res.statistic
    2.7777777777777777
    >>> res.pvalue
    0.24935220877729619
    >>> res.dof
    2
    >>> res.expected_freq
    array([[ 12.,  12.,  16.],
           [ 18.,  18.,  24.]])

    Perform the test using the log-likelihood ratio (i.e. the "G-test")
    instead of Pearson's chi-squared statistic.

    >>> res = chi2_contingency(obs, lambda_="log-likelihood")
    >>> res.statistic
    2.7688587616781319
    >>> res.pvalue
    0.25046668010954165

    A four-way example (2 x 2 x 2 x 2):

    >>> obs = np.array(
    ...     [[[[12, 17],
    ...        [11, 16]],
    ...       [[11, 12],
    ...        [15, 16]]],
    ...      [[[23, 15],
    ...        [30, 22]],
    ...       [[14, 17],
    ...        [15, 16]]]])
    >>> res = chi2_contingency(obs)
    >>> res.statistic
    8.7584514426741897
    >>> res.pvalue
    0.64417725029295503

    When the sum of the elements in a two-way table is small, the p-value
    produced by the default asymptotic approximation may be inaccurate.
    Consider passing a `PermutationMethod` or `MonteCarloMethod` as the
    `method` parameter with `correction=False`.

    >>> from scipy.stats import PermutationMethod
    >>> obs = np.asarray([[12, 3],
    ...                   [17, 16]])
    >>> res = chi2_contingency(obs, correction=False)
    >>> ref = chi2_contingency(obs, correction=False, method=PermutationMethod())
    >>> res.pvalue, ref.pvalue
    (0.0614122539870913, 0.1074)  # may vary

    For a more detailed example, see :ref:`hypothesis_chi2_contingency`.

    """
    observed = np.asarray(observed)
    if np.any(observed < 0):
        raise ValueError("All values in `observed` must be nonnegative.")
    if observed.size == 0:
        raise ValueError("No data; `observed` has size 0.")

    expected = expected_freq(observed)
    if np.any(expected == 0):
        # Include one of the positions where expected is zero in
        # the exception message.
        zeropos = list(zip(*np.nonzero(expected == 0)))[0]
        raise ValueError("The internally computed table of expected "
                         f"frequencies has a zero element at {zeropos}.")

    if method is not None:
        # resampling p-values bypass the asymptotic machinery entirely
        return _chi2_resampling_methods(observed, expected, correction, lambda_, method)

    # The degrees of freedom
    dof = expected.size - sum(expected.shape) + expected.ndim - 1

    if dof == 0:
        # Degenerate case; this occurs when `observed` is 1D (or, more
        # generally, when it has only one nontrivial dimension).  In this
        # case, we also have observed == expected, so chi2 is 0.
        chi2 = 0.0
        p = 1.0
    else:
        if dof == 1 and correction:
            # Adjust `observed` according to Yates' correction for continuity.
            # Magnitude of correction no bigger than difference; see gh-13875
            diff = expected - observed
            direction = np.sign(diff)
            magnitude = np.minimum(0.5, np.abs(diff))
            observed = observed + magnitude * direction

        chi2, p = power_divergence(observed, expected,
                                   ddof=observed.size - 1 - dof, axis=None,
                                   lambda_=lambda_)

    return Chi2ContingencyResult(chi2, p, dof, expected)


def _chi2_resampling_methods(observed, expected, correction, lambda_, method):
    # Validate the `method` combination and dispatch to the permutation or
    # Monte Carlo p-value backend.

    if observed.ndim != 2:
        message = 'Use of `method` is only compatible with two-way tables.'
        raise ValueError(message)

    if correction:
        message = f'`{correction=}` is not compatible with `{method=}.`'
        raise ValueError(message)

    if lambda_ is not None:
        message = f'`{lambda_=}` is not compatible with `{method=}.`'
        raise ValueError(message)

    if isinstance(method, stats.PermutationMethod):
        res = _chi2_permutation_method(observed, expected, method)
    elif isinstance(method, stats.MonteCarloMethod):
        res = _chi2_monte_carlo_method(observed, expected, method)
    else:
        message = (f'`{method=}` not recognized; if provided, `method` must be an '
                   'instance of `PermutationMethod` or `MonteCarloMethod`.')
        raise ValueError(message)

    return Chi2ContingencyResult(res.statistic, res.pvalue, np.nan, expected)


def _chi2_permutation_method(observed, expected, method):
    # Permutation-based p-value for a two-way table.
    x, y = _untabulate(observed)
    # `permutation_test` with `permutation_type='pairings' permutes the order of `x`,
    # which pairs observations in `x` with different observations in `y`.
    def statistic(x):
        # crosstab the resample and compute the statistic
        table = crosstab(x, y)[1]
        return np.sum((table - expected)**2/expected)

    return stats.permutation_test((x,), statistic, permutation_type='pairings',
                                  alternative='greater', **method._asdict())


def _chi2_monte_carlo_method(observed, expected, method):
    # Monte Carlo p-value for a two-way table; resamples are random tables
    # with the observed marginals.
    method = method._asdict()

    if method.pop('rvs', None) is not None:
        message = ('If the `method` argument of `chi2_contingency` is an '
                   'instance of `MonteCarloMethod`, its `rvs` attribute '
                   'must be unspecified. Use the `MonteCarloMethod` `rng` argument '
                   'to control the random state.')
        raise ValueError(message)
    rng = np.random.default_rng(method.pop('rng', None))

    # `random_table.rvs` produces random contingency tables with the given marginals
    # under the null hypothesis of independence
    rowsums, colsums = stats.contingency.margins(observed)
    X = stats.random_table(rowsums.ravel(), colsums.ravel(), seed=rng)
    def rvs(size):
        # flatten each sampled table so `monte_carlo_test` sees 1-D samples
        n_resamples = size[0]
        return X.rvs(size=n_resamples).reshape(size)

    expected = expected.ravel()
    def statistic(table, axis):
        return np.sum((table - expected)**2/expected, axis=axis)

    return stats.monte_carlo_test(observed.ravel(), rvs, statistic,
                                  alternative='greater', **method)


def association(observed, method="cramer", correction=False, lambda_=None):
    """Calculates degree of association between two nominal variables.

    The function provides the option for computing one of three measures of
    association between two nominal variables from the data given in a 2d
    contingency table: Tschuprow's T, Pearson's Contingency Coefficient
    and Cramer's V.

    Parameters
    ----------
    observed : array-like
        The array of observed values
    method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
        The association test statistic.
    correction : bool, optional
        Inherited from `scipy.stats.contingency.chi2_contingency()`
    lambda_ : float or str, optional
        Inherited from `scipy.stats.contingency.chi2_contingency()`

    Returns
    -------
    statistic : float
        Value of the test statistic

    Notes
    -----
    Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient, all
    measure the degree to which two nominal or ordinal variables are related,
    or the level of their association. This differs from correlation, although
    many often mistakenly consider them equivalent. Correlation measures in
    what way two variables are related, whereas, association measures how
    related the variables are. As such, association does not subsume
    independent variables, and is rather a test of independence. A value of
    1.0 indicates perfect association, and 0.0 means the variables have no
    association.

    Both the Cramer's V and Tschuprow's T are extensions of the phi
    coefficient.  Moreover, due to the close relationship between the
    Cramer's V and Tschuprow's T the returned values can often be similar
    or even equivalent.  They are likely to diverge more as the array shape
    diverges from a 2x2.

    References
    ----------
    .. [1] "Tschuprow's T",
           https://en.wikipedia.org/wiki/Tschuprow's_T
    .. [2] Tschuprow, A. A. (1939)
           Principles of the Mathematical Theory of Correlation;
           translated by M. Kantorowitsch. W. Hodge & Co.
    .. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
    .. [4] "Nominal Association: Phi and Cramer's V",
           http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
    .. [5] Gingrich, Paul, "Association Between Variables",
           http://uregina.ca/~gingrich/ch11a.pdf

    Examples
    --------
    An example with a 4x2 contingency table:

    >>> import numpy as np
    >>> from scipy.stats.contingency import association
    >>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])

    Pearson's contingency coefficient

    >>> association(obs4x2, method="pearson")
    0.18303298140595667

    Cramer's V

    >>> association(obs4x2, method="cramer")
    0.18617813077483678

    Tschuprow's T

    >>> association(obs4x2, method="tschuprow")
    0.14146478765062995
    """
    arr = np.asarray(observed)
    if not np.issubdtype(arr.dtype, np.integer):
        raise ValueError("`observed` must be an integer array.")

    if len(arr.shape) != 2:
        raise ValueError("method only accepts 2d arrays")

    chi2_stat = chi2_contingency(arr, correction=correction,
                                 lambda_=lambda_)

    # phi^2 = chi^2 / N is the mean-square contingency; each method below
    # normalizes it differently before the final square root
    phi2 = chi2_stat.statistic / arr.sum()
    n_rows, n_cols = arr.shape
    if method == "cramer":
        value = phi2 / min(n_cols - 1, n_rows - 1)
    elif method == "tschuprow":
        value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
    elif method == 'pearson':
        value = phi2 / (1 + phi2)
    else:
        raise ValueError("Invalid argument value: 'method' argument must "
                         "be 'cramer', 'tschuprow', or 'pearson'")

    return math.sqrt(value)
diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/kde.py b/phi4/lib/python3.10/site-packages/scipy/stats/kde.py
new file mode 100644
index 0000000000000000000000000000000000000000..4401da5a30f4452ab394232d3928493d0e3b77ec
--- /dev/null
+++ b/phi4/lib/python3.10/site-packages/scipy/stats/kde.py
@@ -0,0 +1,18 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = ["gaussian_kde"]  # noqa: F822


def __dir__():
    # expose only the deprecated public names
    return __all__


def __getattr__(name):
    # lazily forward attribute access with a deprecation warning
    return _sub_module_deprecation(sub_package="stats", module="kde",
                                   private_modules=["_kde"], all=__all__,
                                   attribute=name)
diff --git a/phi4/lib/python3.10/site-packages/scipy/stats/mstats_extras.py b/phi4/lib/python3.10/site-packages/scipy/stats/mstats_extras.py
new file mode 100644
index 0000000000000000000000000000000000000000..fec695329cf2c2d58a4918cc99e209c0650c3ea6
--- /dev/null
+++ b/phi4/lib/python3.10/site-packages/scipy/stats/mstats_extras.py
@@ -0,0 +1,25 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'compare_medians_ms',
    'hdquantiles', 'hdmedian', 'hdquantiles_sd',
    'idealfourths',
    'median_cihs','mjci','mquantiles_cimj',
    'rsh',
    'trimmed_mean_ci',
]


def __dir__():
    # expose only the deprecated public names
    return __all__


def __getattr__(name):
    # lazily forward attribute access with a deprecation warning
    return _sub_module_deprecation(sub_package="stats", module="mstats_extras",
                                   private_modules=["_mstats_extras"], all=__all__,
                                   attribute=name, correct_module="mstats")