diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_basic.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..197f9c625e97a8de569af2c6a39a9d6dddaee1e0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_basic.py @@ -0,0 +1,4383 @@ +# this program corresponds to special.py + +### Means test is not done yet +# E Means test is giving error (E) +# F Means test is failing (F) +# EF Means test is giving error and Failing +#! Means test is segfaulting +# 8 Means test runs forever + +### test_besselpoly +### test_mathieu_a +### test_mathieu_even_coef +### test_mathieu_odd_coef +### test_modfresnelp +### test_modfresnelm +# test_pbdv_seq +### test_pbvv_seq +### test_sph_harm + +import functools +import itertools +import operator +import platform +import sys + +import numpy as np +from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, + log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, double, + array_equal) + +import pytest +from pytest import raises as assert_raises +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_equal, assert_array_almost_equal, assert_approx_equal, + assert_, assert_allclose, assert_array_almost_equal_nulp, + suppress_warnings) + +from scipy import special +import scipy.special._ufuncs as cephes +from scipy.special import ellipe, ellipk, ellipkm1 +from scipy.special import elliprc, elliprd, elliprf, elliprg, elliprj +from scipy.special import mathieu_odd_coef, mathieu_even_coef, stirling2 +from scipy._lib._util import np_long, np_ulong + +from scipy.special._basic import _FACTORIALK_LIMITS_64BITS, \ + _FACTORIALK_LIMITS_32BITS +from scipy.special._testutils import with_special_errors, \ + assert_func_equal, FuncData + +import math + + +class TestCephes: + def test_airy(self): + cephes.airy(0) + + def test_airye(self): + cephes.airye(0) + + def 
test_binom(self): + n = np.array([0.264, 4, 5.2, 17]) + k = np.array([2, 0.4, 7, 3.3]) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389, + -0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846], + [10.92, 2.22993515861399, -0.00585728, 10.468891352063146], + [136, 3.5252179590758828, 19448, 1024.5526916174495]]) + assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13) + + # Test branches in implementation + np.random.seed(1234) + n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500] + k = np.arange(0, 102) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + + assert_func_equal(cephes.binom, + cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)), + nk, + atol=1e-10, rtol=1e-10) + + def test_binom_2(self): + # Test branches in implementation + np.random.seed(1234) + n = np.r_[np.logspace(1, 300, 20)] + k = np.arange(0, 102) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + + assert_func_equal(cephes.binom, + cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)), + nk, + atol=1e-10, rtol=1e-10) + + def test_binom_exact(self): + @np.vectorize + def binom_int(n, k): + n = int(n) + k = int(k) + num = 1 + den = 1 + for i in range(1, k+1): + num *= i + n - k + den *= i + return float(num/den) + + np.random.seed(1234) + n = np.arange(1, 15) + k = np.arange(0, 15) + nk = np.array(np.broadcast_arrays(n[:,None], k[None,:]) + ).reshape(2, -1).T + nk = nk[nk[:,0] >= nk[:,1]] + assert_func_equal(cephes.binom, + binom_int(nk[:,0], nk[:,1]), + nk, + atol=0, rtol=0) + + def test_binom_nooverflow_8346(self): + # Test (binom(n, k) doesn't overflow prematurely */ + dataset = [ + (1000, 500, 2.70288240945436551e+299), + (1002, 501, 1.08007396880791225e+300), + (1004, 502, 4.31599279169058121e+300), + (1006, 503, 1.72468101616263781e+301), + (1008, 504, 6.89188009236419153e+301), + (1010, 505, 
2.75402257948335448e+302), + (1012, 506, 1.10052048531923757e+303), + (1014, 507, 4.39774063758732849e+303), + (1016, 508, 1.75736486108312519e+304), + (1018, 509, 7.02255427788423734e+304), + (1020, 510, 2.80626776829962255e+305), + (1022, 511, 1.12140876377061240e+306), + (1024, 512, 4.48125455209897109e+306), + (1026, 513, 1.79075474304149900e+307), + (1028, 514, 7.15605105487789676e+307) + ] + dataset = np.asarray(dataset) + FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check() + + def test_bdtr(self): + assert_equal(cephes.bdtr(1,1,0.5),1.0) + + def test_bdtri(self): + assert_equal(cephes.bdtri(1,3,0.5),0.5) + + def test_bdtrc(self): + assert_equal(cephes.bdtrc(1,3,0.5),0.5) + + def test_bdtrin(self): + assert_equal(cephes.bdtrin(1,0,1),5.0) + + def test_bdtrik(self): + cephes.bdtrik(1,3,0.5) + + def test_bei(self): + assert_equal(cephes.bei(0),0.0) + + def test_beip(self): + assert_equal(cephes.beip(0),0.0) + + def test_ber(self): + assert_equal(cephes.ber(0),1.0) + + def test_berp(self): + assert_equal(cephes.berp(0),0.0) + + def test_besselpoly(self): + assert_equal(cephes.besselpoly(0,0,0),1.0) + + def test_btdtr(self): + with pytest.deprecated_call(match='deprecated in SciPy 1.12.0'): + y = special.btdtr(1, 1, 1) + assert_equal(y, 1.0) + + def test_btdtri(self): + with pytest.deprecated_call(match='deprecated in SciPy 1.12.0'): + y = special.btdtri(1, 1, 1) + assert_equal(y, 1.0) + + def test_btdtria(self): + assert_equal(cephes.btdtria(1,1,1),5.0) + + def test_btdtrib(self): + assert_equal(cephes.btdtrib(1,1,1),5.0) + + def test_cbrt(self): + assert_approx_equal(cephes.cbrt(1),1.0) + + def test_chdtr(self): + assert_equal(cephes.chdtr(1,0),0.0) + + def test_chdtrc(self): + assert_equal(cephes.chdtrc(1,0),1.0) + + def test_chdtri(self): + assert_equal(cephes.chdtri(1,1),0.0) + + def test_chdtriv(self): + assert_equal(cephes.chdtriv(0,0),5.0) + + def test_chndtr(self): + assert_equal(cephes.chndtr(0,1,0),0.0) + + # Each row holds (x, nu, lam, 
expected_value) + # These values were computed using Wolfram Alpha with + # CDF[NoncentralChiSquareDistribution[nu, lam], x] + values = np.array([ + [25.00, 20.0, 400, 4.1210655112396197139e-57], + [25.00, 8.00, 250, 2.3988026526832425878e-29], + [0.001, 8.00, 40., 5.3761806201366039084e-24], + [0.010, 8.00, 40., 5.45396231055999457039e-20], + [20.00, 2.00, 107, 1.39390743555819597802e-9], + [22.50, 2.00, 107, 7.11803307138105870671e-9], + [25.00, 2.00, 107, 3.11041244829864897313e-8], + [3.000, 2.00, 1.0, 0.62064365321954362734], + [350.0, 300., 10., 0.93880128006276407710], + [100.0, 13.5, 10., 0.99999999650104210949], + [700.0, 20.0, 400, 0.99999999925680650105], + [150.0, 13.5, 10., 0.99999999999999983046], + [160.0, 13.5, 10., 0.99999999999999999518], # 1.0 + ]) + cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2]) + assert_allclose(cdf, values[:, 3], rtol=1e-12) + + assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0) + assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0) + assert_(np.isnan(cephes.chndtr(np.nan, 1, 2))) + assert_(np.isnan(cephes.chndtr(5, np.nan, 2))) + assert_(np.isnan(cephes.chndtr(5, 1, np.nan))) + + def test_chndtridf(self): + assert_equal(cephes.chndtridf(0,0,1),5.0) + + def test_chndtrinc(self): + assert_equal(cephes.chndtrinc(0,1,0),5.0) + + def test_chndtrix(self): + assert_equal(cephes.chndtrix(0,1,0),0.0) + + def test_cosdg(self): + assert_equal(cephes.cosdg(0),1.0) + + def test_cosm1(self): + assert_equal(cephes.cosm1(0),0.0) + + def test_cotdg(self): + assert_almost_equal(cephes.cotdg(45),1.0) + + def test_dawsn(self): + assert_equal(cephes.dawsn(0),0.0) + assert_allclose(cephes.dawsn(1.23), 0.50053727749081767) + + def test_diric(self): + # Test behavior near multiples of 2pi. Regression test for issue + # described in gh-4001. 
+ n_odd = [1, 5, 25] + x = np.array(2*np.pi + 5e-5).astype(np.float32) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7) + x = np.array(2*np.pi + 1e-9).astype(np.float64) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15) + x = np.array(2*np.pi + 1e-15).astype(np.float64) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15) + if hasattr(np, 'float128'): + # No float128 available in 32-bit numpy + x = np.array(2*np.pi + 1e-12).astype(np.float128) + assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19) + + n_even = [2, 4, 24] + x = np.array(2*np.pi + 1e-9).astype(np.float64) + assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15) + + # Test at some values not near a multiple of pi + x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi) + octave_result = [0.872677996249965, 0.539344662916632, + 0.127322003750035, -0.206011329583298] + assert_almost_equal(special.diric(x, 3), octave_result, decimal=15) + + def test_diric_broadcasting(self): + x = np.arange(5) + n = np.array([1, 3, 7]) + assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size)) + + def test_ellipe(self): + assert_equal(cephes.ellipe(1),1.0) + + def test_ellipeinc(self): + assert_equal(cephes.ellipeinc(0,1),0.0) + + def test_ellipj(self): + cephes.ellipj(0,1) + + def test_ellipk(self): + assert_allclose(ellipk(0), pi/2) + + def test_ellipkinc(self): + assert_equal(cephes.ellipkinc(0,0),0.0) + + def test_erf(self): + assert_equal(cephes.erf(0), 0.0) + + def test_erf_symmetry(self): + x = 5.905732037710919 + assert_equal(cephes.erf(x) + cephes.erf(-x), 0.0) + + def test_erfc(self): + assert_equal(cephes.erfc(0), 1.0) + + def test_exp10(self): + assert_approx_equal(cephes.exp10(2),100.0) + + def test_exp2(self): + assert_equal(cephes.exp2(2),4.0) + + def test_expm1(self): + assert_equal(cephes.expm1(0),0.0) + assert_equal(cephes.expm1(np.inf), np.inf) + assert_equal(cephes.expm1(-np.inf), -1) + assert_equal(cephes.expm1(np.nan), np.nan) + + def 
test_expm1_complex(self): + expm1 = cephes.expm1 + assert_equal(expm1(0 + 0j), 0 + 0j) + assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0)) + assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf)) + assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf)) + assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf)) + assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf)) + assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan)) + assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0)) + assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0)) + assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan)) + assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan)) + + @pytest.mark.xfail(reason='The real part of expm1(z) bad at these points') + def test_expm1_complex_hard(self): + # The real part of this function is difficult to evaluate when + # z.real = -log(cos(z.imag)). + y = np.array([0.1, 0.2, 0.3, 5, 11, 20]) + x = -np.log(np.cos(y)) + z = x + 1j*y + + # evaluate using mpmath.expm1 with dps=1000 + expected = np.array([-5.5507901846769623e-17+0.10033467208545054j, + 2.4289354732893695e-18+0.20271003550867248j, + 4.5235500262585768e-17+0.30933624960962319j, + 7.8234305217489006e-17-3.3805150062465863j, + -1.3685191953697676e-16-225.95084645419513j, + 8.7175620481291045e-17+2.2371609442247422j]) + found = cephes.expm1(z) + # this passes. + assert_array_almost_equal_nulp(found.imag, expected.imag, 3) + # this fails. 
+ assert_array_almost_equal_nulp(found.real, expected.real, 20) + + def test_fdtr(self): + assert_equal(cephes.fdtr(1, 1, 0), 0.0) + # Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10] + assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488, + rtol=1e-12) + + def test_fdtrc(self): + assert_equal(cephes.fdtrc(1, 1, 0), 1.0) + # Computed using Wolfram Alpha: + # 1 - CDF[FRatioDistribution[2, 1/10], 1e10] + assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512, + rtol=1e-12) + + def test_fdtri(self): + assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]), + array([0.9937365, 1.00630298]), rtol=1e-6) + # From Wolfram Alpha: + # CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874... + p = 0.8756751669632105666874 + assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12) + + @pytest.mark.xfail(reason='Returns nan on i686.') + def test_fdtri_mysterious_failure(self): + assert_allclose(cephes.fdtri(1, 1, 0.5), 1) + + def test_fdtridfd(self): + assert_equal(cephes.fdtridfd(1,0,0),5.0) + + def test_fresnel(self): + assert_equal(cephes.fresnel(0),(0.0,0.0)) + + def test_gamma(self): + assert_equal(cephes.gamma(5),24.0) + + def test_gammainccinv(self): + assert_equal(cephes.gammainccinv(5,1),0.0) + + def test_gammaln(self): + cephes.gammaln(10) + + def test_gammasgn(self): + vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64) + assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals))) + + def test_gdtr(self): + assert_equal(cephes.gdtr(1,1,0),0.0) + + def test_gdtr_inf(self): + assert_equal(cephes.gdtr(1,1,np.inf),1.0) + + def test_gdtrc(self): + assert_equal(cephes.gdtrc(1,1,0),1.0) + + def test_gdtria(self): + assert_equal(cephes.gdtria(0,1,1),0.0) + + def test_gdtrib(self): + cephes.gdtrib(1,0,1) + # assert_equal(cephes.gdtrib(1,0,1),5.0) + + def test_gdtrix(self): + cephes.gdtrix(1,1,.1) + + def test_hankel1(self): + cephes.hankel1(1,1) + + def test_hankel1e(self): + cephes.hankel1e(1,1) + + def test_hankel2(self): 
+ cephes.hankel2(1,1) + + def test_hankel2e(self): + cephes.hankel2e(1,1) + + def test_hyp1f1(self): + assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0)) + assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095) + cephes.hyp1f1(1,1,1) + + def test_hyp2f1(self): + assert_equal(cephes.hyp2f1(1,1,1,0),1.0) + + def test_i0(self): + assert_equal(cephes.i0(0),1.0) + + def test_i0e(self): + assert_equal(cephes.i0e(0),1.0) + + def test_i1(self): + assert_equal(cephes.i1(0),0.0) + + def test_i1e(self): + assert_equal(cephes.i1e(0),0.0) + + def test_it2i0k0(self): + cephes.it2i0k0(1) + + def test_it2j0y0(self): + cephes.it2j0y0(1) + + def test_it2struve0(self): + cephes.it2struve0(1) + + def test_itairy(self): + cephes.itairy(1) + + def test_iti0k0(self): + assert_equal(cephes.iti0k0(0),(0.0,0.0)) + + def test_itj0y0(self): + assert_equal(cephes.itj0y0(0),(0.0,0.0)) + + def test_itmodstruve0(self): + assert_equal(cephes.itmodstruve0(0),0.0) + + def test_itstruve0(self): + assert_equal(cephes.itstruve0(0),0.0) + + def test_iv(self): + assert_equal(cephes.iv(1,0),0.0) + + def test_ive(self): + assert_equal(cephes.ive(1,0),0.0) + + def test_j0(self): + assert_equal(cephes.j0(0),1.0) + + def test_j1(self): + assert_equal(cephes.j1(0),0.0) + + def test_jn(self): + assert_equal(cephes.jn(0,0),1.0) + + def test_jv(self): + assert_equal(cephes.jv(0,0),1.0) + + def test_jve(self): + assert_equal(cephes.jve(0,0),1.0) + + def test_k0(self): + cephes.k0(2) + + def test_k0e(self): + cephes.k0e(2) + + def test_k1(self): + cephes.k1(2) + + def test_k1e(self): + cephes.k1e(2) + + def test_kei(self): + cephes.kei(2) + + def test_keip(self): + assert_equal(cephes.keip(0),0.0) + + def test_ker(self): + cephes.ker(2) + + def test_kerp(self): + cephes.kerp(2) + + def test_kelvin(self): + cephes.kelvin(2) + + def test_kn(self): + cephes.kn(1,1) + + def test_kolmogi(self): + assert_equal(cephes.kolmogi(1),0.0) + assert_(np.isnan(cephes.kolmogi(np.nan))) + + def test_kolmogorov(self): 
+ assert_equal(cephes.kolmogorov(0), 1.0) + + def test_kolmogp(self): + assert_equal(cephes._kolmogp(0), -0.0) + + def test_kolmogc(self): + assert_equal(cephes._kolmogc(0), 0.0) + + def test_kolmogci(self): + assert_equal(cephes._kolmogci(0), 0.0) + assert_(np.isnan(cephes._kolmogci(np.nan))) + + def test_kv(self): + cephes.kv(1,1) + + def test_kve(self): + cephes.kve(1,1) + + def test_log1p(self): + log1p = cephes.log1p + assert_equal(log1p(0), 0.0) + assert_equal(log1p(-1), -np.inf) + assert_equal(log1p(-2), np.nan) + assert_equal(log1p(np.inf), np.inf) + + def test_log1p_complex(self): + log1p = cephes.log1p + c = complex + assert_equal(log1p(0 + 0j), 0 + 0j) + assert_equal(log1p(c(-1, 0)), c(-np.inf, 0)) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2)) + assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan)) + assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi)) + assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0)) + assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4)) + assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4)) + assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan)) + assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan)) + assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan)) + assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan)) + assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan)) + + def test_lpmv(self): + assert_equal(cephes.lpmv(0,0,1),1.0) + + def test_mathieu_a(self): + assert_equal(cephes.mathieu_a(1,0),1.0) + + def test_mathieu_b(self): + assert_equal(cephes.mathieu_b(1,0),1.0) + + def test_mathieu_cem(self): + assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0)) + + # Test AMS 20.2.27 + @np.vectorize + def ce_smallq(m, q, z): + z *= np.pi/180 + if m == 0: + # + O(q^2) + return 2**(-0.5) * (1 - .5*q*cos(2*z)) + elif m == 1: + # + O(q^2) + return cos(z) - q/8 * cos(3*z) + elif m == 2: + # + 
O(q^2) + return cos(2*z) - q*(cos(4*z)/12 - 1/4) + else: + # + O(q^2) + return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) + m = np.arange(0, 100) + q = np.r_[0, np.logspace(-30, -9, 10)] + assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0], + ce_smallq(m[:,None], q[None,:], 0.123), + rtol=1e-14, atol=0) + + def test_mathieu_sem(self): + assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0)) + + # Test AMS 20.2.27 + @np.vectorize + def se_smallq(m, q, z): + z *= np.pi/180 + if m == 1: + # + O(q^2) + return sin(z) - q/8 * sin(3*z) + elif m == 2: + # + O(q^2) + return sin(2*z) - q*sin(4*z)/12 + else: + # + O(q^2) + return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) + m = np.arange(1, 100) + q = np.r_[0, np.logspace(-30, -9, 10)] + assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0], + se_smallq(m[:,None], q[None,:], 0.123), + rtol=1e-14, atol=0) + + def test_mathieu_modcem1(self): + assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0)) + + def test_mathieu_modcem2(self): + cephes.mathieu_modcem2(1,1,1) + + # Test reflection relation AMS 20.6.19 + m = np.arange(0, 4)[:,None,None] + q = np.r_[np.logspace(-2, 2, 10)][None,:,None] + z = np.linspace(0, 1, 7)[None,None,:] + + y1 = cephes.mathieu_modcem2(m, q, -z)[0] + + fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0] + y2 = (-cephes.mathieu_modcem2(m, q, z)[0] + - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]) + + assert_allclose(y1, y2, rtol=1e-10) + + def test_mathieu_modsem1(self): + assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0)) + + def test_mathieu_modsem2(self): + cephes.mathieu_modsem2(1,1,1) + + # Test reflection relation AMS 20.6.20 + m = np.arange(1, 4)[:,None,None] + q = np.r_[np.logspace(-2, 2, 10)][None,:,None] + z = np.linspace(0, 1, 7)[None,None,:] + + y1 = cephes.mathieu_modsem2(m, q, -z)[0] + fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1] + y2 = (cephes.mathieu_modsem2(m, q, z)[0] + 
- 2*fr*cephes.mathieu_modsem1(m, q, z)[0]) + assert_allclose(y1, y2, rtol=1e-10) + + def test_mathieu_overflow(self): + # Check that these return NaNs instead of causing a SEGV + assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan)) + + def test_mathieu_ticket_1847(self): + # Regression test --- this call had some out-of-bounds access + # and could return nan occasionally + for k in range(60): + v = cephes.mathieu_modsem2(2, 100, -1) + # Values from ACM TOMS 804 (derivate by numerical differentiation) + assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10) + assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4) + + def test_modfresnelm(self): + cephes.modfresnelm(0) + + def test_modfresnelp(self): + cephes.modfresnelp(0) + + def test_modstruve(self): + assert_equal(cephes.modstruve(1,0),0.0) + + def test_nbdtr(self): + assert_equal(cephes.nbdtr(1,1,1),1.0) + + def test_nbdtrc(self): + assert_equal(cephes.nbdtrc(1,1,1),0.0) + + def test_nbdtri(self): + assert_equal(cephes.nbdtri(1,1,1),1.0) + + def test_nbdtrik(self): + cephes.nbdtrik(1,.4,.5) + + def test_nbdtrin(self): + assert_equal(cephes.nbdtrin(1,0,0),5.0) + + def test_ncfdtr(self): + assert_equal(cephes.ncfdtr(1,1,1,0),0.0) + + def test_ncfdtri(self): + assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0) + f = [0.5, 1, 1.5] + p = cephes.ncfdtr(2, 3, 1.5, f) + assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f) + + def test_ncfdtridfd(self): + dfd = [1, 2, 3] + p = cephes.ncfdtr(2, dfd, 0.25, 15) + 
assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd) + + def test_ncfdtridfn(self): + dfn = [0.1, 1, 2, 3, 1e4] + p = cephes.ncfdtr(dfn, 2, 0.25, 15) + assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5) + + def test_ncfdtrinc(self): + nc = [0.5, 1.5, 2.0] + p = cephes.ncfdtr(2, 3, nc, 15) + assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc) + + def test_nctdtr(self): + assert_equal(cephes.nctdtr(1,0,0),0.5) + assert_equal(cephes.nctdtr(9, 65536, 45), 0.0) + + assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5) + assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.))) + assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.) + + assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.))) + assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.))) + assert_(np.isnan(cephes.nctdtr(2., 1., np.nan))) + + def test_nctdtridf(self): + cephes.nctdtridf(1,0.5,0) + + def test_nctdtrinc(self): + cephes.nctdtrinc(1,0,0) + + def test_nctdtrit(self): + cephes.nctdtrit(.1,0.2,.5) + + def test_nrdtrimn(self): + assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0) + + def test_nrdtrisd(self): + assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0, + atol=0, rtol=0) + + def test_obl_ang1(self): + cephes.obl_ang1(1,1,1,0) + + def test_obl_ang1_cv(self): + result = cephes.obl_ang1_cv(1,1,1,1,0) + assert_almost_equal(result[0],1.0) + assert_almost_equal(result[1],0.0) + + def test_obl_cv(self): + assert_equal(cephes.obl_cv(1,1,0),2.0) + + def test_obl_rad1(self): + cephes.obl_rad1(1,1,1,0) + + def test_obl_rad1_cv(self): + cephes.obl_rad1_cv(1,1,1,1,0) + + def test_obl_rad2(self): + cephes.obl_rad2(1,1,1,0) + + def test_obl_rad2_cv(self): + cephes.obl_rad2_cv(1,1,1,1,0) + + def test_pbdv(self): + assert_equal(cephes.pbdv(1,0),(0.0,1.0)) + + def test_pbvv(self): + cephes.pbvv(1,0) + + def test_pbwa(self): + cephes.pbwa(1,0) + + def test_pdtr(self): + val = cephes.pdtr(0, 1) + assert_almost_equal(val, np.exp(-1)) + # Edge case: m = 0. 
+ val = cephes.pdtr([0, 1, 2], 0) + assert_array_equal(val, [1, 1, 1]) + + def test_pdtrc(self): + val = cephes.pdtrc(0, 1) + assert_almost_equal(val, 1 - np.exp(-1)) + # Edge case: m = 0. + val = cephes.pdtrc([0, 1, 2], 0.0) + assert_array_equal(val, [0, 0, 0]) + + def test_pdtri(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "floating point number truncated to an integer") + cephes.pdtri(0.5,0.5) + + def test_pdtrik(self): + k = cephes.pdtrik(0.5, 1) + assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5) + # Edge case: m = 0 or very small. + k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6]) + assert_array_equal(k, np.zeros((3, 3))) + + def test_pro_ang1(self): + cephes.pro_ang1(1,1,1,0) + + def test_pro_ang1_cv(self): + assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0), + array((1.0,0.0))) + + def test_pro_cv(self): + assert_equal(cephes.pro_cv(1,1,0),2.0) + + def test_pro_rad1(self): + cephes.pro_rad1(1,1,1,0.1) + + def test_pro_rad1_cv(self): + cephes.pro_rad1_cv(1,1,1,1,0) + + def test_pro_rad2(self): + cephes.pro_rad2(1,1,1,0) + + def test_pro_rad2_cv(self): + cephes.pro_rad2_cv(1,1,1,1,0) + + def test_psi(self): + cephes.psi(1) + + def test_radian(self): + assert_equal(cephes.radian(0,0,0),0) + + def test_rgamma(self): + assert_equal(cephes.rgamma(1),1.0) + + def test_round(self): + assert_equal(cephes.round(3.4),3.0) + assert_equal(cephes.round(-3.4),-3.0) + assert_equal(cephes.round(3.6),4.0) + assert_equal(cephes.round(-3.6),-4.0) + assert_equal(cephes.round(3.5),4.0) + assert_equal(cephes.round(-3.5),-4.0) + + def test_shichi(self): + cephes.shichi(1) + + def test_sici(self): + cephes.sici(1) + + s, c = cephes.sici(np.inf) + assert_almost_equal(s, np.pi * 0.5) + assert_almost_equal(c, 0) + + s, c = cephes.sici(-np.inf) + assert_almost_equal(s, -np.pi * 0.5) + assert_(np.isnan(c), "cosine integral(-inf) is not nan") + + def test_sindg(self): + assert_equal(cephes.sindg(90),1.0) + + def test_smirnov(self): + 
assert_equal(cephes.smirnov(1,.1),0.9) + assert_(np.isnan(cephes.smirnov(1,np.nan))) + + def test_smirnovp(self): + assert_equal(cephes._smirnovp(1, .1), -1) + assert_equal(cephes._smirnovp(2, 0.75), -2*(0.25)**(2-1)) + assert_equal(cephes._smirnovp(3, 0.75), -3*(0.25)**(3-1)) + assert_(np.isnan(cephes._smirnovp(1, np.nan))) + + def test_smirnovc(self): + assert_equal(cephes._smirnovc(1,.1),0.1) + assert_(np.isnan(cephes._smirnovc(1,np.nan))) + x10 = np.linspace(0, 1, 11, endpoint=True) + assert_almost_equal(cephes._smirnovc(3, x10), 1-cephes.smirnov(3, x10)) + x4 = np.linspace(0, 1, 5, endpoint=True) + assert_almost_equal(cephes._smirnovc(4, x4), 1-cephes.smirnov(4, x4)) + + def test_smirnovi(self): + assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4) + assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6) + assert_(np.isnan(cephes.smirnovi(1,np.nan))) + + def test_smirnovci(self): + assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.4)),0.4) + assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.6)),0.6) + assert_(np.isnan(cephes._smirnovci(1,np.nan))) + + def test_spence(self): + assert_equal(cephes.spence(1),0.0) + + def test_stdtr(self): + assert_equal(cephes.stdtr(1,0),0.5) + assert_almost_equal(cephes.stdtr(1,1), 0.75) + assert_almost_equal(cephes.stdtr(1,2), 0.852416382349) + + def test_stdtridf(self): + cephes.stdtridf(0.7,1) + + def test_stdtrit(self): + cephes.stdtrit(1,0.7) + + def test_struve(self): + assert_equal(cephes.struve(0,0),0.0) + + def test_tandg(self): + assert_equal(cephes.tandg(45),1.0) + + def test_tklmbda(self): + assert_almost_equal(cephes.tklmbda(1,1),1.0) + + def test_y0(self): + cephes.y0(1) + + def test_y1(self): + cephes.y1(1) + + def test_yn(self): + cephes.yn(1,1) + + def test_yv(self): + cephes.yv(1,1) + + def test_yve(self): + cephes.yve(1,1) + + def test_wofz(self): + z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.), + complex(-1.,1.), complex(-1.,-9.), 
complex(-1.,9.), + complex(-0.0000000234545,1.1234), complex(-3.,5.1), + complex(-53,30.1), complex(0.0,0.12345), + complex(11,1), complex(-22,-2), complex(9,-28), + complex(21,-33), complex(1e5,1e5), complex(1e14,1e14) + ] + w = [ + complex(-3.78270245518980507452677445620103199303131110e-7, + 0.000903861276433172057331093754199933411710053155), + complex(0.1764906227004816847297495349730234591778719532788, + -0.02146550539468457616788719893991501311573031095617), + complex(0.2410250715772692146133539023007113781272362309451, + 0.06087579663428089745895459735240964093522265589350), + complex(0.30474420525691259245713884106959496013413834051768, + -0.20821893820283162728743734725471561394145872072738), + complex(7.317131068972378096865595229600561710140617977e34, + 8.321873499714402777186848353320412813066170427e34), + complex(0.0615698507236323685519612934241429530190806818395, + -0.00676005783716575013073036218018565206070072304635), + complex(0.3960793007699874918961319170187598400134746631, + -5.593152259116644920546186222529802777409274656e-9), + complex(0.08217199226739447943295069917990417630675021771804, + -0.04701291087643609891018366143118110965272615832184), + complex(0.00457246000350281640952328010227885008541748668738, + -0.00804900791411691821818731763401840373998654987934), + complex(0.8746342859608052666092782112565360755791467973338452, + 0.), + complex(0.00468190164965444174367477874864366058339647648741, + 0.0510735563901306197993676329845149741675029197050), + complex(-0.0023193175200187620902125853834909543869428763219, + -0.025460054739731556004902057663500272721780776336), + complex(9.11463368405637174660562096516414499772662584e304, + 3.97101807145263333769664875189354358563218932e305), + complex(-4.4927207857715598976165541011143706155432296e281, + -2.8019591213423077494444700357168707775769028e281), + complex(2.820947917809305132678577516325951485807107151e-6, + 2.820947917668257736791638444590253942253354058e-6), + 
complex(2.82094791773878143474039725787438662716372268e-15, + 2.82094791773878143474039725773333923127678361e-15) + ] + assert_func_equal(cephes.wofz, w, z, rtol=1e-13) + + +class TestAiry: + def test_airy(self): + # This tests the airy function to ensure 8 place accuracy in computation + + x = special.airy(.99) + assert_array_almost_equal( + x, + array([0.13689066,-0.16050153,1.19815925,0.92046818]), + 8, + ) + x = special.airy(.41) + assert_array_almost_equal( + x, + array([0.25238916,-.23480512,0.80686202,0.51053919]), + 8, + ) + x = special.airy(-.36) + assert_array_almost_equal( + x, + array([0.44508477,-0.23186773,0.44939534,0.48105354]), + 8, + ) + + def test_airye(self): + a = special.airye(0.01) + b = special.airy(0.01) + b1 = [None]*4 + for n in range(2): + b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01)) + for n in range(2,4): + b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01)))) + assert_array_almost_equal(a,b1,6) + + def test_bi_zeros(self): + bi = special.bi_zeros(2) + bia = (array([-1.17371322, -3.2710930]), + array([-2.29443968, -4.07315509]), + array([-0.45494438, 0.39652284]), + array([0.60195789, -0.76031014])) + assert_array_almost_equal(bi,bia,4) + + bi = special.bi_zeros(5) + assert_array_almost_equal(bi[0],array([-1.173713222709127, + -3.271093302836352, + -4.830737841662016, + -6.169852128310251, + -7.376762079367764]),11) + + assert_array_almost_equal(bi[1],array([-2.294439682614122, + -4.073155089071828, + -5.512395729663599, + -6.781294445990305, + -7.940178689168587]),10) + + assert_array_almost_equal(bi[2],array([-0.454944383639657, + 0.396522836094465, + -0.367969161486959, + 0.349499116831805, + -0.336026240133662]),11) + + assert_array_almost_equal(bi[3],array([0.601957887976239, + -0.760310141492801, + 0.836991012619261, + -0.88947990142654, + 0.929983638568022]),10) + + def test_ai_zeros(self): + ai = special.ai_zeros(1) + assert_array_almost_equal(ai,(array([-2.33810741]), + array([-1.01879297]), + array([0.5357]), + 
array([0.7012])),4) + + @pytest.mark.fail_slow(2) + def test_ai_zeros_big(self): + z, zp, ai_zpx, aip_zx = special.ai_zeros(50000) + ai_z, aip_z, _, _ = special.airy(z) + ai_zp, aip_zp, _, _ = special.airy(zp) + + ai_envelope = 1/abs(z)**(1./4) + aip_envelope = abs(zp)**(1./4) + + # Check values + assert_allclose(ai_zpx, ai_zp, rtol=1e-10) + assert_allclose(aip_zx, aip_z, rtol=1e-10) + + # Check they are zeros + assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0) + assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0) + + # Check first zeros, DLMF 9.9.1 + assert_allclose(z[:6], + [-2.3381074105, -4.0879494441, -5.5205598281, + -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10) + assert_allclose(zp[:6], + [-1.0187929716, -3.2481975822, -4.8200992112, + -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10) + + @pytest.mark.fail_slow(2) + def test_bi_zeros_big(self): + z, zp, bi_zpx, bip_zx = special.bi_zeros(50000) + _, _, bi_z, bip_z = special.airy(z) + _, _, bi_zp, bip_zp = special.airy(zp) + + bi_envelope = 1/abs(z)**(1./4) + bip_envelope = abs(zp)**(1./4) + + # Check values + assert_allclose(bi_zpx, bi_zp, rtol=1e-10) + assert_allclose(bip_zx, bip_z, rtol=1e-10) + + # Check they are zeros + assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0) + assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0) + + # Check first zeros, DLMF 9.9.2 + assert_allclose(z[:6], + [-1.1737132227, -3.2710933028, -4.8307378417, + -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10) + assert_allclose(zp[:6], + [-2.2944396826, -4.0731550891, -5.5123957297, + -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10) + + +class TestAssocLaguerre: + def test_assoc_laguerre(self): + a1 = special.genlaguerre(11,1) + a2 = special.assoc_laguerre(.2,11,1) + assert_array_almost_equal(a2,a1(.2),8) + a2 = special.assoc_laguerre(1,11,1) + assert_array_almost_equal(a2,a1(1),8) + + +class TestBesselpoly: + def test_besselpoly(self): + pass + + +class TestKelvin: + 
def test_bei(self): + mbei = special.bei(2) + assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact + + def test_beip(self): + mbeip = special.beip(2) + assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact + + def test_ber(self): + mber = special.ber(2) + assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact + + def test_berp(self): + mberp = special.berp(2) + assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact + + def test_bei_zeros(self): + # Abramowitz & Stegun, Table 9.12 + bi = special.bei_zeros(5) + assert_array_almost_equal(bi,array([5.02622, + 9.45541, + 13.89349, + 18.33398, + 22.77544]),4) + + def test_beip_zeros(self): + bip = special.beip_zeros(5) + assert_array_almost_equal(bip,array([3.772673304934953, + 8.280987849760042, + 12.742147523633703, + 17.193431752512542, + 21.641143941167325]),8) + + def test_ber_zeros(self): + ber = special.ber_zeros(5) + assert_array_almost_equal(ber,array([2.84892, + 7.23883, + 11.67396, + 16.11356, + 20.55463]),4) + + def test_berp_zeros(self): + brp = special.berp_zeros(5) + assert_array_almost_equal(brp,array([6.03871, + 10.51364, + 14.96844, + 19.41758, + 23.86430]),4) + + def test_kelvin(self): + mkelv = special.kelvin(2) + assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j, + special.ker(2) + special.kei(2)*1j, + special.berp(2) + special.beip(2)*1j, + special.kerp(2) + special.keip(2)*1j),8) + + def test_kei(self): + mkei = special.kei(2) + assert_almost_equal(mkei,-0.20240006776470432,5) + + def test_keip(self): + mkeip = special.keip(2) + assert_almost_equal(mkeip,0.21980790991960536,5) + + def test_ker(self): + mker = special.ker(2) + assert_almost_equal(mker,-0.041664513991509472,5) + + def test_kerp(self): + mkerp = special.kerp(2) + assert_almost_equal(mkerp,-0.10660096588105264,5) + + def test_kei_zeros(self): + kei = special.kei_zeros(5) + assert_array_almost_equal(kei,array([3.91467, + 8.34422, + 12.78256, + 
17.22314, + 21.66464]),4) + + def test_keip_zeros(self): + keip = special.keip_zeros(5) + assert_array_almost_equal(keip,array([4.93181, + 9.40405, + 13.85827, + 18.30717, + 22.75379]),4) + + # numbers come from 9.9 of A&S pg. 381 + def test_kelvin_zeros(self): + tmp = special.kelvin_zeros(5) + berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp + assert_array_almost_equal(berz,array([2.84892, + 7.23883, + 11.67396, + 16.11356, + 20.55463]),4) + assert_array_almost_equal(beiz,array([5.02622, + 9.45541, + 13.89349, + 18.33398, + 22.77544]),4) + assert_array_almost_equal(kerz,array([1.71854, + 6.12728, + 10.56294, + 15.00269, + 19.44382]),4) + assert_array_almost_equal(keiz,array([3.91467, + 8.34422, + 12.78256, + 17.22314, + 21.66464]),4) + assert_array_almost_equal(berpz,array([6.03871, + 10.51364, + 14.96844, + 19.41758, + 23.86430]),4) + assert_array_almost_equal(beipz,array([3.77267, + # table from 1927 had 3.77320 + # but this is more accurate + 8.28099, + 12.74215, + 17.19343, + 21.64114]),4) + assert_array_almost_equal(kerpz,array([2.66584, + 7.17212, + 11.63218, + 16.08312, + 20.53068]),4) + assert_array_almost_equal(keipz,array([4.93181, + 9.40405, + 13.85827, + 18.30717, + 22.75379]),4) + + def test_ker_zeros(self): + ker = special.ker_zeros(5) + assert_array_almost_equal(ker,array([1.71854, + 6.12728, + 10.56294, + 15.00269, + 19.44381]),4) + + def test_kerp_zeros(self): + kerp = special.kerp_zeros(5) + assert_array_almost_equal(kerp,array([2.66584, + 7.17212, + 11.63218, + 16.08312, + 20.53068]),4) + + +class TestBernoulli: + def test_bernoulli(self): + brn = special.bernoulli(5) + assert_array_almost_equal(brn,array([1.0000, + -0.5000, + 0.1667, + 0.0000, + -0.0333, + 0.0000]),4) + + +class TestBeta: + """ + Test beta and betaln. 
+ """ + + def test_beta(self): + assert_equal(special.beta(1, 1), 1.0) + assert_allclose(special.beta(-100.3, 1e-200), special.gamma(1e-200)) + assert_allclose(special.beta(0.0342, 171), 24.070498359873497, + rtol=1e-13, atol=0) + + bet = special.beta(2, 4) + betg = (special.gamma(2)*special.gamma(4))/special.gamma(6) + assert_allclose(bet, betg, rtol=1e-13) + + def test_beta_inf(self): + assert_(np.isinf(special.beta(-1, 2))) + + def test_betaln(self): + assert_equal(special.betaln(1, 1), 0.0) + assert_allclose(special.betaln(-100.3, 1e-200), + special.gammaln(1e-200)) + assert_allclose(special.betaln(0.0342, 170), 3.1811881124242447, + rtol=1e-14, atol=0) + + betln = special.betaln(2, 4) + bet = log(abs(special.beta(2, 4))) + assert_allclose(betln, bet, rtol=1e-13) + + +class TestBetaInc: + """ + Tests for betainc, betaincinv, betaincc, betainccinv. + """ + + def test_a1_b1(self): + # betainc(1, 1, x) is x. + x = np.array([0, 0.25, 1]) + assert_equal(special.betainc(1, 1, x), x) + assert_equal(special.betaincinv(1, 1, x), x) + assert_equal(special.betaincc(1, 1, x), 1 - x) + assert_equal(special.betainccinv(1, 1, x), 1 - x) + + # Nontrivial expected values computed with mpmath: + # from mpmath import mp + # mp.dps = 100 + # p = mp.betainc(a, b, 0, x, regularized=True) + # + # or, e.g., + # + # p = 0.25 + # a, b = 0.0342, 171 + # x = mp.findroot( + # lambda t: mp.betainc(a, b, 0, t, regularized=True) - p, + # (8e-21, 9e-21), + # solver='anderson', + # ) + # + @pytest.mark.parametrize( + 'a, b, x, p', + [(2, 4, 0.3138101704556974, 0.5), + (0.0342, 171.0, 1e-10, 0.552699169018070910641), + # gh-3761: + (0.0342, 171, 8.42313169354797e-21, 0.25), + # gh-4244: + (0.0002742794749792665, 289206.03125, 1.639984034231756e-56, + 0.9688708782196045), + # gh-12796: + (4, 99997, 0.0001947841578892121, 0.999995)]) + def test_betainc_betaincinv(self, a, b, x, p): + p1 = special.betainc(a, b, x) + assert_allclose(p1, p, rtol=1e-15) + x1 = special.betaincinv(a, b, p) + 
assert_allclose(x1, x, rtol=5e-13) + + # Expected values computed with mpmath: + # from mpmath import mp + # mp.dps = 100 + # p = mp.betainc(a, b, x, 1, regularized=True) + @pytest.mark.parametrize('a, b, x, p', + [(2.5, 3.0, 0.25, 0.833251953125), + (7.5, 13.25, 0.375, 0.43298734645560368593), + (0.125, 7.5, 0.425, 0.0006688257851314237), + (0.125, 18.0, 1e-6, 0.72982359145096327654), + (0.125, 18.0, 0.996, 7.2745875538380150586e-46), + (0.125, 24.0, 0.75, 3.70853404816862016966e-17), + (16.0, 0.75, 0.99999999975, + 5.4408759277418629909e-07), + # gh-4677 (numbers from stackoverflow question): + (0.4211959643503401, 16939.046996018118, + 0.000815296167195521, 1e-7)]) + def test_betaincc_betainccinv(self, a, b, x, p): + p1 = special.betaincc(a, b, x) + assert_allclose(p1, p, rtol=5e-15) + x1 = special.betainccinv(a, b, p) + assert_allclose(x1, x, rtol=8e-15) + + @pytest.mark.parametrize( + 'a, b, y, ref', + [(14.208308325339239, 14.208308325339239, 7.703145458496392e-307, + 8.566004561846704e-23), + (14.0, 14.5, 1e-280, 2.9343915006642424e-21), + (3.5, 15.0, 4e-95, 1.3290751429289227e-28), + (10.0, 1.25, 2e-234, 3.982659092143654e-24), + (4.0, 99997.0, 5e-88, 3.309800566862242e-27)] + ) + def test_betaincinv_tiny_y(self, a, b, y, ref): + # Test with extremely small y values. This test includes + # a regression test for an issue in the boost code; + # see https://github.com/boostorg/math/issues/961 + # + # The reference values were computed with mpmath. 
For example, + # + # from mpmath import mp + # mp.dps = 1000 + # a = 14.208308325339239 + # p = 7.703145458496392e-307 + # x = mp.findroot(lambda t: mp.betainc(a, a, 0, t, + # regularized=True) - p, + # x0=8.566e-23) + # print(float(x)) + # + x = special.betaincinv(a, b, y) + assert_allclose(x, ref, rtol=1e-14) + + @pytest.mark.parametrize('func', [special.betainc, special.betaincinv, + special.betaincc, special.betainccinv]) + @pytest.mark.parametrize('args', [(-1.0, 2, 0.5), (0, 2, 0.5), + (1.5, -2.0, 0.5), (1.5, 0, 0.5), + (1.5, 2.0, -0.3), (1.5, 2.0, 1.1)]) + def test_betainc_domain_errors(self, func, args): + with special.errstate(domain='raise'): + with pytest.raises(special.SpecialFunctionError, match='domain'): + special.betainc(*args) + + +class TestCombinatorics: + def test_comb(self): + assert_allclose(special.comb([10, 10], [3, 4]), [120., 210.]) + assert_allclose(special.comb(10, 3), 120.) + assert_equal(special.comb(10, 3, exact=True), 120) + assert_equal(special.comb(10, 3, exact=True, repetition=True), 220) + + assert_allclose([special.comb(20, k, exact=True) for k in range(21)], + special.comb(20, list(range(21))), atol=1e-15) + + ii = np.iinfo(int).max + 1 + assert_equal(special.comb(ii, ii-1, exact=True), ii) + + expected = 100891344545564193334812497256 + assert special.comb(100, 50, exact=True) == expected + + def test_comb_with_np_int64(self): + n = 70 + k = 30 + np_n = np.int64(n) + np_k = np.int64(k) + res_np = special.comb(np_n, np_k, exact=True) + res_py = special.comb(n, k, exact=True) + assert res_np == res_py + + def test_comb_zeros(self): + assert_equal(special.comb(2, 3, exact=True), 0) + assert_equal(special.comb(-1, 3, exact=True), 0) + assert_equal(special.comb(2, -1, exact=True), 0) + assert_equal(special.comb(2, -1, exact=False), 0) + assert_allclose(special.comb([2, -1, 2, 10], [3, 3, -1, 3]), [0., 0., 0., 120.]) + + def test_comb_exact_non_int_dep(self): + msg = "`exact=True`" + with pytest.deprecated_call(match=msg): + 
special.comb(3.4, 4, exact=True) + + def test_perm(self): + assert_allclose(special.perm([10, 10], [3, 4]), [720., 5040.]) + assert_almost_equal(special.perm(10, 3), 720.) + assert_equal(special.perm(10, 3, exact=True), 720) + + def test_perm_zeros(self): + assert_equal(special.perm(2, 3, exact=True), 0) + assert_equal(special.perm(-1, 3, exact=True), 0) + assert_equal(special.perm(2, -1, exact=True), 0) + assert_equal(special.perm(2, -1, exact=False), 0) + assert_allclose(special.perm([2, -1, 2, 10], [3, 3, -1, 3]), [0., 0., 0., 720.]) + + def test_perm_iv(self): + # currently `exact=True` only support scalars + with pytest.raises(ValueError, match="scalar integers"): + special.perm([1, 2], [4, 5], exact=True) + + # Non-integral scalars with N < k, or N,k < 0 used to return 0, this is now + # deprecated and will raise an error in SciPy 1.16.0 + with pytest.deprecated_call(match="Non-integer"): + special.perm(4.6, 6, exact=True) + with pytest.deprecated_call(match="Non-integer"): + special.perm(-4.6, 3, exact=True) + with pytest.deprecated_call(match="Non-integer"): + special.perm(4, -3.9, exact=True) + + # Non-integral scalars which aren't included in the cases above an raise an + # error directly without deprecation as this code never worked + with pytest.raises(ValueError, match="Non-integer"): + special.perm(6.0, 4.6, exact=True) + + +class TestTrigonometric: + def test_cbrt(self): + cb = special.cbrt(27) + cbrl = 27**(1.0/3.0) + assert_approx_equal(cb,cbrl) + + def test_cbrtmore(self): + cb1 = special.cbrt(27.9) + cbrl1 = 27.9**(1.0/3.0) + assert_almost_equal(cb1,cbrl1,8) + + def test_cosdg(self): + cdg = special.cosdg(90) + cdgrl = cos(pi/2.0) + assert_almost_equal(cdg,cdgrl,8) + + def test_cosdgmore(self): + cdgm = special.cosdg(30) + cdgmrl = cos(pi/6.0) + assert_almost_equal(cdgm,cdgmrl,8) + + def test_cosm1(self): + cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10)) + csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1) + 
assert_array_almost_equal(cs,csrl,8) + + def test_cotdg(self): + ct = special.cotdg(30) + ctrl = tan(pi/6.0)**(-1) + assert_almost_equal(ct,ctrl,8) + + def test_cotdgmore(self): + ct1 = special.cotdg(45) + ctrl1 = tan(pi/4.0)**(-1) + assert_almost_equal(ct1,ctrl1,8) + + def test_specialpoints(self): + assert_almost_equal(special.cotdg(45), 1.0, 14) + assert_almost_equal(special.cotdg(-45), -1.0, 14) + assert_almost_equal(special.cotdg(90), 0.0, 14) + assert_almost_equal(special.cotdg(-90), 0.0, 14) + assert_almost_equal(special.cotdg(135), -1.0, 14) + assert_almost_equal(special.cotdg(-135), 1.0, 14) + assert_almost_equal(special.cotdg(225), 1.0, 14) + assert_almost_equal(special.cotdg(-225), -1.0, 14) + assert_almost_equal(special.cotdg(270), 0.0, 14) + assert_almost_equal(special.cotdg(-270), 0.0, 14) + assert_almost_equal(special.cotdg(315), -1.0, 14) + assert_almost_equal(special.cotdg(-315), 1.0, 14) + assert_almost_equal(special.cotdg(765), 1.0, 14) + + def test_sinc(self): + # the sinc implementation and more extensive sinc tests are in numpy + assert_array_equal(special.sinc([0]), 1) + assert_equal(special.sinc(0.0), 1.0) + + def test_sindg(self): + sn = special.sindg(90) + assert_equal(sn,1.0) + + def test_sindgmore(self): + snm = special.sindg(30) + snmrl = sin(pi/6.0) + assert_almost_equal(snm,snmrl,8) + snm1 = special.sindg(45) + snmrl1 = sin(pi/4.0) + assert_almost_equal(snm1,snmrl1,8) + + +class TestTandg: + + def test_tandg(self): + tn = special.tandg(30) + tnrl = tan(pi/6.0) + assert_almost_equal(tn,tnrl,8) + + def test_tandgmore(self): + tnm = special.tandg(45) + tnmrl = tan(pi/4.0) + assert_almost_equal(tnm,tnmrl,8) + tnm1 = special.tandg(60) + tnmrl1 = tan(pi/3.0) + assert_almost_equal(tnm1,tnmrl1,8) + + def test_specialpoints(self): + assert_almost_equal(special.tandg(0), 0.0, 14) + assert_almost_equal(special.tandg(45), 1.0, 14) + assert_almost_equal(special.tandg(-45), -1.0, 14) + assert_almost_equal(special.tandg(135), -1.0, 14) + 
assert_almost_equal(special.tandg(-135), 1.0, 14) + assert_almost_equal(special.tandg(180), 0.0, 14) + assert_almost_equal(special.tandg(-180), 0.0, 14) + assert_almost_equal(special.tandg(225), 1.0, 14) + assert_almost_equal(special.tandg(-225), -1.0, 14) + assert_almost_equal(special.tandg(315), -1.0, 14) + assert_almost_equal(special.tandg(-315), 1.0, 14) + + +class TestEllip: + def test_ellipj_nan(self): + """Regression test for #912.""" + special.ellipj(0.5, np.nan) + + def test_ellipj(self): + el = special.ellipj(0.2,0) + rel = [sin(0.2),cos(0.2),1.0,0.20] + assert_array_almost_equal(el,rel,13) + + def test_ellipk(self): + elk = special.ellipk(.2) + assert_almost_equal(elk,1.659623598610528,11) + + assert_equal(special.ellipkm1(0.0), np.inf) + assert_equal(special.ellipkm1(1.0), pi/2) + assert_equal(special.ellipkm1(np.inf), 0.0) + assert_equal(special.ellipkm1(np.nan), np.nan) + assert_equal(special.ellipkm1(-1), np.nan) + assert_allclose(special.ellipk(-10), 0.7908718902387385) + + def test_ellipkinc(self): + elkinc = special.ellipkinc(pi/2,.2) + elk = special.ellipk(0.2) + assert_almost_equal(elkinc,elk,15) + alpha = 20*pi/180 + phi = 45*pi/180 + m = sin(alpha)**2 + elkinc = special.ellipkinc(phi,m) + assert_almost_equal(elkinc,0.79398143,8) + # From pg. 
614 of A & S + + assert_equal(special.ellipkinc(pi/2, 0.0), pi/2) + assert_equal(special.ellipkinc(pi/2, 1.0), np.inf) + assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0) + assert_equal(special.ellipkinc(pi/2, np.nan), np.nan) + assert_equal(special.ellipkinc(pi/2, 2), np.nan) + assert_equal(special.ellipkinc(0, 0.5), 0.0) + assert_equal(special.ellipkinc(np.inf, 0.5), np.inf) + assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf) + assert_equal(special.ellipkinc(np.inf, np.inf), np.nan) + assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan) + assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan) + assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan) + assert_equal(special.ellipkinc(np.nan, 0.5), np.nan) + assert_equal(special.ellipkinc(np.nan, np.nan), np.nan) + + assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14) + assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946) + + def test_ellipkinc_2(self): + # Regression test for gh-3550 + # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value + mbad = 0.68359375000000011 + phi = 0.9272952180016123 + m = np.nextafter(mbad, 0) + mvals = [] + for j in range(10): + mvals.append(m) + m = np.nextafter(m, 1) + f = special.ellipkinc(phi, mvals) + assert_array_almost_equal_nulp(f, np.full_like(f, 1.0259330100195334), 1) + # this bug also appears at phi + n * pi for at least small n + f1 = special.ellipkinc(phi + pi, mvals) + assert_array_almost_equal_nulp(f1, np.full_like(f1, 5.1296650500976675), 2) + + def test_ellipkinc_singular(self): + # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2) + xlog = np.logspace(-300, -17, 25) + xlin = np.linspace(1e-17, 0.1, 25) + xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False) + + assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), + rtol=1e14) + assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), + rtol=1e14) + assert_allclose(special.ellipkinc(xlin2, 
1), np.arcsinh(np.tan(xlin2)), + rtol=1e14) + assert_equal(special.ellipkinc(np.pi/2, 1), np.inf) + assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), + rtol=1e14) + assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), + rtol=1e14) + assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), + rtol=1e14) + assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf) + + def test_ellipe(self): + ele = special.ellipe(.2) + assert_almost_equal(ele,1.4890350580958529,8) + + assert_equal(special.ellipe(0.0), pi/2) + assert_equal(special.ellipe(1.0), 1.0) + assert_equal(special.ellipe(-np.inf), np.inf) + assert_equal(special.ellipe(np.nan), np.nan) + assert_equal(special.ellipe(2), np.nan) + assert_allclose(special.ellipe(-10), 3.6391380384177689) + + def test_ellipeinc(self): + eleinc = special.ellipeinc(pi/2,.2) + ele = special.ellipe(0.2) + assert_almost_equal(eleinc,ele,14) + # pg 617 of A & S + alpha, phi = 52*pi/180,35*pi/180 + m = sin(alpha)**2 + eleinc = special.ellipeinc(phi,m) + assert_almost_equal(eleinc, 0.58823065, 8) + + assert_equal(special.ellipeinc(pi/2, 0.0), pi/2) + assert_equal(special.ellipeinc(pi/2, 1.0), 1.0) + assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf) + assert_equal(special.ellipeinc(pi/2, np.nan), np.nan) + assert_equal(special.ellipeinc(pi/2, 2), np.nan) + assert_equal(special.ellipeinc(0, 0.5), 0.0) + assert_equal(special.ellipeinc(np.inf, 0.5), np.inf) + assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf) + assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf) + assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf) + assert_equal(special.ellipeinc(np.inf, np.inf), np.nan) + assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan) + assert_equal(special.ellipeinc(np.nan, 0.5), np.nan) + assert_equal(special.ellipeinc(np.nan, np.nan), np.nan) + assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876) + + def test_ellipeinc_2(self): + # Regression test for gh-3550 + 
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value + mbad = 0.68359375000000011 + phi = 0.9272952180016123 + m = np.nextafter(mbad, 0) + mvals = [] + for j in range(10): + mvals.append(m) + m = np.nextafter(m, 1) + f = special.ellipeinc(phi, mvals) + assert_array_almost_equal_nulp(f, np.full_like(f, 0.84442884574781019), 2) + # this bug also appears at phi + n * pi for at least small n + f1 = special.ellipeinc(phi + pi, mvals) + assert_array_almost_equal_nulp(f1, np.full_like(f1, 3.3471442287390509), 4) + + +class TestEllipCarlson: + """Test for Carlson elliptic integrals ellipr[cdfgj]. + The special values used in these tests can be found in Sec. 3 of Carlson + (1994), https://arxiv.org/abs/math/9409227 + """ + def test_elliprc(self): + assert_allclose(elliprc(1, 1), 1) + assert elliprc(1, inf) == 0.0 + assert isnan(elliprc(1, 0)) + assert elliprc(1, complex(1, inf)) == 0.0 + args = array([[0.0, 0.25], + [2.25, 2.0], + [0.0, 1.0j], + [-1.0j, 1.0j], + [0.25, -2.0], + [1.0j, -1.0]]) + expected_results = array([np.pi, + np.log(2.0), + 1.1107207345396 * (1.0-1.0j), + 1.2260849569072-0.34471136988768j, + np.log(2.0) / 3.0, + 0.77778596920447+0.19832484993429j]) + for i, arr in enumerate(args): + assert_allclose(elliprc(*arr), expected_results[i]) + + def test_elliprd(self): + assert_allclose(elliprd(1, 1, 1), 1) + assert_allclose(elliprd(0, 2, 1) / 3.0, 0.59907011736779610371) + assert elliprd(1, 1, inf) == 0.0 + assert np.isinf(elliprd(1, 1, 0)) + assert np.isinf(elliprd(1, 1, complex(0, 0))) + assert np.isinf(elliprd(0, 1, complex(0, 0))) + assert isnan(elliprd(1, 1, -np.finfo(np.float64).tiny / 2.0)) + assert isnan(elliprd(1, 1, complex(-1, 0))) + args = array([[0.0, 2.0, 1.0], + [2.0, 3.0, 4.0], + [1.0j, -1.0j, 2.0], + [0.0, 1.0j, -1.0j], + [0.0, -1.0+1.0j, 1.0j], + [-2.0-1.0j, -1.0j, -1.0+1.0j]]) + expected_results = array([1.7972103521034, + 0.16510527294261, + 0.65933854154220, + 1.2708196271910+2.7811120159521j, + 
-1.8577235439239-0.96193450888839j, + 1.8249027393704-1.2218475784827j]) + for i, arr in enumerate(args): + assert_allclose(elliprd(*arr), expected_results[i]) + + def test_elliprf(self): + assert_allclose(elliprf(1, 1, 1), 1) + assert_allclose(elliprf(0, 1, 2), 1.31102877714605990523) + assert elliprf(1, inf, 1) == 0.0 + assert np.isinf(elliprf(0, 1, 0)) + assert isnan(elliprf(1, 1, -1)) + assert elliprf(complex(inf), 0, 1) == 0.0 + assert isnan(elliprf(1, 1, complex(-inf, 1))) + args = array([[1.0, 2.0, 0.0], + [1.0j, -1.0j, 0.0], + [0.5, 1.0, 0.0], + [-1.0+1.0j, 1.0j, 0.0], + [2.0, 3.0, 4.0], + [1.0j, -1.0j, 2.0], + [-1.0+1.0j, 1.0j, 1.0-1.0j]]) + expected_results = array([1.3110287771461, + 1.8540746773014, + 1.8540746773014, + 0.79612586584234-1.2138566698365j, + 0.58408284167715, + 1.0441445654064, + 0.93912050218619-0.53296252018635j]) + for i, arr in enumerate(args): + assert_allclose(elliprf(*arr), expected_results[i]) + + def test_elliprg(self): + assert_allclose(elliprg(1, 1, 1), 1) + assert_allclose(elliprg(0, 0, 1), 0.5) + assert_allclose(elliprg(0, 0, 0), 0) + assert np.isinf(elliprg(1, inf, 1)) + assert np.isinf(elliprg(complex(inf), 1, 1)) + args = array([[0.0, 16.0, 16.0], + [2.0, 3.0, 4.0], + [0.0, 1.0j, -1.0j], + [-1.0+1.0j, 1.0j, 0.0], + [-1.0j, -1.0+1.0j, 1.0j], + [0.0, 0.0796, 4.0]]) + expected_results = array([np.pi, + 1.7255030280692, + 0.42360654239699, + 0.44660591677018+0.70768352357515j, + 0.36023392184473+0.40348623401722j, + 1.0284758090288]) + for i, arr in enumerate(args): + assert_allclose(elliprg(*arr), expected_results[i]) + + def test_elliprj(self): + assert_allclose(elliprj(1, 1, 1, 1), 1) + assert elliprj(1, 1, inf, 1) == 0.0 + assert isnan(elliprj(1, 0, 0, 0)) + assert isnan(elliprj(-1, 1, 1, 1)) + assert elliprj(1, 1, 1, inf) == 0.0 + args = array([[0.0, 1.0, 2.0, 3.0], + [2.0, 3.0, 4.0, 5.0], + [2.0, 3.0, 4.0, -1.0+1.0j], + [1.0j, -1.0j, 0.0, 2.0], + [-1.0+1.0j, -1.0-1.0j, 1.0, 2.0], + [1.0j, -1.0j, 0.0, 1.0-1.0j], + 
[-1.0+1.0j, -1.0-1.0j, 1.0, -3.0+1.0j], + [2.0, 3.0, 4.0, -0.5], # Cauchy principal value + [2.0, 3.0, 4.0, -5.0]]) # Cauchy principal value + expected_results = array([0.77688623778582, + 0.14297579667157, + 0.13613945827771-0.38207561624427j, + 1.6490011662711, + 0.94148358841220, + 1.8260115229009+1.2290661908643j, + -0.61127970812028-1.0684038390007j, + 0.24723819703052, # Cauchy principal value + -0.12711230042964]) # Caucny principal value + for i, arr in enumerate(args): + assert_allclose(elliprj(*arr), expected_results[i]) + + @pytest.mark.xfail(reason="Insufficient accuracy on 32-bit") + def test_elliprj_hard(self): + assert_allclose(elliprj(6.483625725195452e-08, + 1.1649136528196886e-27, + 3.6767340167168e+13, + 0.493704617023468), + 8.63426920644241857617477551054e-6, + rtol=5e-15, atol=1e-20) + assert_allclose(elliprj(14.375105857849121, + 9.993988969725365e-11, + 1.72844262269944e-26, + 5.898871222598245e-06), + 829774.1424801627252574054378691828, + rtol=5e-15, atol=1e-20) + + +class TestEllipLegendreCarlsonIdentities: + """Test identities expressing the Legendre elliptic integrals in terms + of Carlson's symmetric integrals. These identities can be found + in the DLMF https://dlmf.nist.gov/19.25#i . + """ + + def setup_class(self): + self.m_n1_1 = np.arange(-1., 1., 0.01) + # For double, this is -(2**1024) + self.max_neg = finfo(double).min + # Lots of very negative numbers + self.very_neg_m = -1. * 2.**arange(-1 + + np.log2(-self.max_neg), 0., + -1.) 
+ self.ms_up_to_1 = np.concatenate(([self.max_neg], + self.very_neg_m, + self.m_n1_1)) + + def test_k(self): + """Test identity: + K(m) = R_F(0, 1-m, 1) + """ + m = self.ms_up_to_1 + assert_allclose(ellipk(m), elliprf(0., 1.-m, 1.)) + + def test_km1(self): + """Test identity: + K(m) = R_F(0, 1-m, 1) + But with the ellipkm1 function + """ + # For double, this is 2**-1022 + tiny = finfo(double).tiny + # All these small powers of 2, up to 2**-1 + m1 = tiny * 2.**arange(0., -np.log2(tiny)) + assert_allclose(ellipkm1(m1), elliprf(0., m1, 1.)) + + def test_e(self): + """Test identity: + E(m) = 2*R_G(0, 1-k^2, 1) + """ + m = self.ms_up_to_1 + assert_allclose(ellipe(m), 2.*elliprg(0., 1.-m, 1.)) + + +class TestErf: + + def test_erf(self): + er = special.erf(.25) + assert_almost_equal(er,0.2763263902,8) + + def test_erf_zeros(self): + erz = special.erf_zeros(5) + erzr = array([1.45061616+1.88094300j, + 2.24465928+2.61657514j, + 2.83974105+3.17562810j, + 3.33546074+3.64617438j, + 3.76900557+4.06069723j]) + assert_array_almost_equal(erz,erzr,4) + + def _check_variant_func(self, func, other_func, rtol, atol=0): + np.random.seed(1234) + n = 10000 + x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1) + y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1) + z = x + 1j*y + + with np.errstate(all='ignore'): + w = other_func(z) + w_real = other_func(x).real + + mask = np.isfinite(w) + w = w[mask] + z = z[mask] + + mask = np.isfinite(w_real) + w_real = w_real[mask] + x = x[mask] + + # test both real and complex variants + assert_func_equal(func, w, z, rtol=rtol, atol=atol) + assert_func_equal(func, w_real, x, rtol=rtol, atol=atol) + + def test_erfc_consistent(self): + self._check_variant_func( + cephes.erfc, + lambda z: 1 - cephes.erf(z), + rtol=1e-12, + atol=1e-14 # <- the test function loses precision + ) + + def test_erfcx_consistent(self): + self._check_variant_func( + cephes.erfcx, + lambda z: np.exp(z*z) * cephes.erfc(z), + rtol=1e-12 + ) + + def 
test_erfi_consistent(self): + self._check_variant_func( + cephes.erfi, + lambda z: -1j * cephes.erf(1j*z), + rtol=1e-12 + ) + + def test_dawsn_consistent(self): + self._check_variant_func( + cephes.dawsn, + lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z), + rtol=1e-12 + ) + + def test_erf_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -1, 1] + assert_allclose(special.erf(vals), expected, rtol=1e-15) + + def test_erfc_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, 2, 0] + assert_allclose(special.erfc(vals), expected, rtol=1e-15) + + def test_erfcx_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, np.inf, 0] + assert_allclose(special.erfcx(vals), expected, rtol=1e-15) + + def test_erfi_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -np.inf, np.inf] + assert_allclose(special.erfi(vals), expected, rtol=1e-15) + + def test_dawsn_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -0.0, 0.0] + assert_allclose(special.dawsn(vals), expected, rtol=1e-15) + + def test_wofz_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j] + assert_allclose(special.wofz(vals), expected, rtol=1e-15) + + +class TestEuler: + def test_euler(self): + eu0 = special.euler(0) + eu1 = special.euler(1) + eu2 = special.euler(2) # just checking segfaults + assert_allclose(eu0, [1], rtol=1e-15) + assert_allclose(eu1, [1, 0], rtol=1e-15) + assert_allclose(eu2, [1, 0, -1], rtol=1e-15) + eu24 = special.euler(24) + mathworld = [1,1,5,61,1385,50521,2702765,199360981, + 19391512145,2404879675441, + 370371188237525,69348874393137901, + 15514534163557086905] + correct = zeros((25,),'d') + for k in range(0,13): + if (k % 2): + correct[2*k] = -float(mathworld[k]) + else: + correct[2*k] = float(mathworld[k]) + with np.errstate(all='ignore'): + err = nan_to_num((eu24-correct)/correct) + errmax = max(err) + assert_almost_equal(errmax, 0.0, 14) + 
+ +class TestExp: + def test_exp2(self): + ex = special.exp2(2) + exrl = 2**2 + assert_equal(ex,exrl) + + def test_exp2more(self): + exm = special.exp2(2.5) + exmrl = 2**(2.5) + assert_almost_equal(exm,exmrl,8) + + def test_exp10(self): + ex = special.exp10(2) + exrl = 10**2 + assert_approx_equal(ex,exrl) + + def test_exp10more(self): + exm = special.exp10(2.5) + exmrl = 10**(2.5) + assert_almost_equal(exm,exmrl,8) + + def test_expm1(self): + ex = (special.expm1(2),special.expm1(3),special.expm1(4)) + exrl = (exp(2)-1,exp(3)-1,exp(4)-1) + assert_array_almost_equal(ex,exrl,8) + + def test_expm1more(self): + ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2)) + exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1) + assert_array_almost_equal(ex1,exrl1,8) + + +class TestFactorialFunctions: + @pytest.mark.parametrize("exact", [True, False]) + def test_factorialx_scalar_return_type(self, exact): + assert np.isscalar(special.factorial(1, exact=exact)) + assert np.isscalar(special.factorial2(1, exact=exact)) + assert np.isscalar(special.factorialk(1, 3, exact=exact)) + + @pytest.mark.parametrize("n", [-1, -2, -3]) + @pytest.mark.parametrize("exact", [True, False]) + def test_factorialx_negative(self, exact, n): + assert_equal(special.factorial(n, exact=exact), 0) + assert_equal(special.factorial2(n, exact=exact), 0) + assert_equal(special.factorialk(n, 3, exact=exact), 0) + + @pytest.mark.parametrize("exact", [True, False]) + def test_factorialx_negative_array(self, exact): + assert_func = assert_array_equal if exact else assert_allclose + # Consistent output for n < 0 + assert_func(special.factorial([-5, -4, 0, 1], exact=exact), + [0, 0, 1, 1]) + assert_func(special.factorial2([-5, -4, 0, 1], exact=exact), + [0, 0, 1, 1]) + assert_func(special.factorialk([-5, -4, 0, 1], 3, exact=exact), + [0, 0, 1, 1]) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("content", [np.nan, None, np.datetime64('nat')], + ids=["NaN", "None", "NaT"]) + def 
test_factorialx_nan(self, content, exact): + # scalar + assert special.factorial(content, exact=exact) is np.nan + assert special.factorial2(content, exact=exact) is np.nan + assert special.factorialk(content, 3, exact=exact) is np.nan + # array-like (initializes np.array with default dtype) + if content is not np.nan: + # None causes object dtype, which is not supported; as is datetime + with pytest.raises(ValueError, match="Unsupported datatype.*"): + special.factorial([content], exact=exact) + elif exact: + with pytest.raises(ValueError, match="factorial with `exact=Tr.*"): + special.factorial([content], exact=exact) + else: + assert np.isnan(special.factorial([content], exact=exact)[0]) + # factorial{2,k} don't support array case due to dtype constraints + with pytest.raises(ValueError, match="factorial2 does not support.*"): + special.factorial2([content], exact=exact) + with pytest.raises(ValueError, match="factorialk does not support.*"): + special.factorialk([content], 3, exact=exact) + # array-case also tested in test_factorial{,2,k}_corner_cases + + @pytest.mark.parametrize("levels", range(1, 5)) + @pytest.mark.parametrize("exact", [True, False]) + def test_factorialx_array_shape(self, levels, exact): + def _nest_me(x, k=1): + """ + Double x and nest it k times + + For example: + >>> _nest_me([3, 4], 2) + [[[3, 4], [3, 4]], [[3, 4], [3, 4]]] + """ + if k == 0: + return x + else: + return _nest_me([x, x], k-1) + + def _check(res, nucleus): + exp = np.array(_nest_me(nucleus, k=levels), dtype=object) + # test that ndarray shape is maintained + # need to cast to float due to numpy/numpy#21220 + assert_allclose(res.astype(np.float64), exp.astype(np.float64)) + + n = np.array(_nest_me([5, 25], k=levels)) + exp_nucleus = {1: [120, math.factorial(25)], + # correctness of factorial{2,k}() is tested elsewhere + 2: [15, special.factorial2(25, exact=True)], + 3: [10, special.factorialk(25, 3, exact=True)]} + + _check(special.factorial(n, exact=exact), exp_nucleus[1]) 
+ _check(special.factorial2(n, exact=exact), exp_nucleus[2]) + _check(special.factorialk(n, 3, exact=exact), exp_nucleus[3]) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dtype", [ + None, int, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64 + ]) + @pytest.mark.parametrize("dim", range(0, 5)) + def test_factorialx_array_dimension(self, dim, dtype, exact): + n = np.array(5, dtype=dtype, ndmin=dim) + exp = {1: 120, 2: 15, 3: 10} + assert_allclose(special.factorial(n, exact=exact), + np.array(exp[1], ndmin=dim)) + assert_allclose(special.factorial2(n, exact=exact), + np.array(exp[2], ndmin=dim)) + assert_allclose(special.factorialk(n, 3, exact=exact), + np.array(exp[3], ndmin=dim)) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("level", range(1, 5)) + def test_factorialx_array_like(self, level, exact): + def _nest_me(x, k=1): + if k == 0: + return x + else: + return _nest_me([x], k-1) + + n = _nest_me([5], k=level-1) # nested list + exp_nucleus = {1: 120, 2: 15, 3: 10} + assert_func = assert_array_equal if exact else assert_allclose + assert_func(special.factorial(n, exact=exact), + np.array(exp_nucleus[1], ndmin=level)) + assert_func(special.factorial2(n, exact=exact), + np.array(exp_nucleus[2], ndmin=level)) + assert_func(special.factorialk(n, 3, exact=exact), + np.array(exp_nucleus[3], ndmin=level)) + + # note that n=170 is the last integer such that factorial(n) fits float64 + @pytest.mark.parametrize('n', range(30, 180, 10)) + def test_factorial_accuracy(self, n): + # Compare exact=True vs False, i.e. that the accuracy of the + # approximation is better than the specified tolerance. 
+ + rtol = 6e-14 if sys.platform == 'win32' else 1e-15 + # need to cast exact result to float due to numpy/numpy#21220 + assert_allclose(float(special.factorial(n, exact=True)), + special.factorial(n, exact=False), rtol=rtol) + assert_allclose(special.factorial([n], exact=True).astype(float), + special.factorial([n], exact=False), rtol=rtol) + + @pytest.mark.parametrize('n', + list(range(0, 22)) + list(range(30, 180, 10))) + def test_factorial_int_reference(self, n): + # Compare all with math.factorial + correct = math.factorial(n) + assert_array_equal(correct, special.factorial(n, True)) + assert_array_equal(correct, special.factorial([n], True)[0]) + + rtol = 6e-14 if sys.platform == 'win32' else 1e-15 + assert_allclose(float(correct), special.factorial(n, False), + rtol=rtol) + assert_allclose(float(correct), special.factorial([n], False)[0], + rtol=rtol) + + def test_factorial_float_reference(self): + def _check(n, expected): + assert_allclose(special.factorial(n), expected) + assert_allclose(special.factorial([n])[0], expected) + # using floats with exact=True is deprecated for scalars... + with pytest.deprecated_call(match="Non-integer values.*"): + assert_allclose(special.factorial(n, exact=True), expected) + # ... 
and already an error for arrays + with pytest.raises(ValueError, match="factorial with `exact=Tr.*"): + special.factorial([n], exact=True) + + # Reference values from mpmath for gamma(n+1) + _check(0.01, 0.994325851191506032181932988) + _check(1.11, 1.051609009483625091514147465) + _check(5.55, 314.9503192327208241614959052) + _check(11.1, 50983227.84411615655137170553) + _check(33.3, 2.493363339642036352229215273e+37) + _check(55.5, 9.479934358436729043289162027e+73) + _check(77.7, 3.060540559059579022358692625e+114) + _check(99.9, 5.885840419492871504575693337e+157) + # close to maximum for float64 + _check(170.6243, 1.79698185749571048960082e+308) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64, + np.complex128, object]) + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dim", range(0, 5)) + # test empty & non-empty arrays, with nans and mixed + @pytest.mark.parametrize("content", + [[], [1], [1.1], [np.nan], [np.nan, 1]], + ids=["[]", "[1]", "[1.1]", "[NaN]", "[NaN, 1]"]) + def test_factorial_array_corner_cases(self, content, dim, exact, dtype): + if dtype == np.int64 and any(np.isnan(x) for x in content): + pytest.skip("impossible combination") + # np.array(x, ndim=0) will not be 0-dim. 
unless x is too + content = content if (dim > 0 or len(content) != 1) else content[0] + n = np.array(content, ndmin=dim, dtype=dtype) + result = None + if not content: + result = special.factorial(n, exact=exact) + elif not (np.issubdtype(n.dtype, np.integer) + or np.issubdtype(n.dtype, np.floating)): + with pytest.raises(ValueError, match="Unsupported datatype*"): + special.factorial(n, exact=exact) + elif exact and not np.issubdtype(n.dtype, np.integer): + with pytest.raises(ValueError, match="factorial with `exact=.*"): + special.factorial(n, exact=exact) + else: + # no error + result = special.factorial(n, exact=exact) + + # assert_equal does not distinguish scalars and 0-dim arrays of the same value, + # see https://github.com/numpy/numpy/issues/24050 + def assert_really_equal(x, y): + assert type(x) == type(y), f"types not equal: {type(x)}, {type(y)}" + assert_equal(x, y) + + if result is not None: + # keep 0-dim.; otherwise n.ravel().ndim==1, even if n.ndim==0 + n_flat = n.ravel() if n.ndim else n + ref = special.factorial(n_flat, exact=exact) if n.size else [] + # expected result is empty if and only if n is empty, + # and has the same dtype & dimension as n + expected = np.array(ref, ndmin=dim, dtype=dtype) + assert_really_equal(result, expected) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None], + ids=["1", "1.1", "2+2j", "NaN", "None"]) + def test_factorial_scalar_corner_cases(self, n, exact): + if (n is None or n is np.nan or np.issubdtype(type(n), np.integer) + or np.issubdtype(type(n), np.floating)): + # no error + if (np.issubdtype(type(n), np.floating) and exact + and n is not np.nan): + with pytest.deprecated_call(match="Non-integer values.*"): + result = special.factorial(n, exact=exact) + else: + result = special.factorial(n, exact=exact) + exp = np.nan if n is np.nan or n is None else special.factorial(n) + assert_equal(result, exp) + else: + with pytest.raises(ValueError, 
match="Unsupported datatype*"): + special.factorial(n, exact=exact) + + # use odd increment to make sure both odd & even numbers are tested! + @pytest.mark.parametrize('n', range(30, 180, 11)) + def test_factorial2_accuracy(self, n): + # Compare exact=True vs False, i.e. that the accuracy of the + # approximation is better than the specified tolerance. + + rtol = 2e-14 if sys.platform == 'win32' else 1e-15 + # need to cast exact result to float due to numpy/numpy#21220 + assert_allclose(float(special.factorial2(n, exact=True)), + special.factorial2(n, exact=False), rtol=rtol) + assert_allclose(special.factorial2([n], exact=True).astype(float), + special.factorial2([n], exact=False), rtol=rtol) + + @pytest.mark.parametrize('n', + list(range(0, 22)) + list(range(30, 180, 11))) + def test_factorial2_int_reference(self, n): + # Compare all with correct value + + # Cannot use np.product due to overflow + correct = functools.reduce(operator.mul, list(range(n, 0, -2)), 1) + + assert_array_equal(correct, special.factorial2(n, True)) + assert_array_equal(correct, special.factorial2([n], True)[0]) + + assert_allclose(float(correct), special.factorial2(n, False)) + assert_allclose(float(correct), special.factorial2([n], False)[0]) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64, + np.complex128, object]) + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dim", range(0, 5)) + # test empty & non-empty arrays, with nans and mixed + @pytest.mark.parametrize("content", [[], [1], [np.nan], [np.nan, 1]], + ids=["[]", "[1]", "[NaN]", "[NaN, 1]"]) + def test_factorial2_array_corner_cases(self, content, dim, exact, dtype): + if dtype == np.int64 and any(np.isnan(x) for x in content): + pytest.skip("impossible combination") + # np.array(x, ndim=0) will not be 0-dim. 
unless x is too + content = content if (dim > 0 or len(content) != 1) else content[0] + n = np.array(content, ndmin=dim, dtype=dtype) + if np.issubdtype(n.dtype, np.integer) or (not content): + # no error + result = special.factorial2(n, exact=exact) + # expected result is identical to n for exact=True resp. empty + # arrays (assert_allclose chokes on object), otherwise up to tol + func = assert_equal if exact or (not content) else assert_allclose + func(result, n) + else: + with pytest.raises(ValueError, match="factorial2 does not*"): + special.factorial2(n, 3) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None], + ids=["1", "1.1", "2+2j", "NaN", "None"]) + def test_factorial2_scalar_corner_cases(self, n, exact): + if n is None or n is np.nan or np.issubdtype(type(n), np.integer): + # no error + result = special.factorial2(n, exact=exact) + exp = np.nan if n is np.nan or n is None else special.factorial(n) + assert_equal(result, exp) + else: + with pytest.raises(ValueError, match="factorial2 does not*"): + special.factorial2(n, exact=exact) + + @pytest.mark.parametrize("k", range(1, 5)) + # note that n=170 is the last integer such that factorial(n) fits float64; + # use odd increment to make sure both odd & even numbers are tested + @pytest.mark.parametrize('n', range(170, 20, -29)) + def test_factorialk_accuracy(self, n, k): + # Compare exact=True vs False, i.e. that the accuracy of the + # approximation is better than the specified tolerance. 
+ + # need to cast exact result to float due to numpy/numpy#21220 + assert_allclose(float(special.factorialk(n, k=k, exact=True)), + special.factorialk(n, k=k, exact=False)) + assert_allclose(special.factorialk([n], k=k, exact=True).astype(float), + special.factorialk([n], k=k, exact=False)) + + @pytest.mark.parametrize('k', list(range(1, 5)) + [10, 20]) + @pytest.mark.parametrize('n', + list(range(0, 22)) + list(range(22, 100, 11))) + def test_factorialk_int_reference(self, n, k): + # Compare all with correct value + + # Would be nice to use np.product here, but that's + # broken on windows, see numpy/numpy#21219 + correct = functools.reduce(operator.mul, list(range(n, 0, -k)), 1) + + assert_array_equal(correct, special.factorialk(n, k, True)) + assert_array_equal(correct, special.factorialk([n], k, True)[0]) + + assert_allclose(float(correct), special.factorialk(n, k, False)) + assert_allclose(float(correct), special.factorialk([n], k, False)[0]) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64, + np.complex128, object]) + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("dim", range(0, 5)) + # test empty & non-empty arrays, with nans and mixed + @pytest.mark.parametrize("content", [[], [1], [np.nan], [np.nan, 1]], + ids=["[]", "[1]", "[NaN]", "[NaN, 1]"]) + def test_factorialk_array_corner_cases(self, content, dim, exact, dtype): + if dtype == np.int64 and any(np.isnan(x) for x in content): + pytest.skip("impossible combination") + # np.array(x, ndim=0) will not be 0-dim. 
unless x is too + content = content if (dim > 0 or len(content) != 1) else content[0] + n = np.array(content, ndmin=dim, dtype=dtype if exact else np.float64) + if np.issubdtype(n.dtype, np.integer) or (not content): + # no error; expected result is identical to n + assert_equal(special.factorialk(n, 3, exact=exact), n) + else: + with pytest.raises(ValueError, match="factorialk does not*"): + special.factorialk(n, 3, exact=exact) + + @pytest.mark.parametrize("exact", [True, False, None]) + @pytest.mark.parametrize("k", range(1, 5)) + @pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None], + ids=["1", "1.1", "2+2j", "NaN", "None"]) + def test_factorialk_scalar_corner_cases(self, n, k, exact): + if n is None or n is np.nan or np.issubdtype(type(n), np.integer): + if exact is None: + with pytest.deprecated_call(match="factorialk will default.*"): + result = special.factorialk(n, k=k, exact=exact) + else: + # no error + result = special.factorialk(n, k=k, exact=exact) + + nan_cond = n is np.nan or n is None + # factorialk(1, k) == 1 for all k + expected = np.nan if nan_cond else 1 + assert_equal(result, expected) + else: + with pytest.raises(ValueError, match="factorialk does not*"): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "factorialk will default") + special.factorialk(n, k=k, exact=exact) + + @pytest.mark.parametrize("k", [0, 1.1, np.nan, "1"]) + def test_factorialk_raises_k(self, k): + with pytest.raises(ValueError, match="k must be a positive integer*"): + special.factorialk(1, k) + + @pytest.mark.parametrize("exact", [True, False]) + @pytest.mark.parametrize("k", range(1, 12)) + def test_factorialk_dtype(self, k, exact): + kw = {"k": k, "exact": exact} + if exact and k in _FACTORIALK_LIMITS_64BITS.keys(): + n = np.array([_FACTORIALK_LIMITS_32BITS[k]]) + assert_equal(special.factorialk(n, **kw).dtype, np_long) + assert_equal(special.factorialk(n + 1, **kw).dtype, np.int64) + # assert maximality of limits for given dtype + assert 
special.factorialk(n + 1, **kw) > np.iinfo(np.int32).max + + n = np.array([_FACTORIALK_LIMITS_64BITS[k]]) + assert_equal(special.factorialk(n, **kw).dtype, np.int64) + assert_equal(special.factorialk(n + 1, **kw).dtype, object) + assert special.factorialk(n + 1, **kw) > np.iinfo(np.int64).max + else: + n = np.array([_FACTORIALK_LIMITS_64BITS.get(k, 1)]) + # for exact=True and k >= 10, we always return object; + # for exact=False it's always float + dtype = object if exact else np.float64 + assert_equal(special.factorialk(n, **kw).dtype, dtype) + + def test_factorial_mixed_nan_inputs(self): + x = np.array([np.nan, 1, 2, 3, np.nan]) + expected = np.array([np.nan, 1, 2, 6, np.nan]) + assert_equal(special.factorial(x, exact=False), expected) + with pytest.raises(ValueError, match="factorial with `exact=True.*"): + special.factorial(x, exact=True) + + +class TestFresnel: + @pytest.mark.parametrize("z, s, c", [ + # some positive value + (.5, 0.064732432859999287, 0.49234422587144644), + (.5 + .0j, 0.064732432859999287, 0.49234422587144644), + # negative half annulus + # https://github.com/scipy/scipy/issues/12309 + # Reference values can be reproduced with + # https://www.wolframalpha.com/input/?i=FresnelS%5B-2.0+%2B+0.1i%5D + # https://www.wolframalpha.com/input/?i=FresnelC%5B-2.0+%2B+0.1i%5D + ( + -2.0 + 0.1j, + -0.3109538687728942-0.0005870728836383176j, + -0.4879956866358554+0.10670801832903172j + ), + ( + -0.1 - 1.5j, + -0.03918309471866977+0.7197508454568574j, + 0.09605692502968956-0.43625191013617465j + ), + # a different algorithm kicks in for "large" values, i.e., |z| >= 4.5, + # make sure to test both float and complex values; a different + # algorithm is used + (6.0, 0.44696076, 0.49953147), + (6.0 + 0.0j, 0.44696076, 0.49953147), + (6.0j, -0.44696076j, 0.49953147j), + (-6.0 + 0.0j, -0.44696076, -0.49953147), + (-6.0j, 0.44696076j, -0.49953147j), + # inf + (np.inf, 0.5, 0.5), + (-np.inf, -0.5, -0.5), + ]) + def test_fresnel_values(self, z, s, c): + frs = 
array(special.fresnel(z)) + assert_array_almost_equal(frs, array([s, c]), 8) + + # values from pg 329 Table 7.11 of A & S + # slightly corrected in 4th decimal place + def test_fresnel_zeros(self): + szo, czo = special.fresnel_zeros(5) + assert_array_almost_equal(szo, + array([2.0093+0.2885j, + 2.8335+0.2443j, + 3.4675+0.2185j, + 4.0026+0.2009j, + 4.4742+0.1877j]),3) + assert_array_almost_equal(czo, + array([1.7437+0.3057j, + 2.6515+0.2529j, + 3.3204+0.2240j, + 3.8757+0.2047j, + 4.3611+0.1907j]),3) + vals1 = special.fresnel(szo)[0] + vals2 = special.fresnel(czo)[1] + assert_array_almost_equal(vals1,0,14) + assert_array_almost_equal(vals2,0,14) + + def test_fresnelc_zeros(self): + szo, czo = special.fresnel_zeros(6) + frc = special.fresnelc_zeros(6) + assert_array_almost_equal(frc,czo,12) + + def test_fresnels_zeros(self): + szo, czo = special.fresnel_zeros(5) + frs = special.fresnels_zeros(5) + assert_array_almost_equal(frs,szo,12) + + +class TestGamma: + def test_gamma(self): + gam = special.gamma(5) + assert_equal(gam,24.0) + + def test_gammaln(self): + gamln = special.gammaln(3) + lngam = log(special.gamma(3)) + assert_almost_equal(gamln,lngam,8) + + def test_gammainccinv(self): + gccinv = special.gammainccinv(.5,.5) + gcinv = special.gammaincinv(.5,.5) + assert_almost_equal(gccinv,gcinv,8) + + @with_special_errors + def test_gammaincinv(self): + y = special.gammaincinv(.4,.4) + x = special.gammainc(.4,y) + assert_almost_equal(x,0.4,1) + y = special.gammainc(10, 0.05) + x = special.gammaincinv(10, 2.5715803516000736e-20) + assert_almost_equal(0.05, x, decimal=10) + assert_almost_equal(y, 2.5715803516000736e-20, decimal=10) + x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18) + assert_almost_equal(11.0, x, decimal=10) + + @with_special_errors + def test_975(self): + # Regression test for ticket #975 -- switch point in algorithm + # check that things work OK at the point, immediately next floats + # around it, and a bit further away + pts = [0.25, 
+ np.nextafter(0.25, 0), 0.25 - 1e-12, + np.nextafter(0.25, 1), 0.25 + 1e-12] + for xp in pts: + y = special.gammaincinv(.4, xp) + x = special.gammainc(0.4, y) + assert_allclose(x, xp, rtol=1e-12) + + def test_rgamma(self): + rgam = special.rgamma(8) + rlgam = 1/special.gamma(8) + assert_almost_equal(rgam,rlgam,8) + + def test_infinity(self): + assert_(np.isinf(special.gamma(-1))) + assert_equal(special.rgamma(-1), 0) + + +class TestHankel: + + def test_negv1(self): + assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14) + + def test_hankel1(self): + hank1 = special.hankel1(1,.1) + hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j) + assert_almost_equal(hank1,hankrl,8) + + def test_negv1e(self): + assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14) + + def test_hankel1e(self): + hank1e = special.hankel1e(1,.1) + hankrle = special.hankel1(1,.1)*exp(-.1j) + assert_almost_equal(hank1e,hankrle,8) + + def test_negv2(self): + assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14) + + def test_hankel2(self): + hank2 = special.hankel2(1,.1) + hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j) + assert_almost_equal(hank2,hankrl2,8) + + def test_neg2e(self): + assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14) + + def test_hankl2e(self): + hank2e = special.hankel2e(1,.1) + hankrl2e = special.hankel2e(1,.1) + assert_almost_equal(hank2e,hankrl2e,8) + + +class TestHyper: + def test_h1vp(self): + h1 = special.h1vp(1,.1) + h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j) + assert_almost_equal(h1,h1real,8) + + def test_h2vp(self): + h2 = special.h2vp(1,.1) + h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j) + assert_almost_equal(h2,h2real,8) + + def test_hyp0f1(self): + # scalar input + assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12) + assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15) + + # float input, expected values match mpmath + x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 
1.5]) + expected = np.array([0.58493659229143, 0.70566805723127, 1.0, + 1.37789689539747, 1.60373685288480]) + assert_allclose(x, expected, rtol=1e-12) + + # complex input + x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j) + assert_allclose(x, expected.astype(complex), rtol=1e-12) + + # test broadcasting + x1 = [0.5, 1.5, 2.5] + x2 = [0, 1, 0.5] + x = special.hyp0f1(x1, x2) + expected = [1.0, 1.8134302039235093, 1.21482702689997] + assert_allclose(x, expected, rtol=1e-12) + x = special.hyp0f1(np.vstack([x1] * 2), x2) + assert_allclose(x, np.vstack([expected] * 2), rtol=1e-12) + assert_raises(ValueError, special.hyp0f1, + np.vstack([x1] * 3), [0, 1]) + + def test_hyp0f1_gh5764(self): + # Just checks the point that failed; there's a more systematic + # test in test_mpmath + res = special.hyp0f1(0.8, 0.5 + 0.5*1J) + # The expected value was generated using mpmath + assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665) + + def test_hyp1f1(self): + hyp1 = special.hyp1f1(.1,.1,.3) + assert_almost_equal(hyp1, 1.3498588075760032,7) + + # test contributed by Moritz Deger (2008-05-29) + # https://github.com/scipy/scipy/issues/1186 (Trac #659) + + # reference data obtained from mathematica [ a, b, x, m(a,b,x)]: + # produced with test_hyp1f1.nb + ref_data = array([ + [-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04], + [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00], + [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05], + [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08], + [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24], + [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21], + [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13], + [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13], + [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02], + [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10], 
+ [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01], + [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21], + [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20], + [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07], + [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03], + [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02], + [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11], + [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03], + [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17], + [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01], + [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00], + [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00], + [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23], + [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01], + [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04], + [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08], + [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01], + [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07], + [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03], + [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09], + [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06], + [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00], + [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01], + [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02], + [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02], + [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02], + [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00], + [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09], + [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 
9.90859484e+01], + [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00], + [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02], + [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05], + [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05], + [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02], + [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02], + [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13], + [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05], + [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12], + [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01], + [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16], + [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37], + [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06], + [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02], + [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12], + [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27], + [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04], + [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06], + [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07], + [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03], + [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07], + [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27], + [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12], + [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32], + [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04], + [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01], + [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02], + [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19], + [1.99890876e+01, 
2.20677419e+01, -2.51301778e+01, 1.23840297e-09], + [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31], + [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01], + [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02], + [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08], + [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09], + [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33], + [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01], + [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29], + [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01], + [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29], + [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02], + [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00], + [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08], + [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01], + [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01], + [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01], + [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13], + [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11], + [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02], + [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02], + [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01], + [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31], + [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04], + [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25], + [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01], + [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00], + [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02], + [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 
2.89306132e+05], + [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02], + [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01], + [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01], + [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00] + ]) + + for a,b,c,expected in ref_data: + result = special.hyp1f1(a,b,c) + assert_(abs(expected - result)/expected < 1e-4) + + def test_hyp1f1_gh2957(self): + hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933) + hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934) + assert_almost_equal(hyp1, hyp2, 12) + + def test_hyp1f1_gh2282(self): + hyp = special.hyp1f1(0.5, 1.5, -1000) + assert_almost_equal(hyp, 0.028024956081989643, 12) + + def test_hyp2f1(self): + # a collection of special cases taken from AMS 55 + values = [ + [0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))], + [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)], + [1, 1, 2, 0.2, -1/0.2*log(1-0.2)], + [3, 3.5, 1.5, 0.2**2, 0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))], + [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)], + [3, 4, 8, 1, + special.gamma(8) * special.gamma(8-4-3) + / special.gamma(8-3) / special.gamma(8-4)], + [3, 2, 3-2+1, -1, + 1./2**3*sqrt(pi) * special.gamma(1+3-2) + / special.gamma(1+0.5*3-2) / special.gamma(0.5+0.5*3)], + [5, 2, 5-2+1, -1, + 1./2**5*sqrt(pi) * special.gamma(1+5-2) + / special.gamma(1+0.5*5-2) / special.gamma(0.5+0.5*5)], + [4, 0.5+4, 1.5-2*4, -1./3, + (8./9)**(-2*4)*special.gamma(4./3) * special.gamma(1.5-2*4) + / special.gamma(3./2) / special.gamma(4./3-2*4)], + # and some others + # ticket #424 + [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484], + # negative integer a or b, with c-a-b integer and x > 0.9 + [-2,3,1,0.95,0.715], + [2,-3,1,0.95,-0.007], + [-6,3,1,0.95,0.0000810625], + [2,-5,1,0.95,-0.000029375], + # huge negative integers + (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24), + (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18), + ] + for i, (a, b, c, x, v) in 
enumerate(values): + cv = special.hyp2f1(a, b, c, x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_hyperu(self): + val1 = special.hyperu(1,0.1,100) + assert_almost_equal(val1,0.0098153,7) + a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2] + a,b = asarray(a), asarray(b) + z = 0.5 + hypu = special.hyperu(a,b,z) + hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) / + (special.gamma(1+a-b)*special.gamma(b)) - + z**(1-b)*special.hyp1f1(1+a-b,2-b,z) + / (special.gamma(a)*special.gamma(2-b))) + assert_array_almost_equal(hypu,hprl,12) + + def test_hyperu_gh2287(self): + assert_almost_equal(special.hyperu(1, 1.5, 20.2), + 0.048360918656699191, 12) + + +class TestBessel: + def test_itj0y0(self): + it0 = array(special.itj0y0(.2)) + assert_array_almost_equal( + it0, + array([0.19933433254006822, -0.34570883800412566]), + 8, + ) + + def test_it2j0y0(self): + it2 = array(special.it2j0y0(.2)) + assert_array_almost_equal( + it2, + array([0.0049937546274601858, -0.43423067011231614]), + 8, + ) + + def test_negv_iv(self): + assert_equal(special.iv(3,2), special.iv(-3,2)) + + def test_j0(self): + oz = special.j0(.1) + ozr = special.jn(0,.1) + assert_almost_equal(oz,ozr,8) + + def test_j1(self): + o1 = special.j1(.1) + o1r = special.jn(1,.1) + assert_almost_equal(o1,o1r,8) + + def test_jn(self): + jnnr = special.jn(1,.2) + assert_almost_equal(jnnr,0.099500832639235995,8) + + def test_negv_jv(self): + assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14) + + def test_jv(self): + values = [[0, 0.1, 0.99750156206604002], + [2./3, 1e-8, 0.3239028506761532e-5], + [2./3, 1e-10, 0.1503423854873779e-6], + [3.1, 1e-10, 0.1711956265409013e-32], + [2./3, 4.0, -0.2325440850267039], + ] + for i, (v, x, y) in enumerate(values): + yc = special.jv(v, x) + assert_almost_equal(yc, y, 8, err_msg='test #%d' % i) + + def test_negv_jve(self): + assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14) + + def test_jve(self): + jvexp = special.jve(1,.2) + 
assert_almost_equal(jvexp,0.099500832639235995,8) + jvexp1 = special.jve(1,.2+1j) + z = .2+1j + jvexpr = special.jv(1,z)*exp(-abs(z.imag)) + assert_almost_equal(jvexp1,jvexpr,8) + + def test_jn_zeros(self): + jn0 = special.jn_zeros(0,5) + jn1 = special.jn_zeros(1,5) + assert_array_almost_equal(jn0,array([2.4048255577, + 5.5200781103, + 8.6537279129, + 11.7915344391, + 14.9309177086]),4) + assert_array_almost_equal(jn1,array([3.83171, + 7.01559, + 10.17347, + 13.32369, + 16.47063]),4) + + jn102 = special.jn_zeros(102,5) + assert_allclose(jn102, array([110.89174935992040343, + 117.83464175788308398, + 123.70194191713507279, + 129.02417238949092824, + 134.00114761868422559]), rtol=1e-13) + + jn301 = special.jn_zeros(301,5) + assert_allclose(jn301, array([313.59097866698830153, + 323.21549776096288280, + 331.22338738656748796, + 338.39676338872084500, + 345.03284233056064157]), rtol=1e-13) + + def test_jn_zeros_slow(self): + jn0 = special.jn_zeros(0, 300) + assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13) + assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13) + assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13) + + jn10 = special.jn_zeros(10, 300) + assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13) + assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13) + assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13) + + jn3010 = special.jn_zeros(3010,5) + assert_allclose(jn3010, array([3036.86590780927, + 3057.06598526482, + 3073.66360690272, + 3088.37736494778, + 3101.86438139042]), rtol=1e-8) + + def test_jnjnp_zeros(self): + jn = special.jn + + def jnp(n, x): + return (jn(n-1,x) - jn(n+1,x))/2 + for nt in range(1, 30): + z, n, m, t = special.jnjnp_zeros(nt) + for zz, nn, tt in zip(z, n, t): + if tt == 0: + assert_allclose(jn(nn, zz), 0, atol=1e-6) + elif tt == 1: + assert_allclose(jnp(nn, zz), 0, atol=1e-6) + else: + raise AssertionError("Invalid t return for nt=%d" % nt) + + def test_jnp_zeros(self): + 
jnp = special.jnp_zeros(1,5) + assert_array_almost_equal(jnp, array([1.84118, + 5.33144, + 8.53632, + 11.70600, + 14.86359]),4) + jnp = special.jnp_zeros(443,5) + assert_allclose(special.jvp(443, jnp), 0, atol=1e-15) + + def test_jnyn_zeros(self): + jnz = special.jnyn_zeros(1,5) + assert_array_almost_equal(jnz,(array([3.83171, + 7.01559, + 10.17347, + 13.32369, + 16.47063]), + array([1.84118, + 5.33144, + 8.53632, + 11.70600, + 14.86359]), + array([2.19714, + 5.42968, + 8.59601, + 11.74915, + 14.89744]), + array([3.68302, + 6.94150, + 10.12340, + 13.28576, + 16.44006])),5) + + def test_jvp(self): + jvprim = special.jvp(2,2) + jv0 = (special.jv(1,2)-special.jv(3,2))/2 + assert_almost_equal(jvprim,jv0,10) + + def test_k0(self): + ozk = special.k0(.1) + ozkr = special.kv(0,.1) + assert_almost_equal(ozk,ozkr,8) + + def test_k0e(self): + ozke = special.k0e(.1) + ozker = special.kve(0,.1) + assert_almost_equal(ozke,ozker,8) + + def test_k1(self): + o1k = special.k1(.1) + o1kr = special.kv(1,.1) + assert_almost_equal(o1k,o1kr,8) + + def test_k1e(self): + o1ke = special.k1e(.1) + o1ker = special.kve(1,.1) + assert_almost_equal(o1ke,o1ker,8) + + def test_jacobi(self): + a = 5*np.random.random() - 1 + b = 5*np.random.random() - 1 + P0 = special.jacobi(0,a,b) + P1 = special.jacobi(1,a,b) + P2 = special.jacobi(2,a,b) + P3 = special.jacobi(3,a,b) + + assert_array_almost_equal(P0.c,[1],13) + assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13) + cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)] + p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]] + assert_array_almost_equal(P2.c,array(p2c)/8.0,13) + cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3), + 12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)] + p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]] + assert_array_almost_equal(P3.c,array(p3c)/48.0,13) + + def test_kn(self): + kn1 = special.kn(0,.2) + assert_almost_equal(kn1,1.7527038555281462,8) + + def test_negv_kv(self): + 
        assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))

    def test_kv0(self):
        # K_0 at a small argument, checked against a precomputed reference value.
        kv0 = special.kv(0,.2)
        assert_almost_equal(kv0, 1.7527038555281462, 10)

    def test_kv1(self):
        kv1 = special.kv(1,0.2)
        assert_almost_equal(kv1, 4.775972543220472, 10)

    def test_kv2(self):
        kv2 = special.kv(2,0.2)
        assert_almost_equal(kv2, 49.51242928773287, 10)

    def test_kn_largeorder(self):
        # Large integer order at small argument produces a very large but
        # finite value; checks against a precomputed reference.
        assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)

    def test_kv_largearg(self):
        # K_v decays exponentially in z, so at a huge argument the result
        # underflows cleanly to 0 rather than raising or returning nan.
        assert_equal(special.kv(0, 1e19), 0)

    def test_negv_kve(self):
        # kve is even in the order v, mirroring the kv symmetry test above.
        assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))

    def test_kve(self):
        # Scaling identity kve(v, z) == kv(v, z) * exp(z), for real and
        # complex z.
        kve1 = special.kve(0,.2)
        kv1 = special.kv(0,.2)*exp(.2)
        assert_almost_equal(kve1,kv1,8)
        z = .2+1j
        kve2 = special.kve(0,z)
        kv2 = special.kv(0,z)*exp(z)
        assert_almost_equal(kve2,kv2,8)

    def test_kvp_v0n1(self):
        # First derivative at order 0: K_0'(z) = -K_1(z).
        z = 2.2
        assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)

    def test_kvp_n1(self):
        # First derivative via the recurrence
        # K_v'(z) = -K_{v+1}(z) + (v/z) K_v(z).
        v = 3.
        z = 2.2
        xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
        x = special.kvp(v,z, n=1)
        assert_almost_equal(xc, x, 10)  # recurrence check, asserted to 10 digits

    def test_kvp_n2(self):
        v = 3.
+ z = 2.2 + xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z + x = special.kvp(v, z, n=2) + assert_almost_equal(xc, x, 10) + + def test_y0(self): + oz = special.y0(.1) + ozr = special.yn(0,.1) + assert_almost_equal(oz,ozr,8) + + def test_y1(self): + o1 = special.y1(.1) + o1r = special.yn(1,.1) + assert_almost_equal(o1,o1r,8) + + def test_y0_zeros(self): + yo,ypo = special.y0_zeros(2) + zo,zpo = special.y0_zeros(2,complex=1) + all = r_[yo,zo] + allval = r_[ypo,zpo] + assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11) + assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11) + + def test_y1_zeros(self): + y1 = special.y1_zeros(1) + assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5) + + def test_y1p_zeros(self): + y1p = special.y1p_zeros(1,complex=1) + assert_array_almost_equal( + y1p, + (array([0.5768+0.904j]), array([-0.7635+0.5892j])), + 3, + ) + + def test_yn_zeros(self): + an = special.yn_zeros(4,2) + assert_array_almost_equal(an,array([5.64515, 9.36162]),5) + an = special.yn_zeros(443,5) + assert_allclose(an, [450.13573091578090314, + 463.05692376675001542, + 472.80651546418663566, + 481.27353184725625838, + 488.98055964441374646], + rtol=1e-15,) + + def test_ynp_zeros(self): + ao = special.ynp_zeros(0,2) + assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6) + ao = special.ynp_zeros(43,5) + assert_allclose(special.yvp(43, ao), 0, atol=1e-15) + ao = special.ynp_zeros(443,5) + assert_allclose(special.yvp(443, ao), 0, atol=1e-9) + + def test_ynp_zeros_large_order(self): + ao = special.ynp_zeros(443,5) + assert_allclose(special.yvp(443, ao), 0, atol=1e-14) + + def test_yn(self): + yn2n = special.yn(1,.2) + assert_almost_equal(yn2n,-3.3238249881118471,8) + + def test_yn_gh_20405(self): + # Enforce correct asymptotic behavior for large n. 
+ observed = cephes.yn(500, 1) + assert observed == -np.inf + + def test_negv_yv(self): + assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14) + + def test_yv(self): + yv2 = special.yv(1,.2) + assert_almost_equal(yv2,-3.3238249881118471,8) + + def test_negv_yve(self): + assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14) + + def test_yve(self): + yve2 = special.yve(1,.2) + assert_almost_equal(yve2,-3.3238249881118471,8) + yve2r = special.yv(1,.2+1j)*exp(-1) + yve22 = special.yve(1,.2+1j) + assert_almost_equal(yve22,yve2r,8) + + def test_yvp(self): + yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0 + yvp1 = special.yvp(2,.2) + assert_array_almost_equal(yvp1,yvpr,10) + + def _cephes_vs_amos_points(self): + """Yield points at which to compare Cephes implementation to AMOS""" + # check several points, including large-amplitude ones + v = [-120, -100.3, -20., -10., -1., -.5, 0., 1., 12.49, 120., 301] + z = [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, 700.6, 1300, + 10003] + yield from itertools.product(v, z) + + # check half-integers; these are problematic points at least + # for cephes/iv + yield from itertools.product(0.5 + arange(-60, 60), [3.5]) + + def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None): + for v, z in self._cephes_vs_amos_points(): + if skip is not None and skip(v, z): + continue + c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z) + if np.isinf(c1): + assert_(np.abs(c2) >= 1e300, (v, z)) + elif np.isnan(c1): + assert_(c2.imag != 0, (v, z)) + else: + assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol) + if v == int(v): + assert_allclose(c3, c2, err_msg=(v, z), + rtol=rtol, atol=atol) + + @pytest.mark.xfail(platform.machine() == 'ppc64le', + reason="fails on ppc64le") + def test_jv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305) + + @pytest.mark.xfail(platform.machine() == 'ppc64le', + reason="fails on ppc64le") + def test_yv_cephes_vs_amos(self): + 
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305) + + def test_yv_cephes_vs_amos_only_small_orders(self): + def skipper(v, z): + return abs(v) > 50 + self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, + skip=skipper) + + def test_iv_cephes_vs_amos(self): + with np.errstate(all='ignore'): + self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305) + + @pytest.mark.slow + def test_iv_cephes_vs_amos_mass_test(self): + N = 1000000 + np.random.seed(1) + v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N) + x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N) + + imsk = (np.random.randint(8, size=N) == 0) + v[imsk] = v[imsk].astype(np.int64) + + with np.errstate(all='ignore'): + c1 = special.iv(v, x) + c2 = special.iv(v, x+0j) + + # deal with differences in the inf and zero cutoffs + c1[abs(c1) > 1e300] = np.inf + c2[abs(c2) > 1e300] = np.inf + c1[abs(c1) < 1e-300] = 0 + c2[abs(c2) < 1e-300] = 0 + + dc = abs(c1/c2 - 1) + dc[np.isnan(dc)] = 0 + + k = np.argmax(dc) + + # Most error apparently comes from AMOS and not our implementation; + # there are some problems near integer orders there + assert_( + dc[k] < 2e-7, + (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)) + ) + + def test_kv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305) + self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305) + + def test_ticket_623(self): + assert_allclose(special.jv(3, 4), 0.43017147387562193) + assert_allclose(special.jv(301, 1300), 0.0183487151115275) + assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048) + + def test_ticket_853(self): + """Negative-order Bessels""" + # cephes + assert_allclose(special.jv(-1, 1), -0.4400505857449335) + assert_allclose(special.jv(-2, 1), 0.1149034849319005) + assert_allclose(special.yv(-1, 1), 0.7812128213002887) + assert_allclose(special.yv(-2, 1), -1.650682606816255) + 
assert_allclose(special.iv(-1, 1), 0.5651591039924851) + assert_allclose(special.iv(-2, 1), 0.1357476697670383) + assert_allclose(special.kv(-1, 1), 0.6019072301972347) + assert_allclose(special.kv(-2, 1), 1.624838898635178) + assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952) + assert_allclose(special.yv(-0.5, 1), 0.6713967071418031) + assert_allclose(special.iv(-0.5, 1), 1.231200214592967) + assert_allclose(special.kv(-0.5, 1), 0.4610685044478945) + # amos + assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335) + assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005) + assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887) + assert_allclose(special.yv(-2, 1+0j), -1.650682606816255) + + assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851) + assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383) + assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347) + assert_allclose(special.kv(-2, 1+0j), 1.624838898635178) + + assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952) + assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j) + assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031) + assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j) + + assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967) + assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j) + assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945) + assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j) + + assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3)) + assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3)) + assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3)) + assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j)) + + assert_allclose( + special.hankel1(-0.5, 1+1j), + special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j) + ) + assert_allclose( + 
special.hankel2(-0.5, 1+1j), + special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j) + ) + + def test_ticket_854(self): + """Real-valued Bessel domains""" + assert_(isnan(special.jv(0.5, -1))) + assert_(isnan(special.iv(0.5, -1))) + assert_(isnan(special.yv(0.5, -1))) + assert_(isnan(special.yv(1, -1))) + assert_(isnan(special.kv(0.5, -1))) + assert_(isnan(special.kv(1, -1))) + assert_(isnan(special.jve(0.5, -1))) + assert_(isnan(special.ive(0.5, -1))) + assert_(isnan(special.yve(0.5, -1))) + assert_(isnan(special.yve(1, -1))) + assert_(isnan(special.kve(0.5, -1))) + assert_(isnan(special.kve(1, -1))) + assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1)) + assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1)) + + def test_gh_7909(self): + assert_(special.kv(1.5, 0) == np.inf) + assert_(special.kve(1.5, 0) == np.inf) + + def test_ticket_503(self): + """Real-valued Bessel I overflow""" + assert_allclose(special.iv(1, 700), 1.528500390233901e302) + assert_allclose(special.iv(1000, 1120), 1.301564549405821e301) + + def test_iv_hyperg_poles(self): + assert_allclose(special.iv(-0.5, 1), 1.231200214592967) + + def iv_series(self, v, z, n=200): + k = arange(0, n).astype(double) + r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1) + r[isnan(r)] = inf + r = exp(r) + err = abs(r).max() * finfo(double).eps * n + abs(r[-1])*10 + return r.sum(), err + + def test_i0_series(self): + for z in [1., 10., 200.5]: + value, err = self.iv_series(0, z) + assert_allclose(special.i0(z), value, atol=err, err_msg=z) + + def test_i1_series(self): + for z in [1., 10., 200.5]: + value, err = self.iv_series(1, z) + assert_allclose(special.i1(z), value, atol=err, err_msg=z) + + def test_iv_series(self): + for v in [-20., -10., -1., 0., 1., 12.49, 120.]: + for z in [1., 10., 200.5, -1+2j]: + value, err = self.iv_series(v, z) + assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z)) + + def test_i0(self): + values = [[0.0, 1.0], + [1e-10, 1.0], + 
[0.1, 0.9071009258], + [0.5, 0.6450352706], + [1.0, 0.4657596077], + [2.5, 0.2700464416], + [5.0, 0.1835408126], + [20.0, 0.0897803119], + ] + for i, (x, v) in enumerate(values): + cv = special.i0(x) * exp(-x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_i0e(self): + oize = special.i0e(.1) + oizer = special.ive(0,.1) + assert_almost_equal(oize,oizer,8) + + def test_i1(self): + values = [[0.0, 0.0], + [1e-10, 0.4999999999500000e-10], + [0.1, 0.0452984468], + [0.5, 0.1564208032], + [1.0, 0.2079104154], + [5.0, 0.1639722669], + [20.0, 0.0875062222], + ] + for i, (x, v) in enumerate(values): + cv = special.i1(x) * exp(-x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_i1e(self): + oi1e = special.i1e(.1) + oi1er = special.ive(1,.1) + assert_almost_equal(oi1e,oi1er,8) + + def test_iti0k0(self): + iti0 = array(special.iti0k0(5)) + assert_array_almost_equal( + iti0, + array([31.848667776169801, 1.5673873907283657]), + 5, + ) + + def test_it2i0k0(self): + it2k = special.it2i0k0(.1) + assert_array_almost_equal( + it2k, + array([0.0012503906973464409, 3.3309450354686687]), + 6, + ) + + def test_iv(self): + iv1 = special.iv(0,.1)*exp(-.1) + assert_almost_equal(iv1,0.90710092578230106,10) + + def test_negv_ive(self): + assert_equal(special.ive(3,2), special.ive(-3,2)) + + def test_ive(self): + ive1 = special.ive(0,.1) + iv1 = special.iv(0,.1)*exp(-.1) + assert_almost_equal(ive1,iv1,10) + + def test_ivp0(self): + assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10) + + def test_ivp(self): + y = (special.iv(0,2) + special.iv(2,2))/2 + x = special.ivp(1,2) + assert_almost_equal(x,y,10) + + +class TestLaguerre: + def test_laguerre(self): + lag0 = special.laguerre(0) + lag1 = special.laguerre(1) + lag2 = special.laguerre(2) + lag3 = special.laguerre(3) + lag4 = special.laguerre(4) + lag5 = special.laguerre(5) + assert_array_almost_equal(lag0.c,[1],13) + assert_array_almost_equal(lag1.c,[-1,1],13) + 
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13) + assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13) + assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13) + assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13) + + def test_genlaguerre(self): + k = 5*np.random.random() - 0.9 + lag0 = special.genlaguerre(0,k) + lag1 = special.genlaguerre(1,k) + lag2 = special.genlaguerre(2,k) + lag3 = special.genlaguerre(3,k) + assert_equal(lag0.c, [1]) + assert_equal(lag1.c, [-1, k + 1]) + assert_almost_equal( + lag2.c, + array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0 + ) + assert_almost_equal( + lag3.c, + array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0 + ) + + +# Base polynomials come from Abrahmowitz and Stegan +class TestLegendre: + def test_legendre(self): + leg0 = special.legendre(0) + leg1 = special.legendre(1) + leg2 = special.legendre(2) + leg3 = special.legendre(3) + leg4 = special.legendre(4) + leg5 = special.legendre(5) + assert_equal(leg0.c, [1]) + assert_equal(leg1.c, [1,0]) + assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13) + assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0) + assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0) + assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0) + + @pytest.mark.parametrize('n', [1, 2, 3, 4, 5]) + @pytest.mark.parametrize('zr', [0.5241717, 12.80232, -9.699001, + 0.5122437, 0.1714377]) + @pytest.mark.parametrize('zi', [9.766818, 0.2999083, 8.24726, -22.84843, + -0.8792666]) + def test_lpn_against_clpmn(self, n, zr, zi): + reslpn = special.lpn(n, zr + zi*1j) + resclpmn = special.clpmn(0, n, zr+zi*1j) + assert_allclose(reslpn[0], resclpmn[0][0]) + assert_allclose(reslpn[1], resclpmn[1][0]) + + +class TestLambda: + def test_lmbda(self): + lam = special.lmbda(1,.1) + lamr = ( + array([special.jn(0,.1), 2*special.jn(1,.1)/.1]), + array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]) + ) + assert_array_almost_equal(lam,lamr,8) + + +class 
TestLog1p: + def test_log1p(self): + l1p = (special.log1p(10), special.log1p(11), special.log1p(12)) + l1prl = (log(11), log(12), log(13)) + assert_array_almost_equal(l1p,l1prl,8) + + def test_log1pmore(self): + l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2)) + l1pmrl = (log(2),log(2.1),log(2.2)) + assert_array_almost_equal(l1pm,l1pmrl,8) + + +class TestLegendreFunctions: + def test_clpmn(self): + z = 0.5+0.3j + clp = special.clpmn(2, 2, z, 3) + assert_array_almost_equal(clp, + (array([[1.0000, z, 0.5*(3*z*z-1)], + [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)], + [0.0000, 0.0000, 3*(z*z-1)]]), + array([[0.0000, 1.0000, 3*z], + [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)], + [0.0000, 0.0000, 6*z]])), + 7) + + def test_clpmn_close_to_real_2(self): + eps = 1e-10 + m = 1 + n = 3 + x = 0.5 + clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n] + clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n] + assert_array_almost_equal(array([clp_plus, clp_minus]), + array([special.lpmv(m, n, x), + special.lpmv(m, n, x)]), + 7) + + def test_clpmn_close_to_real_3(self): + eps = 1e-10 + m = 1 + n = 3 + x = 0.5 + clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n] + clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n] + assert_array_almost_equal(array([clp_plus, clp_minus]), + array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi), + special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]), + 7) + + def test_clpmn_across_unit_circle(self): + eps = 1e-7 + m = 1 + n = 1 + x = 1j + for type in [2, 3]: + assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n], + special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6) + + def test_inf(self): + for z in (1, -1): + for n in range(4): + for m in range(1, n): + lp = special.clpmn(m, n, z) + assert_(np.isinf(lp[1][1,1:]).all()) + lp = special.lpmn(m, n, z) + assert_(np.isinf(lp[1][1,1:]).all()) + + def test_deriv_clpmn(self): + # data inside and outside of the unit circle + zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j, + 1+1j, -1+1j, 
-1-1j, 1-1j] + m = 2 + n = 3 + for type in [2, 3]: + for z in zvals: + for h in [1e-3, 1e-3j]: + approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0] + - special.clpmn(m, n, z-0.5*h, type)[0])/h + assert_allclose(special.clpmn(m, n, z, type)[1], + approx_derivative, + rtol=1e-4) + + def test_lpmn(self): + lp = special.lpmn(0,2,.5) + assert_array_almost_equal(lp,(array([[1.00000, + 0.50000, + -0.12500]]), + array([[0.00000, + 1.00000, + 1.50000]])),4) + + def test_lpn(self): + lpnf = special.lpn(2,.5) + assert_array_almost_equal(lpnf,(array([1.00000, + 0.50000, + -0.12500]), + array([0.00000, + 1.00000, + 1.50000])),4) + + def test_lpmv(self): + lp = special.lpmv(0,2,.5) + assert_almost_equal(lp,-0.125,7) + lp = special.lpmv(0,40,.001) + assert_almost_equal(lp,0.1252678976534484,7) + + # XXX: this is outside the domain of the current implementation, + # so ensure it returns a NaN rather than a wrong answer. + with np.errstate(all='ignore'): + lp = special.lpmv(-1,-1,.001) + assert_(lp != 0 or np.isnan(lp)) + + def test_lqmn(self): + lqmnf = special.lqmn(0,2,.5) + lqf = special.lqn(2,.5) + assert_array_almost_equal(lqmnf[0][0],lqf[0],4) + assert_array_almost_equal(lqmnf[1][0],lqf[1],4) + + def test_lqmn_gt1(self): + """algorithm for real arguments changes at 1.0001 + test against analytical result for m=2, n=1 + """ + x0 = 1.0001 + delta = 0.00002 + for x in (x0-delta, x0+delta): + lq = special.lqmn(2, 1, x)[0][-1, -1] + expected = 2/(x*x-1) + assert_almost_equal(lq, expected) + + def test_lqmn_shape(self): + a, b = special.lqmn(4, 4, 1.1) + assert_equal(a.shape, (5, 5)) + assert_equal(b.shape, (5, 5)) + + a, b = special.lqmn(4, 0, 1.1) + assert_equal(a.shape, (5, 1)) + assert_equal(b.shape, (5, 1)) + + def test_lqn(self): + lqf = special.lqn(2,.5) + assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]), + array([1.3333, 1.216, -0.8427])),4) + + @pytest.mark.parametrize("function", [special.lpn, special.lqn]) + @pytest.mark.parametrize("n", [1, 2, 4, 
8, 16, 32]) + @pytest.mark.parametrize("z_complex", [False, True]) + @pytest.mark.parametrize("z_inexact", [False, True]) + @pytest.mark.parametrize( + "input_shape", + [ + (), (1, ), (2, ), (2, 1), (1, 2), (2, 2), (2, 2, 1), (2, 2, 2) + ] + ) + def test_array_inputs_lxn(self, function, n, z_complex, z_inexact, input_shape): + """Tests for correct output shapes.""" + rng = np.random.default_rng(1234) + if z_inexact: + z = rng.integers(-3, 3, size=input_shape) + else: + z = rng.uniform(-1, 1, size=input_shape) + + if z_complex: + z = 1j * z + 0.5j * z + + P_z, P_d_z = function(n, z) + assert P_z.shape == (n + 1, ) + input_shape + assert P_d_z.shape == (n + 1, ) + input_shape + + @pytest.mark.parametrize("function", [special.lqmn]) + @pytest.mark.parametrize( + "m,n", + [(0, 1), (1, 2), (1, 4), (3, 8), (11, 16), (19, 32)] + ) + @pytest.mark.parametrize("z_inexact", [False, True]) + @pytest.mark.parametrize( + "input_shape", [ + (), (1, ), (2, ), (2, 1), (1, 2), (2, 2), (2, 2, 1) + ] + ) + def test_array_inputs_lxmn(self, function, m, n, z_inexact, input_shape): + """Tests for correct output shapes and dtypes.""" + rng = np.random.default_rng(1234) + if z_inexact: + z = rng.integers(-3, 3, size=input_shape) + else: + z = rng.uniform(-1, 1, size=input_shape) + + P_z, P_d_z = function(m, n, z) + assert P_z.shape == (m + 1, n + 1) + input_shape + assert P_d_z.shape == (m + 1, n + 1) + input_shape + + + @pytest.mark.parametrize("function", [special.clpmn, special.lqmn]) + @pytest.mark.parametrize( + "m,n", + [(0, 1), (1, 2), (1, 4), (3, 8), (11, 16), (19, 32)] + ) + @pytest.mark.parametrize( + "input_shape", [ + (), (1, ), (2, ), (2, 1), (1, 2), (2, 2), (2, 2, 1) + ] + ) + def test_array_inputs_clxmn(self, function, m, n, input_shape): + """Tests for correct output shapes and dtypes.""" + rng = np.random.default_rng(1234) + z = rng.uniform(-1, 1, size=input_shape) + z = 1j * z + 0.5j * z + + P_z, P_d_z = function(m, n, z) + assert P_z.shape == (m + 1, n + 1) + input_shape 
        assert P_d_z.shape == (m + 1, n + 1) + input_shape


class TestMathieu:

    def test_mathieu_a(self):
        pass

    def test_mathieu_even_coef(self):
        # Smoke test only: just checks the call does not raise.
        # NOTE(review): no independent reference values are asserted here —
        # the coefficient ordering/normalization was apparently unclear to
        # the original author, so correctness is not verified.
        special.mathieu_even_coef(2,5)
        # Q not defined broken and cannot figure out proper reporting order

    def test_mathieu_odd_coef(self):
        # same problem as above
        pass


class TestFresnelIntegral:

    def test_modfresnelp(self):
        pass

    def test_modfresnelm(self):
        pass


class TestOblCvSeq:
    def test_obl_cv_seq(self):
        # Oblate spheroidal characteristic values for m=0, n=0..3 at c=1,
        # against precomputed references.
        obl = special.obl_cv_seq(0,3,1)
        assert_array_almost_equal(obl,array([-0.348602,
                                             1.393206,
                                             5.486800,
                                             11.492120]),5)


class TestParabolicCylinder:
    def test_pbdn_seq(self):
        # Sequence of parabolic cylinder functions D_n and derivatives,
        # against precomputed references.
        pb = special.pbdn_seq(1,.1)
        assert_array_almost_equal(pb,(array([0.9975,
                                             0.0998]),
                                      array([-0.0499,
                                             0.9925])),4)

    def test_pbdv(self):
        # NOTE(review): smoke test only — both expressions are evaluated and
        # discarded; nothing is asserted beyond "does not raise".
        special.pbdv(1,.2)
        1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]

    def test_pbdv_seq(self):
        # pbdv_seq should agree with the real part of pbdn_seq for integer
        # order.
        pbn = special.pbdn_seq(1,.1)
        pbv = special.pbdv_seq(1,.1)
        assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)

    def test_pbdv_points(self):
        # simple case: closed form for D_eta(0) via the gamma function
        eta = np.linspace(-10, 10, 5)
        z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
        assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)

        # some points
        assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
        assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)

    def test_pbdv_gradient(self):
        # Returned derivative should match a central finite difference of
        # the function values.
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]

        p = special.pbdv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
        assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)

    def test_pbvv_gradient(self):
        # Same finite-difference check for the V_eta(x) variant.
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]

        p = special.pbvv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
+ assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6) + + def test_pbvv_seq(self): + res1, res2 = special.pbvv_seq(2, 3) + assert_allclose(res1, np.array([2.976319645712036, + 1.358840996329579, + 0.5501016716383508])) + assert_allclose(res2, np.array([3.105638472238475, + 0.9380581512176672, + 0.533688488872053])) + + +class TestPolygamma: + # from Table 6.2 (pg. 271) of A&S + def test_polygamma(self): + poly2 = special.polygamma(2,1) + poly3 = special.polygamma(3,1) + assert_almost_equal(poly2,-2.4041138063,10) + assert_almost_equal(poly3,6.4939394023,10) + + # Test polygamma(0, x) == psi(x) + x = [2, 3, 1.1e14] + assert_almost_equal(special.polygamma(0, x), special.psi(x)) + + # Test broadcasting + n = [0, 1, 2] + x = [0.5, 1.5, 2.5] + expected = [-1.9635100260214238, 0.93480220054467933, + -0.23620405164172739] + assert_almost_equal(special.polygamma(n, x), expected) + expected = np.vstack([expected]*2) + assert_almost_equal(special.polygamma(n, np.vstack([x]*2)), + expected) + assert_almost_equal(special.polygamma(np.vstack([n]*2), x), + expected) + + +class TestProCvSeq: + def test_pro_cv_seq(self): + prol = special.pro_cv_seq(0,3,1) + assert_array_almost_equal(prol,array([0.319000, + 2.593084, + 6.533471, + 12.514462]),5) + + +class TestPsi: + def test_psi(self): + ps = special.psi(1) + assert_almost_equal(ps,-0.57721566490153287,8) + + +class TestRadian: + def test_radian(self): + rad = special.radian(90,0,0) + assert_almost_equal(rad,pi/2.0,5) + + def test_radianmore(self): + rad1 = special.radian(90,1,60) + assert_almost_equal(rad1,pi/2+0.0005816135199345904,5) + + +class TestRiccati: + def test_riccati_jn(self): + N, x = 2, 0.2 + S = np.empty((N, N)) + for n in range(N): + j = special.spherical_jn(n, x) + jp = special.spherical_jn(n, x, derivative=True) + S[0,n] = x*j + S[1,n] = x*jp + j + assert_array_almost_equal(S, special.riccati_jn(n, x), 8) + + def test_riccati_yn(self): + N, x = 2, 0.2 + C = np.empty((N, N)) + for n in range(N): + y = 
special.spherical_yn(n, x) + yp = special.spherical_yn(n, x, derivative=True) + C[0,n] = x*y + C[1,n] = x*yp + y + assert_array_almost_equal(C, special.riccati_yn(n, x), 8) + + +class TestRound: + def test_round(self): + rnd = list(map(int, (special.round(10.1), + special.round(10.4), + special.round(10.5), + special.round(10.6)))) + + # Note: According to the documentation, scipy.special.round is + # supposed to round to the nearest even number if the fractional + # part is exactly 0.5. On some platforms, this does not appear + # to work and thus this test may fail. However, this unit test is + # correctly written. + rndrl = (10,10,10,11) + assert_array_equal(rnd,rndrl) + + +def test_sph_harm(): + # Tests derived from tables in + # https://en.wikipedia.org/wiki/Table_of_spherical_harmonics + sh = special.sph_harm + pi = np.pi + exp = np.exp + sqrt = np.sqrt + sin = np.sin + cos = np.cos + assert_array_almost_equal(sh(0,0,0,0), + 0.5/sqrt(pi)) + assert_array_almost_equal(sh(-2,2,0.,pi/4), + 0.25*sqrt(15./(2.*pi)) * + (sin(pi/4))**2.) + assert_array_almost_equal(sh(-2,2,0.,pi/2), + 0.25*sqrt(15./(2.*pi))) + assert_array_almost_equal(sh(2,2,pi,pi/2), + 0.25*sqrt(15/(2.*pi)) * + exp(0+2.*pi*1j)*sin(pi/2.)**2.) + assert_array_almost_equal(sh(2,4,pi/4.,pi/3.), + (3./8.)*sqrt(5./(2.*pi)) * + exp(0+2.*pi/4.*1j) * + sin(pi/3.)**2. * + (7.*cos(pi/3.)**2.-1)) + assert_array_almost_equal(sh(4,4,pi/8.,pi/6.), + (3./16.)*sqrt(35./(2.*pi)) * + exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.) 
+ + +def test_sph_harm_ufunc_loop_selection(): + # see https://github.com/scipy/scipy/issues/4895 + dt = np.dtype(np.complex128) + assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt) + assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt) + assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt) + assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt) + assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt) + assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt) + + +class TestStruve: + def _series(self, v, z, n=100): + """Compute Struve function & error estimate from its power series.""" + k = arange(0, n) + r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5) + err = abs(r).max() * finfo(double).eps * n + return r.sum(), err + + def test_vs_series(self): + """Check Struve function versus its power series""" + for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]: + for z in [1, 10, 19, 21, 30]: + value, err = self._series(v, z) + assert_allclose(special.struve(v, z), value, rtol=0, atol=err), (v, z) + + def test_some_values(self): + assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7) + assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8) + assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12) + assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11) + assert_equal(special.struve(-12, -41), -special.struve(-12, 41)) + assert_equal(special.struve(+12, -41), -special.struve(+12, 41)) + assert_equal(special.struve(-11, -41), +special.struve(-11, 41)) + assert_equal(special.struve(+11, -41), +special.struve(+11, 41)) + + assert_(isnan(special.struve(-7.1, -1))) + assert_(isnan(special.struve(-10.1, -1))) + + def test_regression_679(self): + """Regression test for #679""" + assert_allclose(special.struve(-1.0, 20 - 1e-8), + special.struve(-1.0, 20 + 1e-8)) + assert_allclose(special.struve(-2.0, 20 - 1e-8), + special.struve(-2.0, 20 + 1e-8)) + 
assert_allclose(special.struve(-4.3, 20 - 1e-8), + special.struve(-4.3, 20 + 1e-8)) + + +def test_chi2_smalldf(): + assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110) + + +def test_ch2_inf(): + assert_equal(special.chdtr(0.7,np.inf), 1.0) + + +def test_chi2c_smalldf(): + assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110) + + +def test_chi2_inv_smalldf(): + assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3) + + +def test_agm_simple(): + rtol = 1e-13 + + # Gauss's constant + assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186, + rtol=rtol) + + # These values were computed using Wolfram Alpha, with the + # function ArithmeticGeometricMean[a, b]. + agm13 = 1.863616783244897 + agm15 = 2.604008190530940 + agm35 = 3.936235503649555 + assert_allclose(special.agm([[1], [3]], [1, 3, 5]), + [[1, agm13, agm15], + [agm13, 3, agm35]], rtol=rtol) + + # Computed by the iteration formula using mpmath, + # with mpmath.mp.prec = 1000: + agm12 = 1.4567910310469068 + assert_allclose(special.agm(1, 2), agm12, rtol=rtol) + assert_allclose(special.agm(2, 1), agm12, rtol=rtol) + assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol) + assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol) + assert_allclose(special.agm(13, 123456789.5), 11111458.498599306, + rtol=rtol) + assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol) + assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol) + assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178, + rtol=rtol) + assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177, + rtol=rtol) + assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152, + rtol=rtol) + fi = np.finfo(1.0) + assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305, + rtol=rtol) + assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308, + rtol=rtol) + assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308, 
+ rtol=rtol) + + # zero, nan and inf cases. + assert_equal(special.agm(0, 0), 0) + assert_equal(special.agm(99, 0), 0) + + assert_equal(special.agm(-1, 10), np.nan) + assert_equal(special.agm(0, np.inf), np.nan) + assert_equal(special.agm(np.inf, 0), np.nan) + assert_equal(special.agm(0, -np.inf), np.nan) + assert_equal(special.agm(-np.inf, 0), np.nan) + assert_equal(special.agm(np.inf, -np.inf), np.nan) + assert_equal(special.agm(-np.inf, np.inf), np.nan) + assert_equal(special.agm(1, np.nan), np.nan) + assert_equal(special.agm(np.nan, -1), np.nan) + + assert_equal(special.agm(1, np.inf), np.inf) + assert_equal(special.agm(np.inf, 1), np.inf) + assert_equal(special.agm(-1, -np.inf), -np.inf) + assert_equal(special.agm(-np.inf, -1), -np.inf) + + +def test_legacy(): + # Legacy behavior: truncating arguments to integers + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "floating point number truncated to an integer") + assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3)) + assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3)) + assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3)) + assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3)) + assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3)) + assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3)) + assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3)) + assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3)) + assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3)) + + +@with_special_errors +def test_error_raising(): + assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j) + + +def test_xlogy(): + def xfunc(x, y): + with np.errstate(invalid='ignore'): + if x == 0 and not np.isnan(y): + return x + else: + return x*np.log(y) + + z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float) + z2 = np.r_[z1, [(0, 1j), (1, 1j)]] + + w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) + 
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13) + w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1]) + assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13) + + +def test_xlog1py(): + def xfunc(x, y): + with np.errstate(invalid='ignore'): + if x == 0 and not np.isnan(y): + return x + else: + return x * np.log1p(y) + + z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0), + (1, 1e-30)], dtype=float) + w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) + assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13) + + +def test_entr(): + def xfunc(x): + if x < 0: + return -np.inf + else: + return -special.xlogy(x, x) + values = (0, 0.5, 1.0, np.inf) + signs = [-1, 1] + arr = [] + for sgn, v in itertools.product(signs, values): + arr.append(sgn * v) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z) + assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13) + + +def test_kl_div(): + def xfunc(x, y): + if x < 0 or y < 0 or (y == 0 and x != 0): + # extension of natural domain to preserve convexity + return np.inf + elif np.isposinf(x) or np.isposinf(y): + # limits within the natural domain + return np.inf + elif x == 0: + return y + else: + return special.xlogy(x, x/y) - x + y + values = (0, 0.5, 1.0) + signs = [-1, 1] + arr = [] + for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): + arr.append((sgna*va, sgnb*vb)) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13) + + +def test_rel_entr(): + def xfunc(x, y): + if x > 0 and y > 0: + return special.xlogy(x, x/y) + elif x == 0 and y >= 0: + return 0 + else: + return np.inf + values = (0, 0.5, 1.0) + signs = [-1, 1] + arr = [] + for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): + arr.append((sgna*va, sgnb*vb)) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) 
+ assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13) + + +def test_huber(): + assert_equal(special.huber(-1, 1.5), np.inf) + assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5)) + assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2)) + + def xfunc(delta, r): + if delta < 0: + return np.inf + elif np.abs(r) < delta: + return 0.5 * np.square(r) + else: + return delta * (np.abs(r) - 0.5 * delta) + + z = np.random.randn(10, 2) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13) + + +def test_pseudo_huber(): + def xfunc(delta, r): + if delta < 0: + return np.inf + elif (not delta) or (not r): + return 0 + else: + return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1) + + z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]]) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13) + + +def test_pseudo_huber_small_r(): + delta = 1.0 + r = 1e-18 + y = special.pseudo_huber(delta, r) + # expected computed with mpmath: + # import mpmath + # mpmath.mp.dps = 200 + # r = mpmath.mpf(1e-18) + # expected = float(mpmath.sqrt(1 + r**2) - 1) + expected = 5.0000000000000005e-37 + assert_allclose(y, expected, rtol=1e-13) + + +def test_runtime_warning(): + with pytest.warns(RuntimeWarning, + match=r'Too many predicted coefficients'): + mathieu_odd_coef(1000, 1000) + with pytest.warns(RuntimeWarning, + match=r'Too many predicted coefficients'): + mathieu_even_coef(1000, 1000) + + +class TestStirling2: + table = [ + [1], + [0, 1], + [0, 1, 1], + [0, 1, 3, 1], + [0, 1, 7, 6, 1], + [0, 1, 15, 25, 10, 1], + [0, 1, 31, 90, 65, 15, 1], + [0, 1, 63, 301, 350, 140, 21, 1], + [0, 1, 127, 966, 1701, 1050, 266, 28, 1], + [0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1], + [0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1], + ] + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, 
{}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_table_cases(self, is_exact, comp, kwargs): + for n in range(1, len(self.table)): + k_values = list(range(n+1)) + row = self.table[n] + comp(row, stirling2([n], k_values, exact=is_exact), **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_valid_single_integer(self, is_exact, comp, kwargs): + comp(stirling2(0, 0, exact=is_exact), self.table[0][0], **kwargs) + comp(stirling2(4, 2, exact=is_exact), self.table[4][2], **kwargs) + # a single 2-tuple of integers as arguments must return an int and not + # an array whereas arrays of single values should return array + comp(stirling2(5, 3, exact=is_exact), 25, **kwargs) + comp(stirling2([5], [3], exact=is_exact), [25], **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_negative_integer(self, is_exact, comp, kwargs): + # negative integers for n or k arguments return 0 + comp(stirling2(-1, -1, exact=is_exact), 0, **kwargs) + comp(stirling2(-1, 2, exact=is_exact), 0, **kwargs) + comp(stirling2(2, -1, exact=is_exact), 0, **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-12}) + ]) + def test_array_inputs(self, is_exact, comp, kwargs): + ans = [self.table[10][3], self.table[10][4]] + comp(stirling2(asarray([10, 10]), + asarray([3, 4]), + exact=is_exact), + ans) + comp(stirling2([10, 10], + asarray([3, 4]), + exact=is_exact), + ans) + comp(stirling2(asarray([10, 10]), + [3, 4], + exact=is_exact), + ans) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-13}) + ]) + def test_mixed_values(self, is_exact, comp, kwargs): + # negative values-of either n or k-should return 0 for the entry + ans = [0, 1, 3, 25, 1050, 
5880, 9330] + n = [-1, 0, 3, 5, 8, 10, 10] + k = [-2, 0, 2, 3, 5, 7, 3] + comp(stirling2(n, k, exact=is_exact), ans, **kwargs) + + def test_correct_parity(self): + """Test parity follows well known identity. + + en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind#Parity + """ + n, K = 100, np.arange(101) + assert_equal( + stirling2(n, K, exact=True) % 2, + [math.comb(n - (k // 2) - 1, n - k) % 2 for k in K], + ) + + def test_big_numbers(self): + # via mpmath (bigger than 32bit) + ans = asarray([48063331393110, 48004081105038305]) + n = [25, 30] + k = [17, 4] + assert array_equal(stirling2(n, k, exact=True), ans) + # bigger than 64 bit + ans = asarray([2801934359500572414253157841233849412, + 14245032222277144547280648984426251]) + n = [42, 43] + k = [17, 23] + assert array_equal(stirling2(n, k, exact=True), ans) + + @pytest.mark.parametrize("N", [4.5, 3., 4+1j, "12", np.nan]) + @pytest.mark.parametrize("K", [3.5, 3, "2", None]) + @pytest.mark.parametrize("is_exact", [True, False]) + def test_unsupported_input_types(self, N, K, is_exact): + # object, float, string, complex are not supported and raise TypeError + with pytest.raises(TypeError): + stirling2(N, K, exact=is_exact) + + @pytest.mark.parametrize("is_exact", [True, False]) + def test_numpy_array_int_object_dtype(self, is_exact): + # python integers with arbitrary precision are *not* allowed as + # object type in numpy arrays are inconsistent from api perspective + ans = asarray(self.table[4][1:]) + n = asarray([4, 4, 4, 4], dtype=object) + k = asarray([1, 2, 3, 4], dtype=object) + with pytest.raises(TypeError): + array_equal(stirling2(n, k, exact=is_exact), ans) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-13}) + ]) + def test_numpy_array_unsigned_int_dtype(self, is_exact, comp, kwargs): + # numpy unsigned integers are allowed as dtype in numpy arrays + ans = asarray(self.table[4][1:]) + n = asarray([4, 4, 4, 4], 
dtype=np_ulong) + k = asarray([1, 2, 3, 4], dtype=np_ulong) + comp(stirling2(n, k, exact=False), ans, **kwargs) + + @pytest.mark.parametrize("is_exact, comp, kwargs", [ + (True, assert_equal, {}), + (False, assert_allclose, {'rtol': 1e-13}) + ]) + def test_broadcasting_arrays_correctly(self, is_exact, comp, kwargs): + # broadcasting is handled by stirling2 + # test leading 1s are replicated + ans = asarray([[1, 15, 25, 10], [1, 7, 6, 1]]) # shape (2,4) + n = asarray([[5, 5, 5, 5], [4, 4, 4, 4]]) # shape (2,4) + k = asarray([1, 2, 3, 4]) # shape (4,) + comp(stirling2(n, k, exact=is_exact), ans, **kwargs) + # test that dims both mismatch broadcast correctly (5,1) & (6,) + n = asarray([[4], [4], [4], [4], [4]]) + k = asarray([0, 1, 2, 3, 4, 5]) + ans = asarray([[0, 1, 7, 6, 1, 0] for _ in range(5)]) + comp(stirling2(n, k, exact=False), ans, **kwargs) + + def test_temme_rel_max_error(self): + # python integers with arbitrary precision are *not* allowed as + # object type in numpy arrays are inconsistent from api perspective + x = list(range(51, 101, 5)) + for n in x: + k_entries = list(range(1, n+1)) + denom = stirling2([n], k_entries, exact=True) + num = denom - stirling2([n], k_entries, exact=False) + assert np.max(np.abs(num / denom)) < 2e-5 diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py new file mode 100644 index 0000000000000000000000000000000000000000..ca3e82299824b1b349ef46f0266230fc3ad2fa7e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py @@ -0,0 +1,527 @@ +""" +Test cdflib functions versus mpmath, if available. 
+ +The following functions still need tests: + +- ncfdtr +- ncfdtri +- ncfdtridfn +- ncfdtridfd +- ncfdtrinc +- nbdtrik +- nbdtrin +- pdtrik +- nctdtr +- nctdtrit +- nctdtridf +- nctdtrinc + +""" +import itertools + +import numpy as np +from numpy.testing import assert_equal, assert_allclose +import pytest + +import scipy.special as sp +from scipy.special._testutils import ( + MissingModule, check_version, FuncData) +from scipy.special._mptestutils import ( + Arg, IntArg, get_args, mpf2float, assert_mpmath_equal) + +try: + import mpmath +except ImportError: + mpmath = MissingModule('mpmath') + + +class ProbArg: + """Generate a set of probabilities on [0, 1].""" + + def __init__(self): + # Include the endpoints for compatibility with Arg et. al. + self.a = 0 + self.b = 1 + + def values(self, n): + """Return an array containing approximately n numbers.""" + m = max(1, n//3) + v1 = np.logspace(-30, np.log10(0.3), m) + v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:] + v3 = 1 - np.logspace(np.log10(0.3), -15, m) + v = np.r_[v1, v2, v3] + return np.unique(v) + + +class EndpointFilter: + def __init__(self, a, b, rtol, atol): + self.a = a + self.b = b + self.rtol = rtol + self.atol = atol + + def __call__(self, x): + mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol + mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol + return np.where(mask1 | mask2, False, True) + + +class _CDFData: + def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True, + dps=20, n=5000, rtol=None, atol=None, + endpt_rtol=None, endpt_atol=None): + self.spfunc = spfunc + self.mpfunc = mpfunc + self.index = index + self.argspec = argspec + self.spfunc_first = spfunc_first + self.dps = dps + self.n = n + self.rtol = rtol + self.atol = atol + + if not isinstance(argspec, list): + self.endpt_rtol = None + self.endpt_atol = None + elif endpt_rtol is not None or endpt_atol is not None: + if isinstance(endpt_rtol, list): + self.endpt_rtol = endpt_rtol + else: 
+ self.endpt_rtol = [endpt_rtol]*len(self.argspec) + if isinstance(endpt_atol, list): + self.endpt_atol = endpt_atol + else: + self.endpt_atol = [endpt_atol]*len(self.argspec) + else: + self.endpt_rtol = None + self.endpt_atol = None + + def idmap(self, *args): + if self.spfunc_first: + res = self.spfunc(*args) + if np.isnan(res): + return np.nan + args = list(args) + args[self.index] = res + with mpmath.workdps(self.dps): + res = self.mpfunc(*tuple(args)) + # Imaginary parts are spurious + res = mpf2float(res.real) + else: + with mpmath.workdps(self.dps): + res = self.mpfunc(*args) + res = mpf2float(res.real) + args = list(args) + args[self.index] = res + res = self.spfunc(*tuple(args)) + return res + + def get_param_filter(self): + if self.endpt_rtol is None and self.endpt_atol is None: + return None + + filters = [] + for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec): + if rtol is None and atol is None: + filters.append(None) + continue + elif rtol is None: + rtol = 0.0 + elif atol is None: + atol = 0.0 + + filters.append(EndpointFilter(spec.a, spec.b, rtol, atol)) + return filters + + def check(self): + # Generate values for the arguments + args = get_args(self.argspec, self.n) + param_filter = self.get_param_filter() + param_columns = tuple(range(args.shape[1])) + result_columns = args.shape[1] + args = np.hstack((args, args[:, self.index].reshape(args.shape[0], 1))) + FuncData(self.idmap, args, + param_columns=param_columns, result_columns=result_columns, + rtol=self.rtol, atol=self.atol, vectorized=False, + param_filter=param_filter).check() + + +def _assert_inverts(*a, **kw): + d = _CDFData(*a, **kw) + d.check() + + +def _binomial_cdf(k, n, p): + k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p) + if k <= 0: + return mpmath.mpf(0) + elif k >= n: + return mpmath.mpf(1) + + onemp = mpmath.fsub(1, p, exact=True) + return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True) + + +def _f_cdf(dfn, dfd, x): + if x < 0: + return 
mpmath.mpf(0) + dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x) + ub = dfn*x/(dfn*x + dfd) + res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True) + return res + + +def _student_t_cdf(df, t, dps=None): + if dps is None: + dps = mpmath.mp.dps + with mpmath.workdps(dps): + df, t = mpmath.mpf(df), mpmath.mpf(t) + fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df) + fac *= t*mpmath.gamma(0.5*(df + 1)) + fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df) + return 0.5 + fac + + +def _noncentral_chi_pdf(t, df, nc): + res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t)) + res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2 + return res + + +def _noncentral_chi_cdf(x, df, nc, dps=None): + if dps is None: + dps = mpmath.mp.dps + x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc) + with mpmath.workdps(dps): + res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x]) + return res + + +def _tukey_lmbda_quantile(p, lmbda): + # For lmbda != 0 + return (p**lmbda - (1 - p)**lmbda)/lmbda + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +class TestCDFlib: + + @pytest.mark.xfail(run=False) + def test_bdtrik(self): + _assert_inverts( + sp.bdtrik, + _binomial_cdf, + 0, [ProbArg(), IntArg(1, 1000), ProbArg()], + rtol=1e-4) + + def test_bdtrin(self): + _assert_inverts( + sp.bdtrin, + _binomial_cdf, + 1, [IntArg(1, 1000), ProbArg(), ProbArg()], + rtol=1e-4, endpt_atol=[None, None, 1e-6]) + + def test_btdtria(self): + _assert_inverts( + sp.btdtria, + lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True), + 0, [ProbArg(), Arg(0, 1e2, inclusive_a=False), + Arg(0, 1, inclusive_a=False, inclusive_b=False)], + rtol=1e-6) + + def test_btdtrib(self): + # Use small values of a or mpmath doesn't converge + _assert_inverts( + sp.btdtrib, + lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True), + 1, + [Arg(0, 1e2, inclusive_a=False), ProbArg(), + Arg(0, 1, inclusive_a=False, inclusive_b=False)], + rtol=1e-7, + endpt_atol=[None, 1e-18, 
1e-15]) + + @pytest.mark.xfail(run=False) + def test_fdtridfd(self): + _assert_inverts( + sp.fdtridfd, + _f_cdf, + 1, + [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)], + rtol=1e-7) + + def test_gdtria(self): + _assert_inverts( + sp.gdtria, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 0, + [ProbArg(), Arg(0, 1e3, inclusive_a=False), + Arg(0, 1e4, inclusive_a=False)], + rtol=1e-7, + endpt_atol=[None, 1e-7, 1e-10]) + + def test_gdtrib(self): + # Use small values of a and x or mpmath doesn't converge + _assert_inverts( + sp.gdtrib, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 1, + [Arg(0, 1e2, inclusive_a=False), ProbArg(), + Arg(0, 1e3, inclusive_a=False)], + rtol=1e-5) + + def test_gdtrix(self): + _assert_inverts( + sp.gdtrix, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 2, + [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False), + ProbArg()], + rtol=1e-7, + endpt_atol=[None, 1e-7, 1e-10]) + + # Overall nrdtrimn and nrdtrisd are not performing well with infeasible/edge + # combinations of sigma and x, hence restricted the domains to still use the + # testing machinery, also see gh-20069 + + # nrdtrimn signature: p, sd, x + # nrdtrisd signature: mn, p, x + def test_nrdtrimn(self): + _assert_inverts( + sp.nrdtrimn, + lambda x, y, z: mpmath.ncdf(z, x, y), + 0, + [ProbArg(), # CDF value p + Arg(0.1, np.inf, inclusive_a=False, inclusive_b=False), # sigma + Arg(-1e10, 1e10)], # x + rtol=1e-5) + + def test_nrdtrisd(self): + _assert_inverts( + sp.nrdtrisd, + lambda x, y, z: mpmath.ncdf(z, x, y), + 1, + [Arg(-np.inf, 10, inclusive_a=False, inclusive_b=False), # mn + ProbArg(), # CDF value p + Arg(10, 1e100)], # x + rtol=1e-5) + + def test_stdtr(self): + # Ideally the left endpoint for Arg() should be 0. 
+ assert_mpmath_equal( + sp.stdtr, + _student_t_cdf, + [IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7) + + @pytest.mark.xfail(run=False) + def test_stdtridf(self): + _assert_inverts( + sp.stdtridf, + _student_t_cdf, + 0, [ProbArg(), Arg()], rtol=1e-7) + + def test_stdtrit(self): + _assert_inverts( + sp.stdtrit, + _student_t_cdf, + 1, [IntArg(1, 100), ProbArg()], rtol=1e-7, + endpt_atol=[None, 1e-10]) + + def test_chdtriv(self): + _assert_inverts( + sp.chdtriv, + lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True), + 0, [ProbArg(), IntArg(1, 100)], rtol=1e-4) + + @pytest.mark.xfail(run=False) + def test_chndtridf(self): + # Use a larger atol since mpmath is doing numerical integration + _assert_inverts( + sp.chndtridf, + _noncentral_chi_cdf, + 1, [Arg(0, 100, inclusive_a=False), ProbArg(), + Arg(0, 100, inclusive_a=False)], + n=1000, rtol=1e-4, atol=1e-15) + + @pytest.mark.xfail(run=False) + def test_chndtrinc(self): + # Use a larger atol since mpmath is doing numerical integration + _assert_inverts( + sp.chndtrinc, + _noncentral_chi_cdf, + 2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()], + n=1000, rtol=1e-4, atol=1e-15) + + def test_chndtrix(self): + # Use a larger atol since mpmath is doing numerical integration + _assert_inverts( + sp.chndtrix, + _noncentral_chi_cdf, + 0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)], + n=1000, rtol=1e-4, atol=1e-15, + endpt_atol=[1e-6, None, None]) + + def test_tklmbda_zero_shape(self): + # When lmbda = 0 the CDF has a simple closed form + one = mpmath.mpf(1) + assert_mpmath_equal( + lambda x: sp.tklmbda(x, 0), + lambda x: one/(mpmath.exp(-x) + one), + [Arg()], rtol=1e-7) + + def test_tklmbda_neg_shape(self): + _assert_inverts( + sp.tklmbda, + _tukey_lmbda_quantile, + 0, [ProbArg(), Arg(-25, 0, inclusive_b=False)], + spfunc_first=False, rtol=1e-5, + endpt_atol=[1e-9, 1e-5]) + + @pytest.mark.xfail(run=False) + def test_tklmbda_pos_shape(self): + _assert_inverts( + sp.tklmbda, + 
_tukey_lmbda_quantile, + 0, [ProbArg(), Arg(0, 100, inclusive_a=False)], + spfunc_first=False, rtol=1e-5) + + # The values of lmdba are chosen so that 1/lmbda is exact. + @pytest.mark.parametrize('lmbda', [0.5, 1.0, 8.0]) + def test_tklmbda_lmbda1(self, lmbda): + bound = 1/lmbda + assert_equal(sp.tklmbda([-bound, bound], lmbda), [0.0, 1.0]) + + +funcs = [ + ("btdtria", 3), + ("btdtrib", 3), + ("bdtrik", 3), + ("bdtrin", 3), + ("chdtriv", 2), + ("chndtr", 3), + ("chndtrix", 3), + ("chndtridf", 3), + ("chndtrinc", 3), + ("fdtridfd", 3), + ("ncfdtr", 4), + ("ncfdtri", 4), + ("ncfdtridfn", 4), + ("ncfdtridfd", 4), + ("ncfdtrinc", 4), + ("gdtrix", 3), + ("gdtrib", 3), + ("gdtria", 3), + ("nbdtrik", 3), + ("nbdtrin", 3), + ("nrdtrimn", 3), + ("nrdtrisd", 3), + ("pdtrik", 2), + ("stdtr", 2), + ("stdtrit", 2), + ("stdtridf", 2), + ("nctdtr", 3), + ("nctdtrit", 3), + ("nctdtridf", 3), + ("nctdtrinc", 3), + ("tklmbda", 2), +] + + +@pytest.mark.parametrize('func,numargs', funcs, ids=[x[0] for x in funcs]) +def test_nonfinite(func, numargs): + + rng = np.random.default_rng(1701299355559735) + func = getattr(sp, func) + args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in rng.random(numargs)] + + for args in itertools.product(*args_choices): + res = func(*args) + + if any(np.isnan(x) for x in args): + # Nan inputs should result to nan output + assert_equal(res, np.nan) + else: + # All other inputs should return something (but not + # raise exceptions or cause hangs) + pass + + +def test_chndtrix_gh2158(): + # test that gh-2158 is resolved; previously this blew up + res = sp.chndtrix(0.999999, 2, np.arange(20.)+1e-6) + + # Generated in R + # options(digits=16) + # ncp <- seq(0, 19) + 1e-6 + # print(qchisq(0.999999, df = 2, ncp = ncp)) + res_exp = [27.63103493142305, 35.25728589950540, 39.97396073236288, + 43.88033702110538, 47.35206403482798, 50.54112500166103, + 53.52720257322766, 56.35830042867810, 59.06600769498512, + 61.67243118946381, 64.19376191277179, 
66.64228141346548, + 69.02756927200180, 71.35726934749408, 73.63759723904816, + 75.87368842650227, 78.06984431185720, 80.22971052389806, + 82.35640899964173, 84.45263768373256] + assert_allclose(res, res_exp) + +@pytest.mark.xfail_on_32bit("32bit fails due to algorithm threshold") +def test_nctdtr_gh19896(): + # test that gh-19896 is resolved. + # Compared to SciPy 1.11 results from Fortran code. + dfarr = [0.98, 9.8, 98, 980] + pnoncarr = [-3.8, 0.38, 3.8, 38] + tarr = [0.0015, 0.15, 1.5, 15] + resarr = [0.9999276519560749, 0.9999276519560749, 0.9999908831755221, + 0.9999990265452424, 0.3524153312279712, 0.39749697267251416, + 0.7168629634895805, 0.9656246449259646, 7.234804392512006e-05, + 7.234804392512006e-05, 0.03538804607509127, 0.795482701508521, + 0.0, 0.0, 0.0, + 0.011927908523093889, 0.9999276519560749, 0.9999276519560749, + 0.9999997441133123, 1.0, 0.3525155979118013, + 0.4076312014048369, 0.8476794017035086, 0.9999999297116268, + 7.234804392512006e-05, 7.234804392512006e-05, 0.013477443099785824, + 0.9998501512331494, 0.0, 0.0, + 0.0, 6.561112613212572e-07, 0.9999276519560749, + 0.9999276519560749, 0.9999999313496014, 1.0, + 0.3525281784865706, 0.40890253001898014, 0.8664672830017024, + 1.0, 7.234804392512006e-05, 7.234804392512006e-05, + 0.010990889489704836, 1.0, 0.0, + 0.0, 0.0, 0.0, + 0.9999276519560749, 0.9999276519560749, 0.9999999418789304, + 1.0, 0.35252945487817355, 0.40903153246690993, + 0.8684247068528264, 1.0, 7.234804392512006e-05, + 7.234804392512006e-05, 0.01075068918582911, 1.0, + 0.0, 0.0, 0.0, 0.0] + actarr = [] + for df, p, t in itertools.product(dfarr, pnoncarr, tarr): + actarr += [sp.nctdtr(df, p, t)] + # The rtol is kept high on purpose to make it pass on 32bit systems + assert_allclose(actarr, resarr, rtol=1e-6, atol=0.0) + + +def test_nctdtrinc_gh19896(): + # test that gh-19896 is resolved. + # Compared to SciPy 1.11 results from Fortran code. 
+ dfarr = [0.001, 0.98, 9.8, 98, 980, 10000, 98, 9.8, 0.98, 0.001] + parr = [0.001, 0.1, 0.3, 0.8, 0.999, 0.001, 0.1, 0.3, 0.8, 0.999] + tarr = [0.0015, 0.15, 1.5, 15, 300, 0.0015, 0.15, 1.5, 15, 300] + desired = [3.090232306168629, 1.406141304556198, 2.014225177124157, + 13.727067118283456, 278.9765683871208, 3.090232306168629, + 1.4312427877936222, 2.014225177124157, 3.712743137978295, + -3.086951096691082] + actual = sp.nctdtrinc(dfarr, parr, tarr) + assert_allclose(actual, desired, rtol=5e-12, atol=0.0) + + +def test_stdtr_stdtrit_neg_inf(): + # -inf was treated as +inf and values from the normal were returned + assert np.all(np.isnan(sp.stdtr(-np.inf, [-np.inf, -1.0, 0.0, 1.0, np.inf]))) + assert np.all(np.isnan(sp.stdtrit(-np.inf, [0.0, 0.25, 0.5, 0.75, 1.0]))) + + +def test_bdtrik_nbdtrik_inf(): + y = np.array( + [np.nan,-np.inf,-10.0, -1.0, 0.0, .00001, .5, 0.9999, 1.0, 10.0, np.inf]) + y = y[:,None] + p = np.atleast_2d( + [np.nan, -np.inf, -10.0, -1.0, 0.0, .00001, .5, 1.0, np.inf]) + assert np.all(np.isnan(sp.bdtrik(y, np.inf, p))) + assert np.all(np.isnan(sp.nbdtrik(y, np.inf, p))) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py new file mode 100644 index 0000000000000000000000000000000000000000..8b1ad41243f0865c205963d938ab61a346ee8e88 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py @@ -0,0 +1,49 @@ +# gh-14777 regression tests +# Test stdtr and stdtrit with infinite df and large values of df + +import numpy as np +from numpy.testing import assert_allclose, assert_equal +from scipy.special import stdtr, stdtrit, ndtr, ndtri + + +def test_stdtr_vs_R_large_df(): + df = [1e10, 1e12, 1e120, np.inf] + t = 1. 
+ res = stdtr(df, t) + # R Code: + # options(digits=20) + # pt(1., c(1e10, 1e12, 1e120, Inf)) + res_R = [0.84134474605644460343, + 0.84134474606842180044, + 0.84134474606854281475, + 0.84134474606854292578] + assert_allclose(res, res_R, rtol=2e-15) + # last value should also agree with ndtr + assert_equal(res[3], ndtr(1.)) + + +def test_stdtrit_vs_R_large_df(): + df = [1e10, 1e12, 1e120, np.inf] + p = 0.1 + res = stdtrit(df, p) + # R Code: + # options(digits=20) + # qt(0.1, c(1e10, 1e12, 1e120, Inf)) + res_R = [-1.2815515656292593150, + -1.2815515655454472466, + -1.2815515655446008125, + -1.2815515655446008125] + assert_allclose(res, res_R, rtol=1e-14, atol=1e-15) + # last value should also agree with ndtri + assert_equal(res[3], ndtri(0.1)) + + +def test_stdtr_stdtri_invalid(): + # a mix of large and inf df with t/p equal to nan + df = [1e10, 1e12, 1e120, np.inf] + x = np.nan + res1 = stdtr(df, x) + res2 = stdtrit(df, x) + res_ex = 4*[np.nan] + assert_equal(res1, res_ex) + assert_equal(res2, res_ex) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_data.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..40e3eed18ca6816c1c541bb61ecab660447b38f9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_data.py @@ -0,0 +1,725 @@ +import importlib.resources + +import numpy as np +from numpy.testing import suppress_warnings +import pytest + +from scipy.special import ( + lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite, + eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta, + jn, jv, jvp, yn, yv, yvp, iv, ivp, kn, kv, kvp, + gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma, + beta, betainc, betaincinv, poch, + ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc, + elliprc, elliprd, elliprf, elliprg, elliprj, + erf, erfc, erfinv, erfcinv, exp1, expi, expn, + bdtrik, btdtr, btdtri, 
btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib, + nbdtrik, pdtrik, owens_t, + mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1, + mathieu_modsem1, mathieu_modcem2, mathieu_modsem2, + ellip_harm, ellip_harm_2, spherical_jn, spherical_yn, wright_bessel +) +from scipy.integrate import IntegrationWarning + +from scipy.special._testutils import FuncData + + +# The npz files are generated, and hence may live in the build dir. We can only +# access them through `importlib.resources`, not an explicit path from `__file__` +_datadir = importlib.resources.files('scipy.special.tests.data') + +_boost_npz = _datadir.joinpath('boost.npz') +with importlib.resources.as_file(_boost_npz) as f: + DATASETS_BOOST = np.load(f) + +_gsl_npz = _datadir.joinpath('gsl.npz') +with importlib.resources.as_file(_gsl_npz) as f: + DATASETS_GSL = np.load(f) + +_local_npz = _datadir.joinpath('local.npz') +with importlib.resources.as_file(_local_npz) as f: + DATASETS_LOCAL = np.load(f) + + +def data(func, dataname, *a, **kw): + kw.setdefault('dataname', dataname) + return FuncData(func, DATASETS_BOOST[dataname], *a, **kw) + + +def data_gsl(func, dataname, *a, **kw): + kw.setdefault('dataname', dataname) + return FuncData(func, DATASETS_GSL[dataname], *a, **kw) + + +def data_local(func, dataname, *a, **kw): + kw.setdefault('dataname', dataname) + return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw) + + +def ellipk_(k): + return ellipk(k*k) + + +def ellipkinc_(f, k): + return ellipkinc(f, k*k) + + +def ellipe_(k): + return ellipe(k*k) + + +def ellipeinc_(f, k): + return ellipeinc(f, k*k) + + +def zeta_(x): + return zeta(x, 1.) 
+ + +def assoc_legendre_p_boost_(nu, mu, x): + # the boost test data is for integer orders only + return lpmv(mu, nu.astype(int), x) + +def legendre_p_via_assoc_(nu, x): + return lpmv(0, nu, x) + +def lpn_(n, x): + return lpn(n.astype('l'), x)[0][-1] + +def lqn_(n, x): + return lqn(n.astype('l'), x)[0][-1] + +def legendre_p_via_lpmn(n, x): + return lpmn(0, n, x)[0][0,-1] + +def legendre_q_via_lqmn(n, x): + return lqmn(0, n, x)[0][0,-1] + +def mathieu_ce_rad(m, q, x): + return mathieu_cem(m, q, x*180/np.pi)[0] + + +def mathieu_se_rad(m, q, x): + return mathieu_sem(m, q, x*180/np.pi)[0] + + +def mathieu_mc1_scaled(m, q, x): + # GSL follows a different normalization. + # We follow Abramowitz & Stegun, they apparently something else. + return mathieu_modcem1(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_ms1_scaled(m, q, x): + return mathieu_modsem1(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_mc2_scaled(m, q, x): + return mathieu_modcem2(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_ms2_scaled(m, q, x): + return mathieu_modsem2(m, q, x)[0] * np.sqrt(np.pi/2) + +def eval_legendre_ld(n, x): + return eval_legendre(n.astype('l'), x) + +def eval_legendre_dd(n, x): + return eval_legendre(n.astype('d'), x) + +def eval_hermite_ld(n, x): + return eval_hermite(n.astype('l'), x) + +def eval_laguerre_ld(n, x): + return eval_laguerre(n.astype('l'), x) + +def eval_laguerre_dd(n, x): + return eval_laguerre(n.astype('d'), x) + +def eval_genlaguerre_ldd(n, a, x): + return eval_genlaguerre(n.astype('l'), a, x) + +def eval_genlaguerre_ddd(n, a, x): + return eval_genlaguerre(n.astype('d'), a, x) + +def bdtrik_comp(y, n, p): + return bdtrik(1-y, n, p) + +def btdtri_comp(a, b, p): + return btdtri(a, b, 1-p) + +def btdtria_comp(p, b, x): + return btdtria(1-p, b, x) + +def btdtrib_comp(a, p, x): + return btdtrib(a, 1-p, x) + +def gdtr_(p, x): + return gdtr(1.0, p, x) + +def gdtrc_(p, x): + return gdtrc(1.0, p, x) + +def gdtrix_(b, p): + return gdtrix(1.0, b, p) + +def gdtrix_comp(b, 
p): + return gdtrix(1.0, b, 1-p) + +def gdtrib_(p, x): + return gdtrib(1.0, p, x) + +def gdtrib_comp(p, x): + return gdtrib(1.0, 1-p, x) + +def nbdtrik_comp(y, n, p): + return nbdtrik(1-y, n, p) + +def pdtrik_comp(p, m): + return pdtrik(1-p, m) + +def poch_(z, m): + return 1.0 / poch(z, m) + +def poch_minus(z, m): + return 1.0 / poch(z, -m) + +def spherical_jn_(n, x): + return spherical_jn(n.astype('l'), x) + +def spherical_yn_(n, x): + return spherical_yn(n.astype('l'), x) + +def sph_harm_(m, n, theta, phi): + y = sph_harm(m, n, theta, phi) + return (y.real, y.imag) + +def cexpm1(x, y): + z = expm1(x + 1j*y) + return z.real, z.imag + +def clog1p(x, y): + z = log1p(x + 1j*y) + return z.real, z.imag + + +BOOST_TESTS = [ + data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', + (0,1,2), 3, rtol=1e-11), + + data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=1e-11), + data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=9.6e-14), + data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=5e-14, vectorized=False), + data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=9.6e-14, vectorized=False), + data(lpn_, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=5e-14, vectorized=False), + data(lpn_, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=3e-13, vectorized=False), + data(eval_legendre_ld, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=6e-14), + data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=2e-13), + data(eval_legendre_dd, 'legendre_p_ipp-legendre_p', + (0,1), 2, rtol=2e-14), + data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 2, rtol=2e-13), + + data(lqn_, 'legendre_p_ipp-legendre_p', + (0,1), 3, rtol=2e-14, vectorized=False), + data(lqn_, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 3, rtol=2e-12, vectorized=False), + data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p', + (0,1), 
3, rtol=2e-14, vectorized=False), + data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large', + (0,1), 3, rtol=2e-12, vectorized=False), + + data(beta, 'beta_exp_data_ipp-beta_exp_data', + (0,1), 2, rtol=1e-13), + data(beta, 'beta_exp_data_ipp-beta_exp_data', + (0,1), 2, rtol=1e-13), + data(beta, 'beta_med_data_ipp-beta_med_data', + (0,1), 2, rtol=5e-13), + + data(betainc, 'ibeta_small_data_ipp-ibeta_small_data', + (0,1,2), 5, rtol=6e-15), + data(betainc, 'ibeta_data_ipp-ibeta_data', + (0,1,2), 5, rtol=5e-13), + data(betainc, 'ibeta_int_data_ipp-ibeta_int_data', + (0,1,2), 5, rtol=2e-14), + data(betainc, 'ibeta_large_data_ipp-ibeta_large_data', + (0,1,2), 5, rtol=4e-10), + + data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data', + (0,1,2), 3, rtol=1e-5), + + data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data', + (0,1,2), 5, rtol=6e-15), + data(btdtr, 'ibeta_data_ipp-ibeta_data', + (0,1,2), 5, rtol=4e-13), + data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data', + (0,1,2), 5, rtol=2e-14), + data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data', + (0,1,2), 5, rtol=4e-10), + + data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data', + (0,1,2), 3, rtol=1e-5), + data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data', + (0,1,2), 4, rtol=8e-7), + + data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data', + (2,0,1), 3, rtol=5e-9), + data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', + (2,0,1), 4, rtol=5e-9), + + data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data', + (0,2,1), 5, rtol=5e-9), + data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', + (0,2,1), 6, rtol=5e-9), + + data(binom, 'binomial_data_ipp-binomial_data', + (0,1), 2, rtol=1e-13), + data(binom, 'binomial_large_data_ipp-binomial_large_data', + (0,1), 2, rtol=5e-13), + + data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data', + (2,0,1), 3, rtol=5e-9), + data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data', + (2,0,1), 4, rtol=5e-9), + + data(nbdtrik, 
'negative_binomial_quantile_ipp-negative_binomial_quantile_data', + (2,0,1), 3, rtol=4e-9), + data(nbdtrik_comp, + 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', + (2,0,1), 4, rtol=4e-9), + + data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data', + (1,0), 2, rtol=3e-9), + data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data', + (1,0), 3, rtol=4e-9), + + data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0), + + data(digamma, 'digamma_data_ipp-digamma_data', 0, 1), + data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1), + data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13), + data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13), + data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15), + data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15), + data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15), + data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14), + + data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1), + data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14), + data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1), + data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14), + + data(erf, 'erf_data_ipp-erf_data', 0, 1), + data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13), + data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15), + data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1), + data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1), + data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14), + data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1), + data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13), + data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2), + + data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1), + data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1), + data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data', 0, 1, + 
param_filter=(lambda s: s > 0)), + + data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13), + data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9), + data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13), + data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13), + data(expi, 'expinti_data_long_ipp-expinti_data_long', 0, 1), + + data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2), + data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14), + + data(gamma, 'test_gamma_data_ipp-near_0', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_1', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_2', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12), + data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14), + data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13), + data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10), + data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2), + + data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15), + data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13), + data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13), + data(gammainc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12), + + data(gdtr_, 
'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13), + data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13), + data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13), + data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9), + + data(gammaincc, 'igamma_small_data_ipp-igamma_small_data', + (0,1), 3, rtol=1e-13), + data(gammaincc, 'igamma_med_data_ipp-igamma_med_data', + (0,1), 3, rtol=2e-13), + data(gammaincc, 'igamma_int_data_ipp-igamma_int_data', + (0,1), 3, rtol=4e-14), + data(gammaincc, 'igamma_big_data_ipp-igamma_big_data', + (0,1), 3, rtol=1e-11), + + data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13), + data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13), + data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14), + data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11), + + data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9), + data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9), + + data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', + (0,1), 2, rtol=2e-13), + data(poch_, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', + (0,1), 2,), + data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', + (0,1), 2,), + data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', + (0,1), 3, rtol=2e-13), + data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', + (0,1), 3), + data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', + (0,1), 3), + + data(eval_hermite_ld, 'hermite_ipp-hermite', + (0,1), 2, rtol=2e-14), + + data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2', + (0,1), 2, rtol=7e-12), + data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2', + (0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'), + data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3', + (0,1,2), 3, rtol=2e-13), + 
data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3', + (0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'), + + data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1), + data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2), + + data(iv, 'bessel_i_data_ipp-bessel_i_data', + (0,1), 2, rtol=1e-12), + data(iv, 'bessel_i_data_ipp-bessel_i_data', + (0,1j), 2, rtol=2e-10, atol=1e-306), + data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', + (0,1), 2, rtol=1e-9), + data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', + (0,1j), 2, rtol=2e-10), + + data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', + (0,1), 2, rtol=1.2e-13), + data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', + (0,1j), 2, rtol=1.2e-13, atol=1e-300), + + data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), + data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), + data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11), + data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11), + + data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), + data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), + data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12), + data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12), + + data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', + (0,1), 2, rtol=1e-13), + data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', + (0,1j), 2, rtol=1e-13), + data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', + (0,1), 2, rtol=1e-11), + data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', + (0,1j), 2, rtol=2e-11), + + data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12), + + data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12), + data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12), + 
data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12), + data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12), + + data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', + (0,1), 2, rtol=3e-14), + data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', + (0,1j), 2, rtol=3e-14), + data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1), 2, rtol=7e-14), + data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1j), 2, rtol=7e-14), + + data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12), + data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), + + data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), + data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12), + data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10), + data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10), + + data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', + (0, 1), 2, rtol=4e-9), + data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', + (0, 1j), 2, rtol=4e-9), + + data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, + param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, + param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, + param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, + param_filter=(lambda s: s > 1)), + + data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 2, rtol=1e-11), + data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 2, rtol=1e-14), + data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 2, rtol=1e-11), + + data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 3, rtol=1e-12), + data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 3, rtol=1e-14), + data(gammainccinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 3, 
rtol=1e-14), + + data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 2, rtol=3e-13, knownfailure='gdtrix unflow some points'), + data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 2, rtol=3e-15), + data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 2), + data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data', + (0,1), 2, knownfailure='gdtrix bad some points'), + data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data', + (0,1), 3, rtol=6e-15), + data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data', + (0,1), 3), + + data(chndtr, 'nccs_ipp-nccs', + (2,0,1), 3, rtol=3e-5), + data(chndtr, 'nccs_big_ipp-nccs_big', + (2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate some points'), + + data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic', + (1,0,3,2), (4,5), rtol=5e-11, + param_filter=(lambda p: np.ones(p.shape, '?'), + lambda p: np.ones(p.shape, '?'), + lambda p: np.logical_and(p < 2*np.pi, p >= 0), + lambda p: np.logical_and(p < np.pi, p >= 0))), + + data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data', + (0,1), 2, rtol=1e-13), + data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data', + (0,1), 2, rtol=8e-15), + + data(owens_t, 'owens_t_ipp-owens_t', + (0, 1), 2, rtol=5e-14), + data(owens_t, 'owens_t_large_data_ipp-owens_t_large_data', + (0, 1), 2, rtol=8e-12), + + # -- test data exists in boost but is not used in scipy -- + + # ibeta_derivative_data_ipp/ibeta_derivative_data.txt + # ibeta_derivative_int_data_ipp/ibeta_derivative_int_data.txt + # ibeta_derivative_large_data_ipp/ibeta_derivative_large_data.txt + # ibeta_derivative_small_data_ipp/ibeta_derivative_small_data.txt + + # bessel_y01_prime_data_ipp/bessel_y01_prime_data.txt + # bessel_yn_prime_data_ipp/bessel_yn_prime_data.txt + # sph_bessel_prime_data_ipp/sph_bessel_prime_data.txt + # sph_neumann_prime_data_ipp/sph_neumann_prime_data.txt + + # ellint_d2_data_ipp/ellint_d2_data.txt + # ellint_d_data_ipp/ellint_d_data.txt + # 
ellint_pi2_data_ipp/ellint_pi2_data.txt + # ellint_pi3_data_ipp/ellint_pi3_data.txt + # ellint_pi3_large_data_ipp/ellint_pi3_large_data.txt + data(elliprc, 'ellint_rc_data_ipp-ellint_rc_data', (0, 1), 2, + rtol=5e-16), + data(elliprd, 'ellint_rd_data_ipp-ellint_rd_data', (0, 1, 2), 3, + rtol=5e-16), + data(elliprd, 'ellint_rd_0xy_ipp-ellint_rd_0xy', (0, 1, 2), 3, + rtol=5e-16), + data(elliprd, 'ellint_rd_0yy_ipp-ellint_rd_0yy', (0, 1, 2), 3, + rtol=5e-16), + data(elliprd, 'ellint_rd_xxx_ipp-ellint_rd_xxx', (0, 1, 2), 3, + rtol=5e-16), + # Some of the following rtol for elliprd may be larger than 5e-16 to + # work around some hard cases in the Boost test where we get slightly + # larger error than the ideal bound when the x (==y) input is close to + # zero. + # Also the accuracy on 32-bit builds with g++ may suffer from excess + # loss of precision; see GCC bugzilla 323 + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323 + data(elliprd, 'ellint_rd_xxz_ipp-ellint_rd_xxz', (0, 1, 2), 3, + rtol=6.5e-16), + data(elliprd, 'ellint_rd_xyy_ipp-ellint_rd_xyy', (0, 1, 2), 3, + rtol=6e-16), + data(elliprf, 'ellint_rf_data_ipp-ellint_rf_data', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_xxx_ipp-ellint_rf_xxx', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_xyy_ipp-ellint_rf_xyy', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_xy0_ipp-ellint_rf_xy0', (0, 1, 2), 3, + rtol=5e-16), + data(elliprf, 'ellint_rf_0yy_ipp-ellint_rf_0yy', (0, 1, 2), 3, + rtol=5e-16), + # The accuracy of R_G is primarily limited by R_D that is used + # internally. It is generally worse than R_D. Notice that we increased + # the rtol for R_G here. The cases with duplicate arguments are + # slightly less likely to be unbalanced (at least two arguments are + # already balanced) so the error bound is slightly better. Again, + # precision with g++ 32-bit is even worse. 
+ data(elliprg, 'ellint_rg_ipp-ellint_rg', (0, 1, 2), 3, + rtol=8.0e-16), + data(elliprg, 'ellint_rg_xxx_ipp-ellint_rg_xxx', (0, 1, 2), 3, + rtol=6e-16), + data(elliprg, 'ellint_rg_xyy_ipp-ellint_rg_xyy', (0, 1, 2), 3, + rtol=7.5e-16), + data(elliprg, 'ellint_rg_xy0_ipp-ellint_rg_xy0', (0, 1, 2), 3, + rtol=5e-16), + data(elliprg, 'ellint_rg_00x_ipp-ellint_rg_00x', (0, 1, 2), 3, + rtol=5e-16), + data(elliprj, 'ellint_rj_data_ipp-ellint_rj_data', (0, 1, 2, 3), 4, + rtol=5e-16, atol=1e-25, + param_filter=(lambda s: s <= 5e-26,)), + # ellint_rc_data_ipp/ellint_rc_data.txt + # ellint_rd_0xy_ipp/ellint_rd_0xy.txt + # ellint_rd_0yy_ipp/ellint_rd_0yy.txt + # ellint_rd_data_ipp/ellint_rd_data.txt + # ellint_rd_xxx_ipp/ellint_rd_xxx.txt + # ellint_rd_xxz_ipp/ellint_rd_xxz.txt + # ellint_rd_xyy_ipp/ellint_rd_xyy.txt + # ellint_rf_0yy_ipp/ellint_rf_0yy.txt + # ellint_rf_data_ipp/ellint_rf_data.txt + # ellint_rf_xxx_ipp/ellint_rf_xxx.txt + # ellint_rf_xy0_ipp/ellint_rf_xy0.txt + # ellint_rf_xyy_ipp/ellint_rf_xyy.txt + # ellint_rg_00x_ipp/ellint_rg_00x.txt + # ellint_rg_ipp/ellint_rg.txt + # ellint_rg_xxx_ipp/ellint_rg_xxx.txt + # ellint_rg_xy0_ipp/ellint_rg_xy0.txt + # ellint_rg_xyy_ipp/ellint_rg_xyy.txt + # ellint_rj_data_ipp/ellint_rj_data.txt + # ellint_rj_e2_ipp/ellint_rj_e2.txt + # ellint_rj_e3_ipp/ellint_rj_e3.txt + # ellint_rj_e4_ipp/ellint_rj_e4.txt + # ellint_rj_zp_ipp/ellint_rj_zp.txt + + # jacobi_elliptic_ipp/jacobi_elliptic.txt + # jacobi_elliptic_small_ipp/jacobi_elliptic_small.txt + # jacobi_large_phi_ipp/jacobi_large_phi.txt + # jacobi_near_1_ipp/jacobi_near_1.txt + # jacobi_zeta_big_phi_ipp/jacobi_zeta_big_phi.txt + # jacobi_zeta_data_ipp/jacobi_zeta_data.txt + + # heuman_lambda_data_ipp/heuman_lambda_data.txt + + # hypergeometric_0F2_ipp/hypergeometric_0F2.txt + # hypergeometric_1F1_big_ipp/hypergeometric_1F1_big.txt + # hypergeometric_1F1_ipp/hypergeometric_1F1.txt + # hypergeometric_1F1_small_random_ipp/hypergeometric_1F1_small_random.txt + # 
hypergeometric_1F2_ipp/hypergeometric_1F2.txt + # hypergeometric_1f1_large_regularized_ipp/hypergeometric_1f1_large_regularized.txt # noqa: E501 + # hypergeometric_1f1_log_large_unsolved_ipp/hypergeometric_1f1_log_large_unsolved.txt # noqa: E501 + # hypergeometric_2F0_half_ipp/hypergeometric_2F0_half.txt + # hypergeometric_2F0_integer_a2_ipp/hypergeometric_2F0_integer_a2.txt + # hypergeometric_2F0_ipp/hypergeometric_2F0.txt + # hypergeometric_2F0_large_z_ipp/hypergeometric_2F0_large_z.txt + # hypergeometric_2F1_ipp/hypergeometric_2F1.txt + # hypergeometric_2F2_ipp/hypergeometric_2F2.txt + + # ncbeta_big_ipp/ncbeta_big.txt + # nct_small_delta_ipp/nct_small_delta.txt + # nct_asym_ipp/nct_asym.txt + # ncbeta_ipp/ncbeta.txt + + # powm1_data_ipp/powm1_big_data.txt + # powm1_sqrtp1m1_test_hpp/sqrtp1m1_data.txt + + # sinc_data_ipp/sinc_data.txt + + # test_gamma_data_ipp/gammap1m1_data.txt + # tgamma_ratio_data_ipp/tgamma_ratio_data.txt + + # trig_data_ipp/trig_data.txt + # trig_data2_ipp/trig_data2.txt +] + + +@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr) +def test_boost(test): + # Filter deprecation warnings of any deprecated functions. + if test.func in [btdtr, btdtri, btdtri_comp]: + with pytest.deprecated_call(): + _test_factory(test) + else: + _test_factory(test) + + +GSL_TESTS = [ + data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13), + data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13), + + # Also the GSL output has limited accuracy... 
+ data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13), + + data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms', + (0, 1, 2), 3, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms', + (0, 1, 2), 4, rtol=1e-7, atol=1e-13), + + data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms', + (0, 1, 2), 5, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms', + (0, 1, 2), 6, rtol=1e-7, atol=1e-13), +] + + +@pytest.mark.parametrize('test', GSL_TESTS, ids=repr) +def test_gsl(test): + _test_factory(test) + + +LOCAL_TESTS = [ + data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2), + data_local(ellipkm1, 'ellipkm1', 0, 1), + data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2), + data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14), + data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14), + data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12), + data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11), + data_local(ellip_harm_2, 'ellip',(0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13), + data_local(ellip_harm, 'ellip',(0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13), + data_local(wright_bessel, 'wright_bessel', (0, 1, 2), 3, rtol=1e-11), +] + + +@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr) +def test_local(test): + _test_factory(test) + + +def _test_factory(test, dtype=np.float64): + """Boost test""" + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected") + with np.errstate(all='ignore'): + test.check(dtype=dtype) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f27dc7b71c1ae928b4bdd8bd987df9ca420bab --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py @@ -0,0 +1,45 @@ +import numpy as np +from numpy import pi, log, sqrt +from numpy.testing import assert_, assert_equal + +from scipy.special._testutils import FuncData +import scipy.special as sc + +# Euler-Mascheroni constant +euler = 0.57721566490153286 + + +def test_consistency(): + # Make sure the implementation of digamma for real arguments + # agrees with the implementation of digamma for complex arguments. + + # It's all poles after -1e16 + x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)] + dataset = np.vstack((x + 0j, sc.digamma(x))).T + FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check() + + +def test_special_values(): + # Test special values from Gauss's digamma theorem. See + # + # https://en.wikipedia.org/wiki/Digamma_function + + dataset = [ + (1, -euler), + (0.5, -2*log(2) - euler), + (1/3, -pi/(2*sqrt(3)) - 3*log(3)/2 - euler), + (1/4, -pi/2 - 3*log(2) - euler), + (1/6, -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler), + (1/8, + -pi/2 - 4*log(2) - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler) + ] + + dataset = np.asarray(dataset) + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check() + + +def test_nonfinite(): + pts = [0.0, -0.0, np.inf] + std = [-np.inf, np.inf, np.inf] + assert_equal(sc.digamma(pts), std) + assert_(all(np.isnan(sc.digamma([-np.inf, -1])))) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py new file mode 100644 index 0000000000000000000000000000000000000000..8868f66c47ce0d4bbb21c78435a6c89d44065252 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py @@ -0,0 +1,85 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose +import scipy.special as sc +from scipy.special._testutils import FuncData + + +class TestVoigtProfile: + + 
@pytest.mark.parametrize('x, sigma, gamma', [ + (np.nan, 1, 1), + (0, np.nan, 1), + (0, 1, np.nan), + (1, np.nan, 0), + (np.nan, 1, 0), + (1, 0, np.nan), + (np.nan, 0, 1), + (np.nan, 0, 0) + ]) + def test_nan(self, x, sigma, gamma): + assert np.isnan(sc.voigt_profile(x, sigma, gamma)) + + @pytest.mark.parametrize('x, desired', [ + (-np.inf, 0), + (np.inf, 0) + ]) + def test_inf(self, x, desired): + assert sc.voigt_profile(x, 1, 1) == desired + + def test_against_mathematica(self): + # Results obtained from Mathematica by computing + # + # PDF[VoigtDistribution[gamma, sigma], x] + # + points = np.array([ + [-7.89, 45.06, 6.66, 0.0077921073660388806401], + [-0.05, 7.98, 24.13, 0.012068223646769913478], + [-13.98, 16.83, 42.37, 0.0062442236362132357833], + [-12.66, 0.21, 6.32, 0.010052516161087379402], + [11.34, 4.25, 21.96, 0.0113698923627278917805], + [-11.56, 20.40, 30.53, 0.0076332760432097464987], + [-9.17, 25.61, 8.32, 0.011646345779083005429], + [16.59, 18.05, 2.50, 0.013637768837526809181], + [9.11, 2.12, 39.33, 0.0076644040807277677585], + [-43.33, 0.30, 45.68, 0.0036680463875330150996] + ]) + FuncData( + sc.voigt_profile, + points, + (0, 1, 2), + 3, + atol=0, + rtol=1e-15 + ).check() + + def test_symmetry(self): + x = np.linspace(0, 10, 20) + assert_allclose( + sc.voigt_profile(x, 1, 1), + sc.voigt_profile(-x, 1, 1), + rtol=1e-15, + atol=0 + ) + + @pytest.mark.parametrize('x, sigma, gamma, desired', [ + (0, 0, 0, np.inf), + (1, 0, 0, 0) + ]) + def test_corner_cases(self, x, sigma, gamma, desired): + assert sc.voigt_profile(x, sigma, gamma) == desired + + @pytest.mark.parametrize('sigma1, gamma1, sigma2, gamma2', [ + (0, 1, 1e-16, 1), + (1, 0, 1, 1e-16), + (0, 0, 1e-16, 1e-16) + ]) + def test_continuity(self, sigma1, gamma1, sigma2, gamma2): + x = np.linspace(1, 10, 20) + assert_allclose( + sc.voigt_profile(x, sigma1, gamma1), + sc.voigt_profile(x, sigma2, gamma2), + rtol=1e-16, + atol=1e-16 + ) diff --git 
a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py new file mode 100644 index 0000000000000000000000000000000000000000..aae34e5c23f2d293f362abd825f1dad454371ae0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py @@ -0,0 +1,136 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal + +import scipy.special as sc +from scipy.special._testutils import FuncData + + +INVALID_POINTS = [ + (1, -1), + (0, 0), + (-1, 1), + (np.nan, 1), + (1, np.nan) +] + + +class TestGammainc: + + @pytest.mark.parametrize('a, x', INVALID_POINTS) + def test_domain(self, a, x): + assert np.isnan(sc.gammainc(a, x)) + + def test_a_eq_0_x_gt_0(self): + assert sc.gammainc(0, 1) == 1 + + @pytest.mark.parametrize('a, x, desired', [ + (np.inf, 1, 0), + (np.inf, 0, 0), + (np.inf, np.inf, np.nan), + (1, np.inf, 1) + ]) + def test_infinite_arguments(self, a, x, desired): + result = sc.gammainc(a, x) + if np.isnan(desired): + assert np.isnan(result) + else: + assert result == desired + + def test_infinite_limits(self): + # Test that large arguments converge to the hard-coded limits + # at infinity. + assert_allclose( + sc.gammainc(1000, 100), + sc.gammainc(np.inf, 100), + atol=1e-200, # Use `atol` since the function converges to 0. + rtol=0 + ) + assert sc.gammainc(100, 1000) == sc.gammainc(100, np.inf) + + def test_x_zero(self): + a = np.arange(1, 10) + assert_array_equal(sc.gammainc(a, 0), 0) + + def test_limit_check(self): + result = sc.gammainc(1e-10, 1) + limit = sc.gammainc(0, 1) + assert np.isclose(result, limit) + + def gammainc_line(self, x): + # The line a = x where a simpler asymptotic expansion (analog + # of DLMF 8.12.15) is available. 
+ c = np.array([-1/3, -1/540, 25/6048, 101/155520, + -3184811/3695155200, -2745493/8151736420]) + res = 0 + xfac = 1 + for ck in c: + res -= ck*xfac + xfac /= x + res /= np.sqrt(2*np.pi*x) + res += 0.5 + return res + + def test_line(self): + x = np.logspace(np.log10(25), 300, 500) + a = x + dataset = np.vstack((a, x, self.gammainc_line(x))).T + FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check() + + def test_roundtrip(self): + a = np.logspace(-5, 10, 100) + x = np.logspace(-5, 10, 100) + + y = sc.gammaincinv(a, sc.gammainc(a, x)) + assert_allclose(x, y, rtol=1e-10) + + +class TestGammaincc: + + @pytest.mark.parametrize('a, x', INVALID_POINTS) + def test_domain(self, a, x): + assert np.isnan(sc.gammaincc(a, x)) + + def test_a_eq_0_x_gt_0(self): + assert sc.gammaincc(0, 1) == 0 + + @pytest.mark.parametrize('a, x, desired', [ + (np.inf, 1, 1), + (np.inf, 0, 1), + (np.inf, np.inf, np.nan), + (1, np.inf, 0) + ]) + def test_infinite_arguments(self, a, x, desired): + result = sc.gammaincc(a, x) + if np.isnan(desired): + assert np.isnan(result) + else: + assert result == desired + + def test_infinite_limits(self): + # Test that large arguments converge to the hard-coded limits + # at infinity. + assert sc.gammaincc(1000, 100) == sc.gammaincc(np.inf, 100) + assert_allclose( + sc.gammaincc(100, 1000), + sc.gammaincc(100, np.inf), + atol=1e-200, # Use `atol` since the function converges to 0. 
+ rtol=0 + ) + + def test_limit_check(self): + result = sc.gammaincc(1e-10,1) + limit = sc.gammaincc(0,1) + assert np.isclose(result, limit) + + def test_x_zero(self): + a = np.arange(1, 10) + assert_array_equal(sc.gammaincc(a, 0), 1) + + def test_roundtrip(self): + a = np.logspace(-5, 10, 100) + x = np.logspace(-5, 10, 100) + + y = sc.gammainccinv(a, sc.gammaincc(a, x)) + assert_allclose(x, y, rtol=1e-14) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py new file mode 100644 index 0000000000000000000000000000000000000000..2fcb5a20037de46df939895d38fbe5fe6b85c9aa --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py @@ -0,0 +1,70 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_ + +from scipy.special._testutils import FuncData +from scipy.special import gamma, gammaln, loggamma + + +def test_identities1(): + # test the identity exp(loggamma(z)) = gamma(z) + x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]) + y = x.copy() + x, y = np.meshgrid(x, y) + z = (x + 1J*y).flatten() + dataset = np.vstack((z, gamma(z))).T + + def f(z): + return np.exp(loggamma(z)) + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_identities2(): + # test the identity loggamma(z + 1) = log(z) + loggamma(z) + x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]) + y = x.copy() + x, y = np.meshgrid(x, y) + z = (x + 1J*y).flatten() + dataset = np.vstack((z, np.log(z) + loggamma(z))).T + + def f(z): + return loggamma(z + 1) + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_complex_dispatch_realpart(): + # Test that the real parts of loggamma and gammaln agree on the + # real axis. 
+ x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5 + + dataset = np.vstack((x, gammaln(x))).T + + def f(z): + z = np.array(z, dtype='complex128') + return loggamma(z).real + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_real_dispatch(): + x = np.logspace(-10, 10) + 0.5 + dataset = np.vstack((x, gammaln(x))).T + + FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + assert_(loggamma(0) == np.inf) + assert_(np.isnan(loggamma(-1))) + + +def test_gh_6536(): + z = loggamma(complex(-3.4, +0.0)) + zbar = loggamma(complex(-3.4, -0.0)) + assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0) + + +def test_branch_cut(): + # Make sure negative zero is treated correctly + x = -np.logspace(300, -30, 100) + z = np.asarray([complex(x0, 0.0) for x0 in x]) + zbar = np.asarray([complex(x0, -0.0) for x0 in x]) + assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py new file mode 100644 index 0000000000000000000000000000000000000000..f675215e1f6baa3e1e5ff2b5d0a5ac77bc475dd1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py @@ -0,0 +1,212 @@ +import numpy as np +from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose, + assert_array_almost_equal, assert_) + +from scipy.special import logsumexp, softmax + + +def test_logsumexp(): + # Test with zero-size array + a = [] + desired = -np.inf + assert_equal(logsumexp(a), desired) + + # Test whether logsumexp() function correctly handles large inputs. 
+ a = np.arange(200) + desired = np.log(np.sum(np.exp(a))) + assert_almost_equal(logsumexp(a), desired) + + # Now test with large numbers + b = [1000, 1000] + desired = 1000.0 + np.log(2.0) + assert_almost_equal(logsumexp(b), desired) + + n = 1000 + b = np.full(n, 10000, dtype='float64') + desired = 10000.0 + np.log(n) + assert_almost_equal(logsumexp(b), desired) + + x = np.array([1e-40] * 1000000) + logx = np.log(x) + + X = np.vstack([x, x]) + logX = np.vstack([logx, logx]) + assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum()) + assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) + assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) + + # Handling special values properly + assert_equal(logsumexp(np.inf), np.inf) + assert_equal(logsumexp(-np.inf), -np.inf) + assert_equal(logsumexp(np.nan), np.nan) + assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf) + + # Handling an array with different magnitudes on the axes + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], axis=-1), + [1e10, -1e10]) + + # Test keeping dimensions + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], + axis=-1, + keepdims=True), + [[1e10], [-1e10]]) + + # Test multiple axes + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], + axis=(-1,-2)), + 1e10) + + +def test_logsumexp_b(): + a = np.arange(200) + b = np.arange(200, 0, -1) + desired = np.log(np.sum(b*np.exp(a))) + assert_almost_equal(logsumexp(a, b=b), desired) + + a = [1000, 1000] + b = [1.2, 1.2] + desired = 1000 + np.log(2 * 1.2) + assert_almost_equal(logsumexp(a, b=b), desired) + + x = np.array([1e-40] * 100000) + b = np.linspace(1, 1000, 100000) + logx = np.log(x) + + X = np.vstack((x, x)) + logX = np.vstack((logx, logx)) + B = np.vstack((b, b)) + assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum()) + assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)), + (B * X).sum(axis=0)) + 
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)), + (B * X).sum(axis=1)) + + +def test_logsumexp_sign(): + a = [1,1,1] + b = [1,-1,-1] + + r, s = logsumexp(a, b=b, return_sign=True) + assert_almost_equal(r,1) + assert_equal(s,-1) + + +def test_logsumexp_sign_zero(): + a = [1,1] + b = [1,-1] + + r, s = logsumexp(a, b=b, return_sign=True) + assert_(not np.isfinite(r)) + assert_(not np.isnan(r)) + assert_(r < 0) + assert_equal(s,0) + + +def test_logsumexp_sign_shape(): + a = np.ones((1,2,3,4)) + b = np.ones_like(a) + + r, s = logsumexp(a, axis=2, b=b, return_sign=True) + + assert_equal(r.shape, s.shape) + assert_equal(r.shape, (1,2,4)) + + r, s = logsumexp(a, axis=(1,3), b=b, return_sign=True) + + assert_equal(r.shape, s.shape) + assert_equal(r.shape, (1,3)) + + +def test_logsumexp_complex_sign(): + a = np.array([1 + 1j, 2 - 1j, -2 + 3j]) + + r, s = logsumexp(a, return_sign=True) + + expected_sumexp = np.exp(a).sum() + # This is the numpy>=2.0 convention for np.sign + expected_sign = expected_sumexp / abs(expected_sumexp) + + assert_allclose(s, expected_sign) + assert_allclose(s * np.exp(r), expected_sumexp) + + +def test_logsumexp_shape(): + a = np.ones((1, 2, 3, 4)) + b = np.ones_like(a) + + r = logsumexp(a, axis=2, b=b) + assert_equal(r.shape, (1, 2, 4)) + + r = logsumexp(a, axis=(1, 3), b=b) + assert_equal(r.shape, (1, 3)) + + +def test_logsumexp_b_zero(): + a = [1,10000] + b = [1,0] + + assert_almost_equal(logsumexp(a, b=b), 1) + + +def test_logsumexp_b_shape(): + a = np.zeros((4,1,2,1)) + b = np.ones((3,1,5)) + + logsumexp(a, b=b) + + +def test_softmax_fixtures(): + assert_allclose(softmax([1000, 0, 0, 0]), np.array([1, 0, 0, 0]), + rtol=1e-13) + assert_allclose(softmax([1, 1]), np.array([.5, .5]), rtol=1e-13) + assert_allclose(softmax([0, 1]), np.array([1, np.e])/(1 + np.e), + rtol=1e-13) + + # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then + # converted to float. 
+ x = np.arange(4) + expected = np.array([0.03205860328008499, + 0.08714431874203256, + 0.23688281808991013, + 0.6439142598879722]) + + assert_allclose(softmax(x), expected, rtol=1e-13) + + # Translation property. If all the values are changed by the same amount, + # the softmax result does not change. + assert_allclose(softmax(x + 100), expected, rtol=1e-13) + + # When axis=None, softmax operates on the entire array, and preserves + # the shape. + assert_allclose(softmax(x.reshape(2, 2)), expected.reshape(2, 2), + rtol=1e-13) + + +def test_softmax_multi_axes(): + assert_allclose(softmax([[1000, 0], [1000, 0]], axis=0), + np.array([[.5, .5], [.5, .5]]), rtol=1e-13) + assert_allclose(softmax([[1000, 0], [1000, 0]], axis=1), + np.array([[1, 0], [1, 0]]), rtol=1e-13) + + # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then + # converted to float. + x = np.array([[-25, 0, 25, 50], + [1, 325, 749, 750]]) + expected = np.array([[2.678636961770877e-33, + 1.9287498479371314e-22, + 1.3887943864771144e-11, + 0.999999999986112], + [0.0, + 1.9444526359919372e-185, + 0.2689414213699951, + 0.7310585786300048]]) + assert_allclose(softmax(x, axis=1), expected, rtol=1e-13) + assert_allclose(softmax(x.T, axis=0), expected.T, rtol=1e-13) + + # 3-d input, with a tuple for the axis. 
+ x3d = x.reshape(2, 2, 2) + assert_allclose(softmax(x3d, axis=(1, 2)), expected.reshape(2, 2, 2), + rtol=1e-13) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_round.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_round.py new file mode 100644 index 0000000000000000000000000000000000000000..07d3850f1084025174e6e9172be63e67a0837f1f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_round.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest + +from scipy.special import _test_internal + + +@pytest.mark.fail_slow(5) +@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()") +def test_add_round_up(): + np.random.seed(1234) + _test_internal.test_add_round(10**5, 'up') + + +@pytest.mark.fail_slow(5) +@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()") +def test_add_round_down(): + np.random.seed(1234) + _test_internal.test_add_round(10**5, 'down') diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py new file mode 100644 index 0000000000000000000000000000000000000000..26c8413f0ea26f56ca13d869e83e5fc6ab2a0d98 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py @@ -0,0 +1,142 @@ +import sys +import warnings + +import numpy as np +from numpy.testing import assert_, assert_equal, IS_PYPY +import pytest +from pytest import raises as assert_raises + +import scipy.special as sc +from scipy.special._ufuncs import _sf_error_test_function + +_sf_error_code_map = { + # skip 'ok' + 'singular': 1, + 'underflow': 2, + 'overflow': 3, + 'slow': 4, + 'loss': 5, + 'no_result': 6, + 'domain': 7, + 'arg': 8, + 'other': 9 +} + +_sf_error_actions = [ + 'ignore', + 'warn', + 'raise' +] + + +def _check_action(fun, args, action): + # TODO: special expert should correct + # the coercion at the true location? 
+ args = np.asarray(args, dtype=np.dtype("long")) + if action == 'warn': + with pytest.warns(sc.SpecialFunctionWarning): + fun(*args) + elif action == 'raise': + with assert_raises(sc.SpecialFunctionError): + fun(*args) + else: + # action == 'ignore', make sure there are no warnings/exceptions + with warnings.catch_warnings(): + warnings.simplefilter("error") + fun(*args) + + +def test_geterr(): + err = sc.geterr() + for key, value in err.items(): + assert_(key in _sf_error_code_map) + assert_(value in _sf_error_actions) + + +def test_seterr(): + entry_err = sc.geterr() + try: + for category, error_code in _sf_error_code_map.items(): + for action in _sf_error_actions: + geterr_olderr = sc.geterr() + seterr_olderr = sc.seterr(**{category: action}) + assert_(geterr_olderr == seterr_olderr) + newerr = sc.geterr() + assert_(newerr[category] == action) + geterr_olderr.pop(category) + newerr.pop(category) + assert_(geterr_olderr == newerr) + _check_action(_sf_error_test_function, (error_code,), action) + finally: + sc.seterr(**entry_err) + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_sf_error_special_refcount(): + # Regression test for gh-16233. + # Check that the reference count of scipy.special is not increased + # when a SpecialFunctionError is raised. 
+ refcount_before = sys.getrefcount(sc) + with sc.errstate(all='raise'): + with pytest.raises(sc.SpecialFunctionError, match='domain error'): + sc.ndtri(2.0) + refcount_after = sys.getrefcount(sc) + assert refcount_after == refcount_before + + +def test_errstate_pyx_basic(): + olderr = sc.geterr() + with sc.errstate(singular='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.loggamma(0) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_c_basic(): + olderr = sc.geterr() + with sc.errstate(domain='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.spence(-1) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_cpp_basic(): + olderr = sc.geterr() + with sc.errstate(underflow='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.wrightomega(-1000) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_cpp_scipy_special(): + olderr = sc.geterr() + with sc.errstate(singular='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.lambertw(0, 1) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_cpp_alt_ufunc_machinery(): + olderr = sc.geterr() + with sc.errstate(singular='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.gammaln(0) + assert_equal(olderr, sc.geterr()) + + +def test_errstate(): + for category, error_code in _sf_error_code_map.items(): + for action in _sf_error_actions: + olderr = sc.geterr() + with sc.errstate(**{category: action}): + _check_action(_sf_error_test_function, (error_code,), action) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_all_but_one(): + olderr = sc.geterr() + with sc.errstate(all='raise', singular='ignore'): + sc.gammaln(0) + with assert_raises(sc.SpecialFunctionError): + sc.spence(-1.0) + assert_equal(olderr, sc.geterr()) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py new file mode 100644 index 
0000000000000000000000000000000000000000..c4a047c78fb8542bd0abbb75a4815d777e1414b0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py @@ -0,0 +1,61 @@ +import numpy as np +from numpy.testing import (assert_array_equal, + assert_array_almost_equal_nulp, assert_almost_equal) +from pytest import raises as assert_raises + +from scipy.special import gammaln, multigammaln + + +class TestMultiGammaLn: + + def test1(self): + # A test of the identity + # Gamma_1(a) = Gamma(a) + np.random.seed(1234) + a = np.abs(np.random.randn()) + assert_array_equal(multigammaln(a, 1), gammaln(a)) + + def test2(self): + # A test of the identity + # Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5) + a = np.array([2.5, 10.0]) + result = multigammaln(a, 2) + expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5) + assert_almost_equal(result, expected) + + def test_bararg(self): + assert_raises(ValueError, multigammaln, 0.5, 1.2) + + +def _check_multigammaln_array_result(a, d): + # Test that the shape of the array returned by multigammaln + # matches the input shape, and that all the values match + # the value computed when multigammaln is called with a scalar. + result = multigammaln(a, d) + assert_array_equal(a.shape, result.shape) + a1 = a.ravel() + result1 = result.ravel() + for i in range(a.size): + assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d)) + + +def test_multigammaln_array_arg(): + # Check that the array returned by multigammaln has the correct + # shape and contains the correct values. The cases have arrays + # with several different shapes. + # The cases include a regression test for ticket #1849 + # (a = np.array([2.0]), an array with a single element). 
+ np.random.seed(1234) + + cases = [ + # a, d + (np.abs(np.random.randn(3, 2)) + 5, 5), + (np.abs(np.random.randn(1, 2)) + 5, 5), + (np.arange(10.0, 18.0).reshape(2, 2, 2), 3), + (np.array([2.0]), 3), + (np.float64(2.0), 3), + ] + + for a, d in cases: + _check_multigammaln_array_result(a, d) + diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py new file mode 100644 index 0000000000000000000000000000000000000000..4c78180cb778f6914e569675ea31e3c4dacdd4ad --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py @@ -0,0 +1,61 @@ +import numpy as np +from numpy.testing import assert_allclose +import scipy.special as sc +from scipy.special._basic import _sph_harm_all + + +def test_first_harmonics(): + # Test against explicit representations of the first four + # spherical harmonics which use `theta` as the azimuthal angle, + # `phi` as the polar angle, and include the Condon-Shortley + # phase. 
+ + # Notation is Ymn + def Y00(theta, phi): + return 0.5*np.sqrt(1/np.pi) + + def Yn11(theta, phi): + return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi) + + def Y01(theta, phi): + return 0.5*np.sqrt(3/np.pi)*np.cos(phi) + + def Y11(theta, phi): + return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi) + + harms = [Y00, Yn11, Y01, Y11] + m = [0, -1, 0, 1] + n = [0, 1, 1, 1] + + theta = np.linspace(0, 2*np.pi) + phi = np.linspace(0, np.pi) + theta, phi = np.meshgrid(theta, phi) + + for harm, m, n in zip(harms, m, n): + assert_allclose(sc.sph_harm(m, n, theta, phi), + harm(theta, phi), + rtol=1e-15, atol=1e-15, + err_msg=f"Y^{m}_{n} incorrect") + + +def test_all_harmonics(): + n_max = 50 + + theta = np.linspace(0, 2 * np.pi) + phi = np.linspace(0, np.pi) + + y_actual = _sph_harm_all(2 * n_max, n_max, theta, phi) + + for n in [0, 1, 2, 5, 10, 20, 50]: + for m in [0, 1, 2, 5, 10, 20, 50]: + if (m <= n): + y_desired = sc.sph_harm(m, n, theta, phi) + else: + y_desired = 0 + np.testing.assert_allclose(y_actual[m, n], y_desired, rtol = 1e-05) + + if (m <= n): + y_desired = sc.sph_harm(-m, n, theta, phi) + else: + y_desired = 0 + np.testing.assert_allclose(y_actual[-m, n], y_desired, rtol = 1e-05) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py new file mode 100644 index 0000000000000000000000000000000000000000..847bb3b49103ee15126291a3dfe9a3e80f2765c3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py @@ -0,0 +1,385 @@ +# +# Tests of spherical Bessel functions. 
+# +import numpy as np +from numpy.testing import (assert_almost_equal, assert_allclose, + assert_array_almost_equal, suppress_warnings) +import pytest +from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi + +from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn +from scipy.integrate import quad + + +class TestSphericalJn: + def test_spherical_jn_exact(self): + # https://dlmf.nist.gov/10.49.E3 + # Note: exact expression is numerically stable only for small + # n or z >> n. + x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) + assert_allclose(spherical_jn(2, x), + (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x)) + + def test_spherical_jn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x), + (2*n + 1)/x*spherical_jn(n, x)) + + def test_spherical_jn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x), + (2*n + 1)/x*spherical_jn(n, x)) + + def test_spherical_jn_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 6 + x = np.array([-inf, inf]) + assert_allclose(spherical_jn(n, x), np.array([0, 0])) + + def test_spherical_jn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E3 + n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)])) + + def test_spherical_jn_large_arg_1(self): + # https://github.com/scipy/scipy/issues/2165 + # Reference value computed using mpmath, via + # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z)) + assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747) + + def test_spherical_jn_large_arg_2(self): + # https://github.com/scipy/scipy/issues/1641 + # Reference value computed using mpmath, via + # besselj(n + 
mpf(1)/2, z)*sqrt(pi/(2*z)) + assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05) + + def test_spherical_jn_at_zero(self): + # https://dlmf.nist.gov/10.52.E1 + # But note that n = 0 is a special case: j0 = sin(x)/x -> 1 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0])) + + +class TestSphericalYn: + def test_spherical_yn_exact(self): + # https://dlmf.nist.gov/10.49.E5 + # Note: exact expression is numerically stable only for small + # n or z >> n. + x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) + assert_allclose(spherical_yn(2, x), + (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x)) + + def test_spherical_yn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x), + (2*n + 1)/x*spherical_yn(n, x)) + + def test_spherical_yn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x), + (2*n + 1)/x*spherical_yn(n, x)) + + def test_spherical_yn_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 6 + x = np.array([-inf, inf]) + assert_allclose(spherical_yn(n, x), np.array([0, 0])) + + def test_spherical_yn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E3 + n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)])) + + def test_spherical_yn_at_zero(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_yn(n, x), np.full(n.shape, -inf)) + + def test_spherical_yn_at_zero_complex(self): + # Consistently with numpy: + # >>> -np.cos(0)/0 + # -inf + # >>> -np.cos(0+0j)/(0+0j) + # (-inf + nan*j) + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + 0j + 
assert_allclose(spherical_yn(n, x), np.full(n.shape, nan)) + + +class TestSphericalJnYnCrossProduct: + def test_spherical_jn_yn_cross_product_1(self): + # https://dlmf.nist.gov/10.50.E3 + n = np.array([1, 5, 8]) + x = np.array([0.1, 1, 10]) + left = (spherical_jn(n + 1, x) * spherical_yn(n, x) - + spherical_jn(n, x) * spherical_yn(n + 1, x)) + right = 1/x**2 + assert_allclose(left, right) + + def test_spherical_jn_yn_cross_product_2(self): + # https://dlmf.nist.gov/10.50.E3 + n = np.array([1, 5, 8]) + x = np.array([0.1, 1, 10]) + left = (spherical_jn(n + 2, x) * spherical_yn(n, x) - + spherical_jn(n, x) * spherical_yn(n + 2, x)) + right = (2*n + 3)/x**3 + assert_allclose(left, right) + + +class TestSphericalIn: + def test_spherical_in_exact(self): + # https://dlmf.nist.gov/10.49.E9 + x = np.array([0.12, 1.23, 12.34, 123.45]) + assert_allclose(spherical_in(2, x), + (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x)) + + def test_spherical_in_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), + (2*n + 1)/x*spherical_in(n, x)) + + def test_spherical_in_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), + (2*n + 1)/x*spherical_in(n, x)) + + def test_spherical_in_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 5 + x = np.array([-inf, inf]) + assert_allclose(spherical_in(n, x), np.array([-inf, inf])) + + def test_spherical_in_inf_complex(self): + # https://dlmf.nist.gov/10.52.E5 + # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but + # this appears impossible to achieve because C99 regards any complex + # value with at least one infinite part as a complex infinity, so + # 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is + # the correct return value. 
+ n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan])) + + def test_spherical_in_at_zero(self): + # https://dlmf.nist.gov/10.52.E1 + # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0])) + + +class TestSphericalKn: + def test_spherical_kn_exact(self): + # https://dlmf.nist.gov/10.49.E13 + x = np.array([0.12, 1.23, 12.34, 123.45]) + assert_allclose(spherical_kn(2, x), + pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3)) + + def test_spherical_kn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose( + (-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), + (-1)**n*(2*n + 1)/x*spherical_kn(n, x) + ) + + def test_spherical_kn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose( + (-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), + (-1)**n*(2*n + 1)/x*spherical_kn(n, x) + ) + + def test_spherical_kn_inf_real(self): + # https://dlmf.nist.gov/10.52.E6 + n = 5 + x = np.array([-inf, inf]) + assert_allclose(spherical_kn(n, x), np.array([-inf, 0])) + + def test_spherical_kn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E6 + # The behavior at complex infinity depends on the sign of the real + # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's + # z*inf. This distinction cannot be captured, so we return nan. 
+ n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan])) + + def test_spherical_kn_at_zero(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_kn(n, x), np.full(n.shape, inf)) + + def test_spherical_kn_at_zero_complex(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + 0j + assert_allclose(spherical_kn(n, x), np.full(n.shape, nan)) + + +class SphericalDerivativesTestCase: + def fundamental_theorem(self, n, a, b): + integral, tolerance = quad(lambda z: self.df(n, z), a, b) + assert_allclose(integral, + self.f(n, b) - self.f(n, a), + atol=tolerance) + + @pytest.mark.slow + def test_fundamental_theorem_0(self): + self.fundamental_theorem(0, 3.0, 15.0) + + @pytest.mark.slow + def test_fundamental_theorem_7(self): + self.fundamental_theorem(7, 0.5, 1.2) + + +class TestSphericalJnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_jn(n, z) + + def df(self, n, z): + return spherical_jn(n, z, derivative=True) + + def test_spherical_jn_d_zero(self): + n = np.array([0, 1, 2, 3, 7, 15]) + assert_allclose(spherical_jn(n, 0, derivative=True), + np.array([0, 1/3, 0, 0, 0, 0])) + + +class TestSphericalYnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_yn(n, z) + + def df(self, n, z): + return spherical_yn(n, z, derivative=True) + + +class TestSphericalInDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_in(n, z) + + def df(self, n, z): + return spherical_in(n, z, derivative=True) + + def test_spherical_in_d_zero(self): + n = np.array([0, 1, 2, 3, 7, 15]) + spherical_in(n, 0, derivative=False) + assert_allclose(spherical_in(n, 0, derivative=True), + np.array([0, 1/3, 0, 0, 0, 0])) + + +class TestSphericalKnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_kn(n, z) + + def df(self, n, z): + 
return spherical_kn(n, z, derivative=True) + + +class TestSphericalOld: + # These are tests from the TestSpherical class of test_basic.py, + # rewritten to use spherical_* instead of sph_* but otherwise unchanged. + + def test_sph_in(self): + # This test reproduces test_basic.TestSpherical.test_sph_in. + i1n = np.empty((2,2)) + x = 0.2 + + i1n[0][0] = spherical_in(0, x) + i1n[0][1] = spherical_in(1, x) + i1n[1][0] = spherical_in(0, x, derivative=True) + i1n[1][1] = spherical_in(1, x, derivative=True) + + inp0 = (i1n[0][1]) + inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1]) + assert_array_almost_equal(i1n[0],np.array([1.0066800127054699381, + 0.066933714568029540839]),12) + assert_array_almost_equal(i1n[1],[inp0,inp1],12) + + def test_sph_in_kn_order0(self): + x = 1. + sph_i0 = np.empty((2,)) + sph_i0[0] = spherical_in(0, x) + sph_i0[1] = spherical_in(0, x, derivative=True) + sph_i0_expected = np.array([np.sinh(x)/x, + np.cosh(x)/x-np.sinh(x)/x**2]) + assert_array_almost_equal(r_[sph_i0], sph_i0_expected) + + sph_k0 = np.empty((2,)) + sph_k0[0] = spherical_kn(0, x) + sph_k0[1] = spherical_kn(0, x, derivative=True) + sph_k0_expected = np.array([0.5*pi*exp(-x)/x, + -0.5*pi*exp(-x)*(1/x+1/x**2)]) + assert_array_almost_equal(r_[sph_k0], sph_k0_expected) + + def test_sph_jn(self): + s1 = np.empty((2,3)) + x = 0.2 + + s1[0][0] = spherical_jn(0, x) + s1[0][1] = spherical_jn(1, x) + s1[0][2] = spherical_jn(2, x) + s1[1][0] = spherical_jn(0, x, derivative=True) + s1[1][1] = spherical_jn(1, x, derivative=True) + s1[1][2] = spherical_jn(2, x, derivative=True) + + s10 = -s1[0][1] + s11 = s1[0][0]-2.0/0.2*s1[0][1] + s12 = s1[0][1]-3.0/0.2*s1[0][2] + assert_array_almost_equal(s1[0],[0.99334665397530607731, + 0.066400380670322230863, + 0.0026590560795273856680],12) + assert_array_almost_equal(s1[1],[s10,s11,s12],12) + + def test_sph_kn(self): + kn = np.empty((2,3)) + x = 0.2 + + kn[0][0] = spherical_kn(0, x) + kn[0][1] = spherical_kn(1, x) + kn[0][2] = spherical_kn(2, x) + kn[1][0] = 
spherical_kn(0, x, derivative=True) + kn[1][1] = spherical_kn(1, x, derivative=True) + kn[1][2] = spherical_kn(2, x, derivative=True) + + kn0 = -kn[0][1] + kn1 = -kn[0][0]-2.0/0.2*kn[0][1] + kn2 = -kn[0][1]-3.0/0.2*kn[0][2] + assert_array_almost_equal(kn[0],[6.4302962978445670140, + 38.581777787067402086, + 585.15696310385559829],12) + assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9) + + def test_sph_yn(self): + sy1 = spherical_yn(2, 0.2) + sy2 = spherical_yn(0, 0.2) + assert_almost_equal(sy1,-377.52483,5) # previous values in the system + assert_almost_equal(sy2,-4.9003329,5) + sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3 + sy3 = spherical_yn(1, 0.2, derivative=True) + # compare correct derivative val. (correct =-system val). + assert_almost_equal(sy3,sphpy,4) diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..35c906a3224ae5c15f1b03f943ea8891b342a3ee --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple _linalg_slogdet(const at::Tensor & A); +TORCH_API ::std::tuple _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A); +TORCH_API ::std::tuple _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_native.h new file mode 100644 index 0000000000000000000000000000000000000000..52a0ee9ecf4085641ffe3dae61d30e162d2a13b6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _test_warn_in_autograd(const at::Tensor & self); +TORCH_API at::Tensor & _test_warn_in_autograd_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..22da564686ca13008f7f4d0f2ae520e14ffa2e2c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes 
that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales, at::Tensor & out); +TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b388a3d663227234abdbbdc0a9db6416c3986c55 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor __and__(const at::Tensor & self, 
const at::Scalar & other); +TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b7e946dedd4eaee429ecd2ffe0d068ddb3d7517a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API as_strided_copy { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? 
storage_offset=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); +}; + +struct TORCH_API as_strided_copy_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f85e61dd4f6f0b36825a680506b715c1bc7e6596 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API avg_pool2d_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & out); +}; + +struct TORCH_API avg_pool2d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..810b8552c3c244ef801aa5dcea6e1c0d528aea21 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8f328b7804ad2f7783df4bc2f3657f6ac23ddaf0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor broadcast_to_symint(const at::Tensor & self, c10::SymIntArrayRef size); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf0a6fe31c3d4aaaf8162cb08a9f5b2a719ea6c2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator 
signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0); +TORCH_API at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mul_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mul_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..737652d17bbf53ce63caf0f3028ec45df8d18664 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mul_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor mul(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & mul_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & mul_(at::Tensor & self, const at::Scalar & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/permute.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/permute.h new file mode 100644 index 0000000000000000000000000000000000000000..56f89ed7348f9cd87d2af760dd604f17e0afef9c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/permute.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) +inline at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::permute::call(self, dims); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ccdc074aa7be93e92496184d2ed68cc25d0b6308 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & 
quantize_per_tensor_dynamic_out(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out); +TORCH_API at::Tensor quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9bc6f283182ff07c2b9aa6991e7a63f62b09e529 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API refine_names { + using schema = at::Tensor (const at::Tensor &, at::DimnameList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::refine_names") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, at::DimnameList names); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dc475d4a216c7b9382077a77f3fdd1ef3018a3c8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0); +TORCH_API at::Tensor & smooth_l1_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0); +TORCH_API at::Tensor & smooth_l1_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8072a5e2162cd710496324f406f020a3df4efb0b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_spherical_bessel_j0(const at::Tensor & x); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..70e9022fa034ed438d9468653250f0aa1459fc60 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple std_mean(const at::Tensor & self, bool unbiased); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/METADATA b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..8ac541d1b185dcf0ddecd0a43668bb3f124346e7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/METADATA @@ -0,0 +1,159 @@ +Metadata-Version: 2.1 +Name: compressed-tensors +Version: 0.9.1 +Summary: Library for utilization of compressed safetensors of neural network models +Home-page: https://github.com/neuralmagic/compressed-tensors +Author: Neuralmagic, Inc. 
+Author-email: support@neuralmagic.com +License: Apache 2.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: torch>=1.7.0 +Requires-Dist: transformers +Requires-Dist: pydantic>=2.0 +Provides-Extra: accelerate +Requires-Dist: accelerate; extra == "accelerate" +Provides-Extra: dev +Requires-Dist: black==22.12.0; extra == "dev" +Requires-Dist: isort==5.8.0; extra == "dev" +Requires-Dist: wheel>=0.36.2; extra == "dev" +Requires-Dist: flake8>=3.8.3; extra == "dev" +Requires-Dist: pytest>=6.0.0; extra == "dev" +Requires-Dist: nbconvert>=7.16.3; extra == "dev" + +# compressed-tensors + +The `compressed-tensors` library extends the [safetensors](https://github.com/huggingface/safetensors) format, providing a versatile and efficient way to store and manage compressed tensor data. This library supports various quantization and sparsity schemes, making it a unified format for handling different model optimizations like GPTQ, AWQ, SmoothQuant, INT8, FP8, SparseGPT, and more. + +## Why `compressed-tensors`? + +As model compression becomes increasingly important for efficient deployment of LLMs, the landscape of quantization and compression techniques has become increasingly fragmented. +Each method often comes with its own storage format and loading procedures, making it challenging to work with multiple techniques or switch between them. +`compressed-tensors` addresses this by providing a single, extensible format that can represent a wide variety of compression schemes. + +* **Unified Checkpoint Format**: Supports various compression schemes in a single, consistent format. +* **Wide Compatibility**: Works with popular quantization methods like GPTQ, SmoothQuant, and FP8. 
See [llm-compressor](https://github.com/vllm-project/llm-compressor) +* **Flexible Quantization Support**: + * Weight-only quantization (e.g., W4A16, W8A16, WnA16) + * Activation quantization (e.g., W8A8) + * KV cache quantization + * Non-uniform schemes (different layers can be quantized in different ways!) +* **Sparsity Support**: Handles both unstructured and semi-structured (e.g., 2:4) sparsity patterns. +* **Open-Source Integration**: Designed to work seamlessly with Hugging Face models and PyTorch. + +This allows developers and researchers to easily experiment with composing different quantization methods, simplify model deployment pipelines, and reduce the overhead of supporting multiple compression formats in inference engines. + +## Installation + +### From [PyPI](https://pypi.org/project/compressed-tensors) + +Stable release: +```bash +pip install compressed-tensors +``` + +Nightly release: +```bash +pip install compressed-tensors-nightly +``` + +### From Source + +```bash +git clone https://github.com/neuralmagic/compressed-tensors +cd compressed-tensors +pip install -e . +``` + +## Getting started + +### Saving/Loading Compressed Tensors (Bitmask Compression) + +The function `save_compressed` uses the `compression_format` argument to apply compression to tensors. +The function `load_compressed` reverses the process: converts the compressed weights on disk to decompressed weights in device memory. 
+ +```python +from compressed_tensors import save_compressed, load_compressed, BitmaskConfig +from torch import Tensor +from typing import Dict + +# the example BitmaskConfig method efficiently compresses +# tensors with large number of zero entries +compression_config = BitmaskConfig() + +tensors: Dict[str, Tensor] = {"tensor_1": Tensor( + [[0.0, 0.0, 0.0], + [1.0, 1.0, 1.0]] +)} +# compress tensors using BitmaskConfig compression format (save them efficiently on disk) +save_compressed(tensors, "model.safetensors", compression_format=compression_config.format) + +# decompress tensors (load_compressed returns a generator for memory efficiency) +decompressed_tensors = {} +for tensor_name, tensor in load_compressed("model.safetensors", compression_config = compression_config): + decompressed_tensors[tensor_name] = tensor +``` + +## Saving/Loading Compressed Models (Bitmask Compression) + +We can apply bitmask compression to a whole model. For more detailed example see `example` directory. +```python +from compressed_tensors import save_compressed_model, load_compressed, BitmaskConfig +from transformers import AutoModelForCausalLM + +model_name = "neuralmagic/llama2.c-stories110M-pruned50" +model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto") + +original_state_dict = model.state_dict() + +compression_config = BitmaskConfig() + +# save compressed model weights +save_compressed_model(model, "compressed_model.safetensors", compression_format=compression_config.format) + +# load compressed model weights (`dict` turns generator into a dictionary) +state_dict = dict(load_compressed("compressed_model.safetensors", compression_config)) +``` + +For more in-depth tutorial on bitmask compression, refer to the [notebook](https://github.com/neuralmagic/compressed-tensors/blob/d707c5b84bc3fef164aebdcd97cb6eaa571982f8/examples/bitmask_compression.ipynb). 
+ + +## Saving a Compressed Model with PTQ + +We can use compressed-tensors to run basic post training quantization (PTQ) and save the quantized model compressed on disk + +```python +model_name = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" +model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda:0", torch_dtype="auto") + +config = QuantizationConfig.parse_file("./examples/bit_packing/int4_config.json") +config.quantization_status = QuantizationStatus.CALIBRATION +apply_quantization_config(model, config) + +dataset = load_dataset("ptb_text_only")["train"] +tokenizer = AutoTokenizer.from_pretrained(model_name) + +def tokenize_function(examples): + return tokenizer(examples["sentence"], padding=False, truncation=True, max_length=1024) + +tokenized_dataset = dataset.map(tokenize_function, batched=True) +data_loader = DataLoader(tokenized_dataset, batch_size=1, collate_fn=DefaultDataCollator()) + +with torch.no_grad(): + for idx, sample in tqdm(enumerate(data_loader), desc="Running calibration"): + sample = {key: value.to(device) for key,value in sample.items()} + _ = model(**sample) + + if idx >= 512: + break + +model.apply(freeze_module_quantization) +model.apply(compress_quantized_weights) + +output_dir = "./ex_llama1.1b_w4a16_packed_quantize" +compressor = ModelCompressor(quantization_config=config) +compressed_state_dict = compressor.compress(model) +model.save_pretrained(output_dir, state_dict=compressed_state_dict) +``` + +For more in-depth tutorial on quantization compression, refer to the [notebook](./examples/quantize_and_pack_int4.ipynb). 
diff --git a/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/RECORD b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..06013507819f86718a5b0ce77deace727f4c2ffe --- /dev/null +++ b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/RECORD @@ -0,0 +1,101 @@ +compressed_tensors-0.9.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +compressed_tensors-0.9.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +compressed_tensors-0.9.1.dist-info/METADATA,sha256=LTzdui2DBwsv09xaTEsW2X66rd775Jf2lY9v9hs_WJg,6782 +compressed_tensors-0.9.1.dist-info/RECORD,, +compressed_tensors-0.9.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +compressed_tensors-0.9.1.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92 +compressed_tensors-0.9.1.dist-info/top_level.txt,sha256=w2i-GyPs2s1UwVxvutSvN_lM22SXC2hQFBmoMcPnV7Y,19 +compressed_tensors/__init__.py,sha256=UtKmifNeBCSE2TZSAfduVNNzHY-3V7bLjZ7n7RuXLOE,812 +compressed_tensors/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/__pycache__/base.cpython-310.pyc,, +compressed_tensors/__pycache__/version.cpython-310.pyc,, +compressed_tensors/base.py,sha256=73HYH7HY7O2roC89yG_piPFnZwrBfn_i7HmKl90SKc0,875 +compressed_tensors/compressors/__init__.py,sha256=smSygTSfcfuujRrAXDc6uZm4L_ccV1tWZewqVnOb4lM,825 +compressed_tensors/compressors/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/compressors/__pycache__/base.cpython-310.pyc,, +compressed_tensors/compressors/__pycache__/helpers.cpython-310.pyc,, +compressed_tensors/compressors/base.py,sha256=D9TNwQcjanDiAHODPbg8JUqc66e3j50rctY7A708NEs,6743 +compressed_tensors/compressors/helpers.py,sha256=OK6qxX9j3bHwF9JfIYSGMgBJe2PWjlTA3byXKCJaTIQ,5431 +compressed_tensors/compressors/model_compressors/__init__.py,sha256=5RGGPFu4YqEt_aOdFSQYFYFDjcZFJN0CsMqRtDZz3Js,666 
+compressed_tensors/compressors/model_compressors/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/compressors/model_compressors/__pycache__/model_compressor.cpython-310.pyc,, +compressed_tensors/compressors/model_compressors/model_compressor.py,sha256=3WyzAW2Rm_uLprxwO2QH6FR76W6Mk4r2yedayaSZHhw,18396 +compressed_tensors/compressors/quantized_compressors/__init__.py,sha256=09UJq68Pht6Bf-4iP9xYl3tetKsncNPHD8IAGbePsr4,714 +compressed_tensors/compressors/quantized_compressors/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/compressors/quantized_compressors/__pycache__/base.cpython-310.pyc,, +compressed_tensors/compressors/quantized_compressors/__pycache__/naive_quantized.cpython-310.pyc,, +compressed_tensors/compressors/quantized_compressors/__pycache__/pack_quantized.cpython-310.pyc,, +compressed_tensors/compressors/quantized_compressors/base.py,sha256=LVqSSqSjGi8LB-X13zC_0AFHc8BobGQVC0zjInDhOWE,7217 +compressed_tensors/compressors/quantized_compressors/naive_quantized.py,sha256=fahmPJFz49rVS7q705uQwZ0kUtdP46GuXR7nPr6uIqI,4943 +compressed_tensors/compressors/quantized_compressors/pack_quantized.py,sha256=OO5dceCfNVuY8A23kBg6z2wk-zGUVqR_MyLvObvT7pk,7741 +compressed_tensors/compressors/sparse_compressors/__init__.py,sha256=Atuz-OdEgn8OCUhx7Ovd6gXdyImAI186uCR-uR0t_Nk,737 +compressed_tensors/compressors/sparse_compressors/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/compressors/sparse_compressors/__pycache__/base.cpython-310.pyc,, +compressed_tensors/compressors/sparse_compressors/__pycache__/dense.cpython-310.pyc,, +compressed_tensors/compressors/sparse_compressors/__pycache__/sparse_24_bitmask.cpython-310.pyc,, +compressed_tensors/compressors/sparse_compressors/__pycache__/sparse_bitmask.cpython-310.pyc,, +compressed_tensors/compressors/sparse_compressors/base.py,sha256=9e841MQWr0j8m33ejDw_jP5_BIpQ5099x9_pvuZ-Nr0,5944 +compressed_tensors/compressors/sparse_compressors/dense.py,sha256=lSKNWRx6H7aUqaJj1j4qbXk8Gkm1UohbnvW1Rvq6Ra4,1284 
+compressed_tensors/compressors/sparse_compressors/sparse_24_bitmask.py,sha256=_g139pe4iAFn5jvGIEk4v-qMoyx9ID6E88vriPSNYV4,8604 +compressed_tensors/compressors/sparse_compressors/sparse_bitmask.py,sha256=7zSr9bqkpuH1ivQpxtYBNxXIoElal7Jo1nSKpZN_IFk,5633 +compressed_tensors/compressors/sparse_quantized_compressors/__init__.py,sha256=4f_cwcKXB1nVVMoiKgTFAc8jAPjPLElo-Df_EDm1_xw,675 +compressed_tensors/compressors/sparse_quantized_compressors/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/compressors/sparse_quantized_compressors/__pycache__/marlin_24.cpython-310.pyc,, +compressed_tensors/compressors/sparse_quantized_compressors/marlin_24.py,sha256=BMIQWTLlnUvxy14iEJegtiP75WHJeOVojey9mKOK1hE,9427 +compressed_tensors/config/__init__.py,sha256=8sOoZ6xvYSC79mBvEtO8l6xk4PC80d29AnnJiGMrY2M,737 +compressed_tensors/config/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/config/__pycache__/base.cpython-310.pyc,, +compressed_tensors/config/__pycache__/dense.cpython-310.pyc,, +compressed_tensors/config/__pycache__/sparse_24_bitmask.cpython-310.pyc,, +compressed_tensors/config/__pycache__/sparse_bitmask.cpython-310.pyc,, +compressed_tensors/config/base.py,sha256=R3iUmFf1MslEjin5LgwQbmfJHIsS7Uw0UIxfn780uqY,3479 +compressed_tensors/config/dense.py,sha256=NgSxnFCnckU9-iunxEaqiFwqgdO7YYxlWKR74jNbjks,1317 +compressed_tensors/config/sparse_24_bitmask.py,sha256=Lhj39zT2V1hxftprvxvneyhv45ShlXOKd75DBbDTyTE,1401 +compressed_tensors/config/sparse_bitmask.py,sha256=pZUboRNZTu6NajGOQEFExoPknak5ynVAUeiiYpS1Gt8,1308 +compressed_tensors/linear/__init__.py,sha256=fH6rjBYAxuwrTzBTlTjTgCYNyh6TCvCqajCz4Im4YrA,617 +compressed_tensors/linear/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/linear/__pycache__/compressed_linear.cpython-310.pyc,, +compressed_tensors/linear/compressed_linear.py,sha256=MJa-UfoKhIkdUWRD1shrXXri2cOwR5GK0a4t4bNYosM,3268 +compressed_tensors/quantization/__init__.py,sha256=83J5bPB7PavN2TfCoW7_vEDhfYpm4TDrqYO9vdSQ5bk,760 
+compressed_tensors/quantization/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/quantization/__pycache__/quant_args.cpython-310.pyc,, +compressed_tensors/quantization/__pycache__/quant_config.cpython-310.pyc,, +compressed_tensors/quantization/__pycache__/quant_scheme.cpython-310.pyc,, +compressed_tensors/quantization/lifecycle/__init__.py,sha256=_uItzFWusyV74Zco_pHLOTdE9a83cL-R-ZdyQrBkIyw,772 +compressed_tensors/quantization/lifecycle/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/quantization/lifecycle/__pycache__/apply.cpython-310.pyc,, +compressed_tensors/quantization/lifecycle/__pycache__/compressed.cpython-310.pyc,, +compressed_tensors/quantization/lifecycle/__pycache__/forward.cpython-310.pyc,, +compressed_tensors/quantization/lifecycle/__pycache__/helpers.cpython-310.pyc,, +compressed_tensors/quantization/lifecycle/__pycache__/initialize.cpython-310.pyc,, +compressed_tensors/quantization/lifecycle/apply.py,sha256=XS4M6N1opKBybhkuQsS338QVb_CKMhUM5TUKrqoNQ0k,16517 +compressed_tensors/quantization/lifecycle/compressed.py,sha256=Fj9n66IN0EWsOAkBHg3O0GlOQpxstqjCcs0ttzMXrJ0,2296 +compressed_tensors/quantization/lifecycle/forward.py,sha256=DOWouUqfaLA4Qhg-ojVVBdhhSAlgZqFC26vZARxE0ko,12961 +compressed_tensors/quantization/lifecycle/helpers.py,sha256=C0mhy2vJ0fCjVeN4kFNhw8Eq1wkteBGHiZ36RVLThRY,944 +compressed_tensors/quantization/lifecycle/initialize.py,sha256=hymYtayTSumm8KCYAYPY267aWmlsJpt8oQFiRblk8qE,7452 +compressed_tensors/quantization/quant_args.py,sha256=jwC__lSmuiJ2qSJYYZGgWgQNbZu6YhhS0e-qugrTNXE,9058 +compressed_tensors/quantization/quant_config.py,sha256=vx06wBo91p4LCb3Vzd-2eCTUeIf_Sz2ZXRP263eQyjQ,10385 +compressed_tensors/quantization/quant_scheme.py,sha256=eQ0JrRZ80GX69fpwW87VzPzzhajhk4mUaJScjk82OY4,6010 +compressed_tensors/quantization/utils/__init__.py,sha256=VdtEmP0bvuND_IGQnyqUPc5lnFp-1_yD7StKSX4x80w,656 +compressed_tensors/quantization/utils/__pycache__/__init__.cpython-310.pyc,, 
+compressed_tensors/quantization/utils/__pycache__/helpers.cpython-310.pyc,, +compressed_tensors/quantization/utils/helpers.py,sha256=DBP-sGRpGAY01K0LFE7qqonNj4hkTYL_mXrMs2LtAD8,14100 +compressed_tensors/registry/__init__.py,sha256=FwLSNYqfIrb5JD_6OK_MT4_svvKTN_nEhpgQlQvGbjI,658 +compressed_tensors/registry/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/registry/__pycache__/registry.cpython-310.pyc,, +compressed_tensors/registry/registry.py,sha256=vRcjVB1ITfSbfYUaGndBBmqhip_5vsS62weorVg0iXo,11896 +compressed_tensors/utils/__init__.py,sha256=gS4gSU2pwcAbsKj-6YMaqhm25udFy6ISYaWBf-myRSM,808 +compressed_tensors/utils/__pycache__/__init__.cpython-310.pyc,, +compressed_tensors/utils/__pycache__/helpers.cpython-310.pyc,, +compressed_tensors/utils/__pycache__/offload.cpython-310.pyc,, +compressed_tensors/utils/__pycache__/permutations_24.cpython-310.pyc,, +compressed_tensors/utils/__pycache__/permute.cpython-310.pyc,, +compressed_tensors/utils/__pycache__/safetensors_load.cpython-310.pyc,, +compressed_tensors/utils/__pycache__/semi_structured_conversions.cpython-310.pyc,, +compressed_tensors/utils/helpers.py,sha256=xQHZXwIAAybC8mMTiAtWSOeggZMT1JOC6_wcDvlo2yk,10320 +compressed_tensors/utils/offload.py,sha256=cMmzd9IdlNbs29CReHj1PPSLUM6OWaT5YumlLT5eP3w,13845 +compressed_tensors/utils/permutations_24.py,sha256=kx6fsfDHebx94zsSzhXGyCyuC9sVyah6BUUir_StT28,2530 +compressed_tensors/utils/permute.py,sha256=V6tJLKo3Syccj-viv4F7ZKZgJeCB-hl-dK8RKI_kBwI,2355 +compressed_tensors/utils/safetensors_load.py,sha256=fBuoHVPoBt1mkvqFJ60zQIASX_4nhl0-6QfFS27NY8I,11430 +compressed_tensors/utils/semi_structured_conversions.py,sha256=XKNffPum54kPASgqKzgKvyeqWPAkair2XEQXjkp7ho8,13489 +compressed_tensors/version.py,sha256=cPIrNBysxeJxrC4lzqGpVUu_oM56xF851VDnvn1gsew,1585 diff --git a/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/REQUESTED new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..79d5c89a71989389294854aa34e329701325f8b0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.45.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..8a05844b986341fe812005800f6def048765cb58 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/top_level.txt @@ -0,0 +1 @@ +compressed_tensors diff --git a/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/METADATA b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..abec0078be01304d8b680c50f1bfdaa43811608d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/METADATA @@ -0,0 +1,360 @@ +Metadata-Version: 2.3 +Name: einops +Version: 0.8.0 +Summary: A new flavour of deep learning operations +Project-URL: Homepage, https://github.com/arogozhnikov/einops +Author: Alex Rogozhnikov +License: MIT +License-File: LICENSE +Keywords: deep learning,einops,machine 
learning,neural networks,scientific computations,tensor manipulation +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Requires-Python: >=3.8 +Description-Content-Type: text/markdown + + + + + + +https://user-images.githubusercontent.com/6318811/177030658-66f0eb5d-e136-44d8-99c9-86ae298ead5b.mp4 + + + + +# einops +[![Run tests](https://github.com/arogozhnikov/einops/actions/workflows/run_tests.yml/badge.svg)](https://github.com/arogozhnikov/einops/actions/workflows/run_tests.yml) +[![PyPI version](https://badge.fury.io/py/einops.svg)](https://badge.fury.io/py/einops) +[![Documentation](https://img.shields.io/badge/documentation-link-blue.svg)](https://einops.rocks/) +![Supported python versions](https://raw.githubusercontent.com/arogozhnikov/einops/master/docs/resources/python_badge.svg) + + +Flexible and powerful tensor operations for readable and reliable code.
+Supports numpy, pytorch, tensorflow, jax, and [others](#supported-frameworks). + +## Recent updates: + +- 0.7.0: no-hassle `torch.compile`, support of [array api standard](https://data-apis.org/array-api/latest/API_specification/index.html) and more +- 10'000🎉: github reports that more than 10k project use einops +- einops 0.6.1: paddle backend added +- einops 0.6 introduces [packing and unpacking](https://github.com/arogozhnikov/einops/blob/master/docs/4-pack-and-unpack.ipynb) +- einops 0.5: einsum is now a part of einops +- [Einops paper](https://openreview.net/pdf?id=oapKSVM2bcj) is accepted for oral presentation at ICLR 2022 (yes, it worth reading). + Talk recordings are [available](https://iclr.cc/virtual/2022/oral/6603) + + +
+Previous updates +- flax and oneflow backend added +- torch.jit.script is supported for pytorch layers +- powerful EinMix added to einops. [Einmix tutorial notebook](https://github.com/arogozhnikov/einops/blob/master/docs/3-einmix-layer.ipynb) +
+ + + + +## Tweets + +> In case you need convincing arguments for setting aside time to learn about einsum and einops... +[Tim Rocktäschel](https://twitter.com/_rockt/status/1230818967205425152) + +> Writing better code with PyTorch and einops 👌 +[Andrej Karpathy](https://twitter.com/karpathy/status/1290826075916779520) + +> Slowly but surely, einops is seeping in to every nook and cranny of my code. If you find yourself shuffling around bazillion dimensional tensors, this might change your life +[Nasim Rahaman](https://twitter.com/nasim_rahaman/status/1216022614755463169) + +[More testimonials](https://einops.rocks/pages/testimonials/) + + + +## Contents + +- [Installation](#Installation) +- [Documentation](https://einops.rocks/) +- [Tutorial](#Tutorials) +- [API micro-reference](#API) +- [Why using einops](#Why-using-einops-notation) +- [Supported frameworks](#Supported-frameworks) +- [Citing](#Citing) +- [Repository](https://github.com/arogozhnikov/einops) and [discussions](https://github.com/arogozhnikov/einops/discussions) + +## Installation + +Plain and simple: +```bash +pip install einops +``` + + + +## Tutorials + +Tutorials are the most convenient way to see `einops` in action + +- part 1: [einops fundamentals](https://github.com/arogozhnikov/einops/blob/master/docs/1-einops-basics.ipynb) +- part 2: [einops for deep learning](https://github.com/arogozhnikov/einops/blob/master/docs/2-einops-for-deep-learning.ipynb) +- part 3: [packing and unpacking](https://github.com/arogozhnikov/einops/blob/master/docs/4-pack-and-unpack.ipynb) +- part 4: [improve pytorch code with einops](http://einops.rocks/pytorch-examples.html) + +Kapil Sachdeva recorded a small [intro to einops](https://www.youtube.com/watch?v=xGy75Pjsqzo). + +## API + +`einops` has a minimalistic yet powerful API. 
+ +Three core operations provided ([einops tutorial](https://github.com/arogozhnikov/einops/blob/master/docs/) +shows those cover stacking, reshape, transposition, squeeze/unsqueeze, repeat, tile, concatenate, view and numerous reductions) + +```python +from einops import rearrange, reduce, repeat +# rearrange elements according to the pattern +output_tensor = rearrange(input_tensor, 't b c -> b c t') +# combine rearrangement and reduction +output_tensor = reduce(input_tensor, 'b c (h h2) (w w2) -> b h w c', 'mean', h2=2, w2=2) +# copy along a new axis +output_tensor = repeat(input_tensor, 'h w -> h w c', c=3) +``` + +Later additions to the family are `pack` and `unpack` functions (better than stack/split/concatenate): + +```python +from einops import pack, unpack +# pack and unpack allow reversibly 'packing' multiple tensors into one. +# Packed tensors may be of different dimensionality: +packed, ps = pack([class_token_bc, image_tokens_bhwc, text_tokens_btc], 'b * c') +class_emb_bc, image_emb_bhwc, text_emb_btc = unpack(transformer(packed), ps, 'b * c') +``` + +Finally, einops provides einsum with a support of multi-lettered names: + +```python +from einops import einsum, pack, unpack +# einsum is like ... einsum, generic and flexible dot-product +# but 1) axes can be multi-lettered 2) pattern goes last 3) works with multiple frameworks +C = einsum(A, B, 'b t1 head c, b t2 head c -> b head t1 t2') +``` + +### EinMix + +`EinMix` is a generic linear layer, perfect for MLP Mixers and similar architectures. + +### Layers + +Einops provides layers (`einops` keeps a separate version for each framework) that reflect corresponding functions + +```python +from einops.layers.torch import Rearrange, Reduce +from einops.layers.tensorflow import Rearrange, Reduce +from einops.layers.flax import Rearrange, Reduce +from einops.layers.paddle import Rearrange, Reduce +from einops.layers.chainer import Rearrange, Reduce +``` + +
+Example of using layers within a pytorch model +Example given for pytorch, but code in other frameworks is almost identical + +```python +from torch.nn import Sequential, Conv2d, MaxPool2d, Linear, ReLU +from einops.layers.torch import Rearrange + +model = Sequential( + ..., + Conv2d(6, 16, kernel_size=5), + MaxPool2d(kernel_size=2), + # flattening without need to write forward + Rearrange('b c h w -> b (c h w)'), + Linear(16*5*5, 120), + ReLU(), + Linear(120, 10), +) +``` + +No more flatten needed! + +Additionally, torch users will benefit from layers as those are script-able and compile-able. +
+ + + + +## Naming + +`einops` stands for Einstein-Inspired Notation for operations +(though "Einstein operations" is more attractive and easier to remember). + +Notation was loosely inspired by Einstein summation (in particular by `numpy.einsum` operation). + +## Why use `einops` notation?! + + +### Semantic information (being verbose in expectations) + +```python +y = x.view(x.shape[0], -1) +y = rearrange(x, 'b c h w -> b (c h w)') +``` +While these two lines are doing the same job in *some* context, +the second one provides information about the input and output. +In other words, `einops` focuses on interface: *what is the input and output*, not *how* the output is computed. + +The next operation looks similar: + +```python +y = rearrange(x, 'time c h w -> time (c h w)') +``` +but it gives the reader a hint: +this is not an independent batch of images we are processing, +but rather a sequence (video). + +Semantic information makes the code easier to read and maintain. + +### Convenient checks + +Reconsider the same example: + +```python +y = x.view(x.shape[0], -1) # x: (batch, 256, 19, 19) +y = rearrange(x, 'b c h w -> b (c h w)') +``` +The second line checks that the input has four dimensions, +but you can also specify particular dimensions. +That's opposed to just writing comments about shapes since comments don't prevent mistakes, not tested, and without code review tend to be outdated +```python +y = x.view(x.shape[0], -1) # x: (batch, 256, 19, 19) +y = rearrange(x, 'b c h w -> b (c h w)', c=256, h=19, w=19) +``` + +### Result is strictly determined + +Below we have at least two ways to define the depth-to-space operation +```python +# depth-to-space +rearrange(x, 'b c (h h2) (w w2) -> b (c h2 w2) h w', h2=2, w2=2) +rearrange(x, 'b c (h h2) (w w2) -> b (h2 w2 c) h w', h2=2, w2=2) +``` +There are at least four more ways to do it. Which one is used by the framework? 
+ +These details are ignored, since *usually* it makes no difference, +but it can make a big difference (e.g. if you use grouped convolutions in the next stage), +and you'd like to specify this in your code. + + +### Uniformity + +```python +reduce(x, 'b c (x dx) -> b c x', 'max', dx=2) +reduce(x, 'b c (x dx) (y dy) -> b c x y', 'max', dx=2, dy=3) +reduce(x, 'b c (x dx) (y dy) (z dz) -> b c x y z', 'max', dx=2, dy=3, dz=4) +``` +These examples demonstrated that we don't use separate operations for 1d/2d/3d pooling, +those are all defined in a uniform way. + +Space-to-depth and depth-to space are defined in many frameworks but how about width-to-height? Here you go: + +```python +rearrange(x, 'b c h (w w2) -> b c (h w2) w', w2=2) +``` + +### Framework independent behavior + +Even simple functions are defined differently by different frameworks + +```python +y = x.flatten() # or flatten(x) +``` + +Suppose `x`'s shape was `(3, 4, 5)`, then `y` has shape ... + +- numpy, pytorch, cupy, chainer: `(60,)` +- keras, tensorflow.layers, gluon: `(3, 20)` + +`einops` works the same way in all frameworks. + +### Independence of framework terminology + +Example: `tile` vs `repeat` causes lots of confusion. To copy image along width: +```python +np.tile(image, (1, 2)) # in numpy +image.repeat(1, 2) # pytorch's repeat ~ numpy's tile +``` + +With einops you don't need to decipher which axis was repeated: +```python +repeat(image, 'h w -> h (tile w)', tile=2) # in numpy +repeat(image, 'h w -> h (tile w)', tile=2) # in pytorch +repeat(image, 'h w -> h (tile w)', tile=2) # in tf +repeat(image, 'h w -> h (tile w)', tile=2) # in jax +repeat(image, 'h w -> h (tile w)', tile=2) # in cupy +... (etc.) +``` + +[Testimonials](https://einops.rocks/pages/testimonials/) provide users' perspective on the same question. + +## Supported frameworks + +Einops works with ... 
+ +- [numpy](http://www.numpy.org/) +- [pytorch](https://pytorch.org/) +- [tensorflow](https://www.tensorflow.org/) +- [jax](https://github.com/google/jax) +- [cupy](https://cupy.chainer.org/) +- [chainer](https://chainer.org/) +- [tf.keras](https://www.tensorflow.org/guide/keras) +- [flax](https://github.com/google/flax) (experimental) +- [paddle](https://github.com/PaddlePaddle/Paddle) (experimental) +- [oneflow](https://github.com/Oneflow-Inc/oneflow) (community) +- [tinygrad](https://github.com/tinygrad/tinygrad) (community) + +Additionally, starting from einops 0.7.0 einops can be used with any framework that supports [Python array API standard](https://data-apis.org/array-api/latest/API_specification/index.html) + +## Citing einops + +Please use the following bibtex record + +```text +@inproceedings{ + rogozhnikov2022einops, + title={Einops: Clear and Reliable Tensor Manipulations with Einstein-like Notation}, + author={Alex Rogozhnikov}, + booktitle={International Conference on Learning Representations}, + year={2022}, + url={https://openreview.net/forum?id=oapKSVM2bcj} +} +``` + + +## Supported python versions + +`einops` works with python 3.8 or later. 
diff --git a/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/RECORD b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2bae542cf26f687cbb4ff327c52ab5f5af6a9317 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/RECORD @@ -0,0 +1,43 @@ +einops-0.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +einops-0.8.0.dist-info/METADATA,sha256=5hTpaWnwYNe3QvhbXYTpA_LUJ2lSlyspSc0gRGni7sY,12926 +einops-0.8.0.dist-info/RECORD,, +einops-0.8.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +einops-0.8.0.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87 +einops-0.8.0.dist-info/licenses/LICENSE,sha256=MNmENkKW9R_67K1LAe4SfpUlDFBokY1LZvyWIGcj5DQ,1073 +einops/__init__.py,sha256=UdixJ9CShlEOQfw0xcU6zYtrAn6Durgh6jCQWdcaQK4,422 +einops/__pycache__/__init__.cpython-310.pyc,, +einops/__pycache__/_backends.cpython-310.pyc,, +einops/__pycache__/_torch_specific.cpython-310.pyc,, +einops/__pycache__/array_api.cpython-310.pyc,, +einops/__pycache__/einops.cpython-310.pyc,, +einops/__pycache__/packing.cpython-310.pyc,, +einops/__pycache__/parsing.cpython-310.pyc,, +einops/_backends.py,sha256=VHPPrL1mf0PDTvyFPZvmZeTqGJoWflqv7b-eoJUHudo,21081 +einops/_torch_specific.py,sha256=yMaQeqAZhBLWR1Q-Jv6uRINJfzROhLb-rzKKevpefUU,4138 +einops/array_api.py,sha256=FcKZSo7l8jC5HL8qudutz1K5x9cFpwACMDcjfbvEKmQ,5251 +einops/einops.py,sha256=AYZe5yMlH-EXO0MWFv27ajyPdVTFpYloaSCRM9jw5sA,37252 +einops/experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +einops/experimental/__pycache__/__init__.cpython-310.pyc,, +einops/experimental/__pycache__/indexing.cpython-310.pyc,, +einops/experimental/indexing.py,sha256=4NtRNmSOrpUURvwhrbbGNK3NeTxHI4EW8R6ct3JZyLw,14868 +einops/layers/__init__.py,sha256=vBtnAt2afs4QlqpeFU4dlZNxBuC9IXl3fmilk-2OzHM,3747 +einops/layers/__pycache__/__init__.cpython-310.pyc,, 
+einops/layers/__pycache__/_einmix.cpython-310.pyc,, +einops/layers/__pycache__/chainer.cpython-310.pyc,, +einops/layers/__pycache__/flax.cpython-310.pyc,, +einops/layers/__pycache__/keras.cpython-310.pyc,, +einops/layers/__pycache__/oneflow.cpython-310.pyc,, +einops/layers/__pycache__/paddle.cpython-310.pyc,, +einops/layers/__pycache__/tensorflow.cpython-310.pyc,, +einops/layers/__pycache__/torch.cpython-310.pyc,, +einops/layers/_einmix.py,sha256=0cl3r4Xp44S2HO-tx0MHa4cMFD2KJXpG5O-4gJM5AtU,8464 +einops/layers/chainer.py,sha256=hUB-XSjN5CP8zALZtalL3n2lQkq7vymftRI8okEMO2Q,1861 +einops/layers/flax.py,sha256=zFy83gSLRm31cLuKFRvZ82_HsefnXPbRvkKZh1KkC1I,2536 +einops/layers/keras.py,sha256=-7So0w94phvf9HdW0xi2mSeBg02qVPvAyfp_1XR02NM,212 +einops/layers/oneflow.py,sha256=YEPzz4xc7BDRQfb8ulD3teqQJdbO6qQg7Z4KIPVTLz8,1864 +einops/layers/paddle.py,sha256=8cRZQ8BT9vYEczh7pNProuTM_3XjLty2ht2sdvXNFiI,1907 +einops/layers/tensorflow.py,sha256=T9uhSVwbXREahc31ARAHoN5K-7zsuS8NRNPdY6Zk1Bc,3324 +einops/layers/torch.py,sha256=504G99kEgy7dk1UPBbj9hzJmZkAHwVhMDFN_8J-p3C8,2399 +einops/packing.py,sha256=Ln2lAMko9hobi_qd-4dPtQY0Ks5hRK7x-5FthL2gunk,7654 +einops/parsing.py,sha256=xbqcvwReLiROEucoegZ20WQiEHlLg0uxo_vYoezKB_4,6746 +einops/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..516596c76787b10928cbab24f22c0ea00433b15d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.24.2 +Root-Is-Purelib: true +Tag: py3-none-any diff --git 
a/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/licenses/LICENSE b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..3a654e906619009358eb2cfe80609bd12b43fa7f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Alex Rogozhnikov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/__init__.py b/vllm/lib/python3.10/site-packages/jsonschema_specifications/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a4235741bfdd251e270ce3b26fa7fb1bf4044b78 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/__init__.py @@ -0,0 +1,12 @@ +""" +The JSON Schema meta-schemas and vocabularies, exposed as a Registry. 
+""" + +from referencing.jsonschema import EMPTY_REGISTRY as _EMPTY_REGISTRY + +from jsonschema_specifications._core import _schemas + +#: A `referencing.jsonschema.SchemaRegistry` containing all of the official +#: meta-schemas and vocabularies. +REGISTRY = (_schemas() @ _EMPTY_REGISTRY).crawl() +__all__ = ["REGISTRY"] diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93a8cf22d01d2b04bbd047e5361bf8e948429267 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc b/vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6da77df32c1b869b542391cd3344f56601b138e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/_core.py b/vllm/lib/python3.10/site-packages/jsonschema_specifications/_core.py new file mode 100644 index 0000000000000000000000000000000000000000..e67bd712b3732f20ec353ba1d8b5c806fad2c74a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/_core.py @@ -0,0 +1,38 @@ +""" +Load all the JSON Schema specification's official schemas. +""" + +import json + +try: + from importlib.resources import files +except ImportError: + from importlib_resources import ( # type: ignore[import-not-found, no-redef] + files, + ) + +from referencing import Resource + + +def _schemas(): + """ + All schemas we ship. 
+ """ + # importlib.resources.abc.Traversal doesn't have nice ways to do this that + # I'm aware of... + # + # It can't recurse arbitrarily, e.g. no ``.glob()``. + # + # So this takes some liberties given the real layout of what we ship + # (only 2 levels of nesting, no directories within the second level). + + for version in files(__package__).joinpath("schemas").iterdir(): + if version.name.startswith("."): + continue + for child in version.iterdir(): + children = [child] if child.is_file() else child.iterdir() + for path in children: + if path.name.startswith("."): + continue + contents = json.loads(path.read_text(encoding="utf-8")) + yield Resource.from_contents(contents) diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json new file mode 100644 index 0000000000000000000000000000000000000000..d5e2d31c3c88e61f4c204cb6616887bba7e105dd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json @@ -0,0 +1,58 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + 
{"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content new file mode 100644 index 0000000000000000000000000000000000000000..2f6e056a9ac2399582eaf05985a26acf9161f213 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content @@ -0,0 +1,17 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + 
"properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core new file mode 100644 index 0000000000000000000000000000000000000000..dfc092d9646e1ea89e4d771bfcf2a4ca87eeac75 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core @@ -0,0 +1,51 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format 
b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format new file mode 100644 index 0000000000000000000000000000000000000000..09bbfdda972ca77cd26e290d70ab59db4bbe8a27 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation new file mode 100644 index 0000000000000000000000000000000000000000..51ef7ea118c34e8ac3da66c973bae02b9e8325d8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated new file mode 100644 index 0000000000000000000000000000000000000000..5f62a3ffa20be07dc813d6b8a154fa8451d0dc61 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json new file mode 100644 index 0000000000000000000000000000000000000000..8b26b1f89f9852e4f69f466dd7d718b21ba5b989 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json @@ -0,0 +1,172 @@ +{ + "$schema" : "http://json-schema.org/draft-03/schema#", + "id" : "http://json-schema.org/draft-03/schema#", + "type" : "object", + + "properties" : { + "type" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true, + "default" : "any" + }, + + "properties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "patternProperties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalProperties" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "items" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalItems" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "required" : { + "type" : "boolean", + "default" : false + }, + + "dependencies" : { + "type" : "object", + "additionalProperties" : { + "type" : 
["string", "array", {"$ref" : "#"}], + "items" : { + "type" : "string" + } + }, + "default" : {} + }, + + "minimum" : { + "type" : "number" + }, + + "maximum" : { + "type" : "number" + }, + + "exclusiveMinimum" : { + "type" : "boolean", + "default" : false + }, + + "exclusiveMaximum" : { + "type" : "boolean", + "default" : false + }, + + "minItems" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxItems" : { + "type" : "integer", + "minimum" : 0 + }, + + "uniqueItems" : { + "type" : "boolean", + "default" : false + }, + + "pattern" : { + "type" : "string", + "format" : "regex" + }, + + "minLength" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxLength" : { + "type" : "integer" + }, + + "enum" : { + "type" : "array", + "minItems" : 1, + "uniqueItems" : true + }, + + "default" : { + "type" : "any" + }, + + "title" : { + "type" : "string" + }, + + "description" : { + "type" : "string" + }, + + "format" : { + "type" : "string" + }, + + "divisibleBy" : { + "type" : "number", + "minimum" : 0, + "exclusiveMinimum" : true, + "default" : 1 + }, + + "disallow" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true + }, + + "extends" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "id" : { + "type" : "string" + }, + + "$ref" : { + "type" : "string" + }, + + "$schema" : { + "type" : "string", + "format" : "uri" + } + }, + + "dependencies" : { + "exclusiveMinimum" : "minimum", + "exclusiveMaximum" : "maximum" + }, + + "default" : {} +} diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__init__.py b/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55a868fd4beafa627dbb4005da8489228d30a7a5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc b/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..501f00d31f7d118cbdf0241d59b71dca4bd74288 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py b/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py new file mode 100644 index 0000000000000000000000000000000000000000..fd2927e0c8381f3eb93029df155758c05ed5eb26 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py @@ -0,0 +1,41 @@ +from collections.abc import Mapping +from pathlib import Path + +import pytest + +from jsonschema_specifications import REGISTRY + + +def test_it_contains_metaschemas(): + schema = REGISTRY.contents("http://json-schema.org/draft-07/schema#") + assert isinstance(schema, Mapping) + assert schema["$id"] == "http://json-schema.org/draft-07/schema#" + assert schema["title"] == "Core schema meta-schema" + + +def test_it_is_crawled(): + assert REGISTRY.crawl() == REGISTRY + + +@pytest.mark.parametrize( + "ignored_relative_path", + ["schemas/.DS_Store", "schemas/draft7/.DS_Store"], +) +def test_it_copes_with_dotfiles(ignored_relative_path): + """ 
+ Ignore files like .DS_Store if someone has actually caused one to exist. + + We test here through the private interface as of course the global has + already loaded our schemas. + """ + + import jsonschema_specifications + + package = Path(jsonschema_specifications.__file__).parent + + ignored = package / ignored_relative_path + ignored.touch() + try: + list(jsonschema_specifications._schemas()) + finally: + ignored.unlink() diff --git a/vllm/lib/python3.10/site-packages/partial_json_parser/__pycache__/playground.cpython-310.pyc b/vllm/lib/python3.10/site-packages/partial_json_parser/__pycache__/playground.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82e7becd63e831771bb846ebe25694b2456fd155 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/partial_json_parser/__pycache__/playground.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/LICENSE.txt b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..18756235ea57da332316fa67547d223aa57e0207 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/LICENSE.txt @@ -0,0 +1,15 @@ +* Copyright (C) 2015-2016 Data61, Commonwealth Scientific and Industrial Research Organisation (CSIRO). +* See the LICENCE.txt file distributed with this work for additional +* information regarding copyright ownership. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/METADATA b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..a70db076c0377831330e6bee3d8b10ea658083e9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/METADATA @@ -0,0 +1,49 @@ +Metadata-Version: 2.1 +Name: pyairports +Version: 2.1.1 +Summary: Airport and other locations database +Home-page: https://github.com/ozeliger/pyairports +Author: Jason Dsouza +License: Apache Software License +Keywords: Airports +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +License-File: LICENSE.txt + +# pyairports + +pyairports is a package which enables airport lookup by 3-letter IATA code. + +# Usage + +The package can be used in two different ways + +## import + +To use the package in python code, import and create a local instance of the Airports object. 
+ +```python +from pyairports.airports import Airports +airports = Airports() +airports.airport_iata(iata) # namedtuple(airport, [name, city, country, iata, icao, lat, lon, alt, tz, dst, tzdb]) or AirportNotFoundException +airports.other_iata(iata) # namedtuple(other, [iata, name, country, subdiv, type, lat, lon]) or AirportNotFoundException +airports.lookup(iata) # namedtuple(airport) or namedtuple(other) or AirportNotFoundException +``` + +## command line + +An entrypoint is created for command line querying: + +``` +[username@hostname ~]$ pyairports aaa +airport(name='Anaa', city='Anaa', country='French Polynesia', iata='AAA', icao='NTGA', lat='-17.352606', lon='-145.509956', alt='10', tz='-10', dst='U', tzdb='Pacific/Tahiti') +``` + + diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/RECORD b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2544d6e77fcbc727267abf43100f4ca51f6aacef --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/RECORD @@ -0,0 +1,15 @@ +../../../bin/pyairports,sha256=4dbRLU70jzBMDWMFfG9CApPzqOyO2BFww96DJKXo36E,228 +pyairports-2.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pyairports-2.1.1.dist-info/LICENSE.txt,sha256=dFkeq7GDdn8rei3RgIAXA4DZqg5GPe4gKE92F2g-1OQ,760 +pyairports-2.1.1.dist-info/METADATA,sha256=yl9-6iXyt5ud9Dwn-0qe9eiIliASbdut7BHG9ty8Qqs,1675 +pyairports-2.1.1.dist-info/RECORD,, +pyairports-2.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pyairports-2.1.1.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +pyairports-2.1.1.dist-info/entry_points.txt,sha256=l8Agxn4U2YO3GQ8yuVvNKrHx_eHBfFrphsjQW0mvc6Q,57 +pyairports-2.1.1.dist-info/top_level.txt,sha256=LJof-BN7ZIoxpLVIhWSmXdHfgH_nfzt2h8VTHYjElWM,11 +pyairports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pyairports/__pycache__/__init__.cpython-310.pyc,, +pyairports/__pycache__/airports.cpython-310.pyc,, +pyairports/airports.py,sha256=ld3KTbWnxMHJHT0b2MZ_gaAuuN7aloe1IAKbtIu0FsA,3728 +pyairports/data/airport_list.json,sha256=_MIE-PBraJOV6Rw_3i2iC1oUlCOlHg66z0ipGSXzVME,981892 +pyairports/data/other_list.json,sha256=eQ3sDqvtN_wAD9NXk7LD3VrDCAb4CMXuSlnBWHcb3A4,341114 diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..385faab0525ccdbfd1070a8bebcca3ac8617236e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/entry_points.txt b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..fc6a2fecb4fe5d46dfc07805b9b08b56b2c31008 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +pyairports = pyairports.airports:main + diff --git a/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..cdf2675967b7e902b5fe13e306c4d3ec4b224b07 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pyairports-2.1.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pyairports diff --git 
from __future__ import annotations

from warnings import warn
from contextlib import contextmanager
from enum import Enum
from functools import partial, wraps
import typing
from typing import Union, Callable, List, Sequence, TypeVar, Optional
import builtins
from ..runtime.jit import jit
import inspect
import os

from .._C.libtriton import ir
from . import semantic

T = TypeVar('T')

# Hard upper bound on the number of elements of a single Triton tensor.
TRITON_MAX_TENSOR_NUMEL = 1048576

# Attribute name used to tag functions registered as Triton builtins
# (written by `builtin`, read by `is_builtin`).
TRITON_BUILTIN = "__triton_builtin__"

PropagateNan = ir.PROPAGATE_NAN


def builtin(fn: T) -> T:
    """Mark a function as a builtin.

    The returned wrapper refuses to run unless a non-None ``_builder`` keyword
    argument is supplied, i.e. unless the call happens inside a JIT-compiled
    function where the compiler provides the IR builder.
    """
    assert callable(fn)

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if "_builder" not in kwargs or kwargs["_builder"] is None:
            raise ValueError("Did you forget to add @triton.jit ? "
                             "(`_builder` argument must be provided outside of JIT functions.)")
        return fn(*args, **kwargs)

    setattr(wrapper, TRITON_BUILTIN, True)

    return wrapper


def _tensor_member_fn(fn: T) -> T:
    """Decorator that adds this free function as a member fn on class tensor.

    When called as a member function on class tensor, the first argument to `fn`
    is `self`, i.e. the tensor object.

    If there are multiple decorators on a function, you probably want this one
    to be the highest one (i.e. furthest from the function's `def`), so it's
    applied last.

    Unfortunately you still need to add a type stub to the body of class tensor
    in order for pytype to know about it.
    """
    assert callable(fn)
    orig_sig = inspect.signature(fn)
    # Does fn take args other than _builder, _generator, and the tensor itself?
    has_args = len(orig_sig.parameters.keys() - {"_builder", "_generator"}) > 1

    if not fn.__doc__:
        fn.__doc__ = ""
    fn.__doc__ += f"""
    This function can also be called as a member function on :py:class:`tensor`,
    as :code:`x.{fn.__name__}({"..." if has_args else ""})` instead of
    :code:`{fn.__name__}(x{", ..." if has_args else ""})`.
    """

    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)

    # Match the signature of `fn`, but change the first arg to `self` so the
    # docs are a little less weird.
    new_params = list(orig_sig.parameters.values())
    new_params[0] = new_params[0].replace(name='self')
    new_sig = orig_sig.replace(parameters=new_params)
    wrapper.__signature__ = new_sig
    wrapper.__doc__ = f"Forwards to :py:func:`{fn.__name__}` free function"
    # If fn is a builtin, mark the wrapper as a builtin too.
    if is_builtin(fn):
        setattr(wrapper, TRITON_BUILTIN, True)

    setattr(tensor, fn.__name__, wrapper)
    return fn


def _unwrap_iterable(x):
    """Returns x[0] if x has one element and x[0] is iterable."""
    if len(x) == 1:
        # Determine whether x[0] is iterable.
        #
        # You might want to use collections.abc.Iterable instead of this
        # try/except block.  Unfortunately, this doesn't work with constexpr.
        #
        # The problem is that abc.Iterable checks for __iter__ on the *class*.
        # But we want constexpr to expose an __iter__ method if and only if the
        # wrapped *object* (i.e. self.value) is iterable.  Therefore there's no
        # right answer for whether the class constexpr defines __iter__, and
        # abc.Iterable doesn't work (at least not without some metaclass magic).
        try:
            iter(x[0])
            return x[0]
        except TypeError:
            pass

    return x


def is_builtin(fn) -> bool:
    """Is this a registered triton builtin function?"""
    return getattr(fn, TRITON_BUILTIN, False)


@builtin
def to_tensor(x, _builder=None):
    """Convert a Python scalar / constexpr / tensor to a Triton tensor."""
    return _to_tensor(x, _builder)


def _to_tensor(x, builder):
    """Lower a compile-time Python value to a `tensor` backed by `builder`.

    Integers pick the narrowest of int32/uint32/int64/uint64 that can
    represent the value; floats use fp32 when exactly representable in the
    normal fp32 range and fp64 otherwise.  Raises RuntimeError for integers
    outside the 64-bit range.
    """
    if isinstance(x, bool):
        return tensor(builder.get_int1(x), int1)
    # Note: compile-time const integers are represented by unsigned values
    elif isinstance(x, int):
        if -2**31 <= x < 2**31:
            return tensor(builder.get_int32(x), int32)
        elif 2**31 <= x < 2**32:
            return tensor(builder.get_uint32(x), uint32)
        elif -2**63 <= x < 2**63:
            return tensor(builder.get_int64(x), int64)
        elif 2**63 <= x < 2**64:
            return tensor(builder.get_uint64(x), uint64)
        else:
            raise RuntimeError(f'Nonrepresentable integer {x}.')
    elif isinstance(x, float):
        min_float32 = 2**-126
        max_float32 = (2 - 2**-23) * 2**127
        # BUG FIX: the original used `__builtins__['abs'](x)`.  `__builtins__`
        # is only guaranteed to be a dict in the `__main__` module; inside an
        # imported module it is the `builtins` module object, so subscripting
        # raises TypeError.  Use the explicitly imported `builtins` module.
        abs_x = builtins.abs(x)
        if abs_x == float("inf") or\
           abs_x == 0.0 or \
           x != x or \
           min_float32 <= abs_x <= max_float32:
            return tensor(builder.get_fp32(x), float32)
        else:
            return tensor(builder.get_fp64(x), float64)

    elif isinstance(x, constexpr):
        return _to_tensor(x.value, builder)
    elif isinstance(x, tensor):
        return x
    assert False, f"cannot convert {x} of type {type(x)} to tensor"


class dtype:
    """Represents a Triton scalar data type (int / uint / float / void).

    Instances are identified purely by `name`; block/pointer types subclass
    this elsewhere in the module.
    """

    SINT_TYPES = ['int8', 'int16', 'int32', 'int64']
    UINT_TYPES = ['int1', 'uint8', 'uint16', 'uint32', 'uint64']
    FP_TYPES = ['fp8e4b15', 'fp8e4nv', 'fp8e4b8', 'fp8e5', 'fp8e5b16', 'fp16', 'bf16', 'fp32', 'fp64']
    STANDARD_FP_TYPES = ['fp16', 'bf16', 'fp32', 'fp64']
    OTHER_TYPES = ['void']

    class SIGNEDNESS(Enum):
        SIGNED = 0
        UNSIGNED = 1

    def __init__(self, name):
        # Accept enum-like objects that carry the type name in `.value`.
        if hasattr(name, 'value'):
            name = name.value
        self.name = name
        assert name in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES, name
        if name in dtype.SINT_TYPES:
            self.int_signedness = dtype.SIGNEDNESS.SIGNED
            self.int_bitwidth = int(name.split('int')[-1])
            self.primitive_bitwidth = self.int_bitwidth
        elif name in dtype.UINT_TYPES:
            self.int_signedness = dtype.SIGNEDNESS.UNSIGNED
            self.int_bitwidth = int(name.split('int')[-1])
            self.primitive_bitwidth = self.int_bitwidth
        elif name in dtype.FP_TYPES:
            if name == 'fp8e4b15':
                self.fp_mantissa_width = 3
                self.primitive_bitwidth = 8
                self.exponent_bias = 15
            elif name == 'fp8e4nv':
                self.fp_mantissa_width = 3
                self.primitive_bitwidth = 8
                self.exponent_bias = 7
            elif name == 'fp8e4b8':
                self.fp_mantissa_width = 3
                self.primitive_bitwidth = 8
                self.exponent_bias = 8
            elif name == 'fp8e5':
                self.fp_mantissa_width = 2
                self.primitive_bitwidth = 8
                self.exponent_bias = 15
            elif name == 'fp8e5b16':
                self.fp_mantissa_width = 2
                self.primitive_bitwidth = 8
                self.exponent_bias = 16
            elif name == 'fp16':
                self.fp_mantissa_width = 10
                self.primitive_bitwidth = 16
                self.exponent_bias = 15
            elif name == 'bf16':
                self.fp_mantissa_width = 7
                self.primitive_bitwidth = 16
                self.exponent_bias = 127
            elif name == 'fp32':
                self.fp_mantissa_width = 23
                self.primitive_bitwidth = 32
                self.exponent_bias = 127
            elif name == 'fp64':
                # NOTE(review): 53 counts the implicit leading bit, whereas
                # the sibling entries (fp32=23, fp16=10, bf16=7) count only
                # explicit mantissa bits — confirm this asymmetry is intended
                # before relying on fp_mantissa_width uniformly.
                self.fp_mantissa_width = 53
                self.primitive_bitwidth = 64
                self.exponent_bias = 1023
            else:
                raise RuntimeError(f'Unsupported floating-point type {name}')
        elif name == 'void':
            self.primitive_bitwidth = 0

    def is_fp8(self):
        return 'fp8' in self.name

    def is_fp8e4nv(self):
        return self.name == 'fp8e4nv'

    def is_fp8e4b8(self):
        return self.name == 'fp8e4b8'

    def is_fp8e4b15(self):
        return self.name == 'fp8e4b15'

    def is_fp8e5(self):
        return self.name == 'fp8e5'

    def is_fp8e5b16(self):
        return self.name == 'fp8e5b16'

    def is_fp16(self):
        return self.name == 'fp16'

    def is_bf16(self):
        return self.name == 'bf16'

    def is_fp32(self):
        return self.name == 'fp32'

    def is_fp64(self):
        return self.name == 'fp64'

    def is_int1(self):
        return self.name == 'int1'

    def is_int8(self):
        return self.name == 'int8'

    def is_int16(self):
        return self.name == 'int16'

    def is_int32(self):
        return self.name == 'int32'

    def is_int64(self):
        return self.name == 'int64'

    def is_uint8(self):
        return self.name == 'uint8'

    def is_uint16(self):
        return self.name == 'uint16'

    def is_uint32(self):
        return self.name == 'uint32'

    def is_uint64(self):
        return self.name == 'uint64'

    def is_floating(self):
        return self.name in dtype.FP_TYPES

    def is_standard_floating(self):
        return self.name in dtype.STANDARD_FP_TYPES

    def is_int_signed(self):
        return self.name in dtype.SINT_TYPES

    def is_int_unsigned(self):
        return self.name in dtype.UINT_TYPES

    def is_int(self):
        return self.name in dtype.SINT_TYPES + dtype.UINT_TYPES

    def is_bool(self):
        return self.is_int1()

    @staticmethod
    def is_dtype(type_str):
        return type_str in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES

    @staticmethod
    def is_void():
        raise RuntimeError("Not implemented")

    @staticmethod
    def is_block():
        return False

    @staticmethod
    def is_ptr():
        return False

    @staticmethod
    def is_const():
        return False

    def __eq__(self, other: dtype):
        if not isinstance(other, dtype):
            return False
        return self.name == other.name

    def __ne__(self, other: dtype):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.name, ))

    @property
    def scalar(self):
        # Scalar types are their own scalar; block_type overrides this.
        return self

    def to_ir(self, builder: ir.builder) -> ir.type:
        """Map this dtype to the corresponding IR type via `builder`."""
        if self.name == 'void':
            return builder.get_void_ty()
        elif self.name == 'int1':
            return builder.get_int1_ty()
        elif self.name in ('int8', 'uint8'):
            return builder.get_int8_ty()
        elif self.name in ('int16', 'uint16'):
            return builder.get_int16_ty()
        elif self.name in ('int32', 'uint32'):
            return builder.get_int32_ty()
        elif self.name in ('int64', 'uint64'):
            return builder.get_int64_ty()
        elif self.name == 'fp8e5':
            return builder.get_fp8e5_ty()
        elif self.name == 'fp8e5b16':
            return builder.get_fp8e5b16_ty()
        elif self.name == 'fp8e4nv':
            return builder.get_fp8e4nv_ty()
        elif self.name == 'fp8e4b8':
            return builder.get_fp8e4b8_ty()
        elif self.name == 'fp8e4b15':
            return builder.get_fp8e4b15_ty()
        elif self.name == 'fp16':
            return builder.get_half_ty()
        elif self.name == 'bf16':
            return builder.get_bf16_ty()
        elif self.name == 'fp32':
            return builder.get_float_ty()
        elif self.name == 'fp64':
            return builder.get_double_ty()
        raise ValueError(f'fail to convert {self} to ir type')

    def __str__(self):
        return self.name

    def codegen_name(self):
        # 'fp32' -> 'float32', 'bf16' -> 'bfloat16'; int names pass through.
        if self.name.startswith("fp"):
            return "float" + self.name[2:]
        elif self.name.startswith("bf"):
            return "bfloat" + self.name[2:]
        else:
            return self.name

    @property
    def cache_key_part(self) -> str:
        """See cache_key_part() in triton.cc."""
        return self.name

    def __repr__(self):
        """Output of repr needs to be an evaluatable expression"""
        return f'triton.language.{self.codegen_name()}'
# Some functions have a param named `dtype`, which shadows the `dtype` class.
# We can't change the param name because it is part of function's public API.
# Declare an alias so those functions can still reference the dtype class.
_DtypeClass = dtype


class pointer_type(dtype):
    """Scalar pointer to values of `element_ty` in a given address space."""

    def __init__(self, element_ty: dtype, address_space: int = 1):
        if not isinstance(element_ty, dtype):
            raise TypeError(f'element_ty is a {type(element_ty).__name__}.')
        self.element_ty = element_ty
        self.address_space = address_space
        self.name = f'pointer<{element_ty}>'

    def to_ir(self, builder: ir.builder) -> ir.pointer_type:
        # NOTE(review): the stored address_space is not forwarded; the IR
        # pointer is always built with address space 1, as in the original.
        return builder.get_ptr_ty(self.element_ty.to_ir(builder), 1)

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def is_ptr(self):
        return True

    def __eq__(self, other: pointer_type) -> bool:
        if not isinstance(other, pointer_type):
            return False
        return (self.element_ty, self.address_space) == (other.element_ty, other.address_space)

    def __ne__(self, other: pointer_type) -> bool:
        return not self.__eq__(other)

    @property
    def scalar(self):
        return self


class const_pointer_type(pointer_type):
    """Pointer to constant data; `store` through such a pointer is rejected."""

    def __init__(self, element_ty: dtype, address_space: int = 1):
        super().__init__(element_ty, address_space)

    def __str__(self):
        return f'const_pointer<{self.element_ty}>'

    def is_const(self):
        return True

    def __eq__(self, other) -> bool:
        if not isinstance(other, const_pointer_type):
            return False
        return (self.element_ty, self.address_space) == (other.element_ty, other.address_space)


class block_type(dtype):
    """An N-dimensional block of `element_ty` with a static, non-empty shape."""

    def __init__(self, element_ty: dtype, shape: List):
        self.element_ty = element_ty

        # Note that block_type's shape is a list of int
        # while tensor's shape is a list of constexpr.

        # shape can be empty ([]) when an input is a 0D tensor.
        if not shape:
            raise TypeError('0d block_type is forbidden')
        if isinstance(shape[0], constexpr):
            shape = [dim.value for dim in shape]

        self.shape = shape
        numel = 1
        for dim in self.shape:
            numel *= dim
        self.numel = numel
        if self.numel > TRITON_MAX_TENSOR_NUMEL:
            raise ValueError(f"numel ({self.numel}) exceeds triton maximum tensor numel ({TRITON_MAX_TENSOR_NUMEL})")

        self.name = f'<{self.shape}, {self.element_ty}>'

    def to_ir(self, builder: ir.builder) -> ir.block_type:
        return builder.get_block_ty(self.element_ty.to_ir(builder), self.shape)

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def is_block(self):
        return True

    def get_block_shapes(self) -> List[int]:
        return self.shape

    def __eq__(self, other: block_type) -> bool:
        if not isinstance(other, block_type):
            return False
        return (self.element_ty, self.shape) == (other.element_ty, other.shape)

    def __ne__(self, other: block_type) -> bool:
        return not self.__eq__(other)

    @property
    def scalar(self):
        return self.element_ty


class function_type(dtype):
    """Type of a function: parameter dtypes and return dtypes."""

    def __init__(self, ret_types: List[dtype], param_types: List[dtype]) -> None:
        self.ret_types = ret_types
        self.param_types = param_types

    def __str__(self):
        return f'fn ({self.param_types}) -> {self.ret_types}'

    def to_ir(self, builder: ir.builder):
        ir_param_types = [param_ty.to_ir(builder) for param_ty in self.param_types]
        ret_types = [ret_ty.to_ir(builder) for ret_ty in self.ret_types]
        return builder.get_function_ty(ir_param_types, ret_types)


# scalar types
void = dtype('void')
int1 = dtype('int1')
int8 = dtype('int8')
int16 = dtype('int16')
int32 = dtype('int32')
int64 = dtype('int64')
uint8 = dtype('uint8')
uint16 = dtype('uint16')
uint32 = dtype('uint32')
uint64 = dtype('uint64')
float8e5 = dtype('fp8e5')
float8e5b16 = dtype('fp8e5b16')
float8e4nv = dtype('fp8e4nv')
float8e4b8 = dtype('fp8e4b8')
float8e4b15 = dtype('fp8e4b15')
float16 = dtype('fp16')
bfloat16 = dtype('bf16')
float32 = dtype('fp32')
float64 = dtype('fp64')
# pointer types
pi32_t = pointer_type(int32)

# Lookup table for get_int_dtype: (bitwidth, signed) -> dtype.
_INT_DTYPE_TABLE = {
    (8, True): int8, (8, False): uint8,
    (16, True): int16, (16, False): uint16,
    (32, True): int32, (32, False): uint32,
    (64, True): int64, (64, False): uint64,
}


def get_int_dtype(bitwidth: int, signed: bool) -> dtype:
    """Return the integer dtype with the given width and signedness.

    Width 1 always maps to int1 regardless of `signed`.  Raises ValueError
    for unsupported widths.
    """
    if bitwidth == 1:
        return int1
    result = _INT_DTYPE_TABLE.get((bitwidth, bool(signed)))
    if result is None:
        raise ValueError(f'Unsupported bitwidth {bitwidth} and signedness {signed}')
    return result


# -----------------------
# constexpr
# -----------------------


class const:
    """
    This class is used as a type annotation to mark pointers to constant data.
    The `store` function cannot be called with a pointer to const. Constness
    is part of the pointer type and the usual Triton type consistency rules
    apply. For example you cannot have a function that returns constant pointer
    in one return statement and non-constant pointer in another.
    """
    pass


class constexpr:
    """
    This class is used to store a value that is known at compile-time.
    """

    def __init__(self, value):
        # Unwrap nested constexprs so .value is always a plain value.
        if isinstance(value, constexpr):
            self.value = value.value
        else:
            self.value = value

    def __repr__(self) -> str:
        return f"constexpr[{self.value}]"

    def __index__(self):
        return self.value

    # In interpreter mode, constant values are not wrapped in constexpr,
    # and therefore do not have a .value attribute.
    # As a result, from here and below, we need to call the _constexpr_to_value
    # function to obtain either constexpr.value or the value itself.
    def __add__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value + rhs)

    def __radd__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs + self.value)

    def __sub__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value - rhs)

    def __rsub__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs - self.value)

    def __mul__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value * rhs)

    def __mod__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value % rhs)

    def __rmul__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs * self.value)

    def __truediv__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value / rhs)

    def __rtruediv__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs / self.value)

    def __floordiv__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value // rhs)

    def __rfloordiv__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs // self.value)

    def __gt__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value > rhs)

    def __rgt__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs > self.value)

    def __ge__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value >= rhs)

    def __rge__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs >= self.value)

    def __lt__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value < rhs)

    def __rlt__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs < self.value)

    def __le__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value <= rhs)

    def __rle__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs <= self.value)

    def __eq__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value == rhs)

    def __ne__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value != rhs)

    def __bool__(self):
        return bool(self.value)

    def __neg__(self):
        return constexpr(-self.value)

    def __and__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value & rhs)

    def logical_and(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value and rhs)

    def __or__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value | rhs)

    def __xor__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value ^ rhs)

    def logical_or(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value or rhs)

    def __pos__(self):
        return constexpr(+self.value)

    def __invert__(self):
        return constexpr(~self.value)

    def __pow__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value**rhs)

    def __rpow__(self, other):
        lhs = _constexpr_to_value(other)
        return constexpr(lhs**self.value)

    def __rshift__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value >> rhs)

    def __lshift__(self, other):
        rhs = _constexpr_to_value(other)
        return constexpr(self.value << rhs)

    def __not__(self):
        return constexpr(not self.value)

    def __iter__(self):
        return iter(self.value)

    def __call__(self, *args, **kwds):
        return self.value(*args, **kwds)


CONSTEXPR_0 = constexpr(0)


def check_bit_width(value, shift_value):
    """Warn when a constexpr shift amount is >= the operand's bit width."""
    if not (isinstance(value, tensor) and isinstance(shift_value, constexpr)):
        return
    bitwidth = value.type.scalar.primitive_bitwidth
    if shift_value.value >= bitwidth:
        warn(
            f"Value {shift_value.value} exceeds the maximum bitwidth ({bitwidth}) for type '{value.dtype}'. This may result in undefined behavior."
        )


class tensor:
    """Represents an N-dimensional array of values or pointers.

    :code:`tensor` is the fundamental data structure in Triton programs. Most
    functions in :py:mod:`triton.language` operate on and return tensors.

    Most of the named member functions here are duplicates of the free functions
    in :code:`triton.language`. For example, :code:`triton.language.sqrt(x)` is
    equivalent to :code:`x.sqrt()`.

    :code:`tensor` also defines most of the magic/dunder methods, so you can
    write :code:`x+y`, :code:`x << 2`, etc.

    .. rubric:: Constructors
    ..
       For some reason Sphinx includes __init__ before printing the full table
       of methods. Not what I want, but I can't figure out how to fix it. Give
       it its own section so it looks intentional. :)

    NOTE(review): this class continues beyond this chunk of the file; only the
    constructor and the first group of arithmetic operators appear here.
    """

    def __init__(self, handle, type: dtype):
        """Not called by user code."""
        # IR handle
        self.handle = handle
        # Block shape
        self.shape = type.shape if type.is_block() else ()
        numel = 1
        for dim in self.shape:
            numel *= dim
        self.numel = constexpr(numel)
        self.type = type  # Tensor type (can be block_type)
        # Following the practice in pytorch, dtype is scalar type
        self.dtype = type.scalar
        self.shape = [constexpr(dim) for dim in self.shape]

    def __str__(self) -> str:
        # ex. "float32[16, 32]"
        dims = ', '.join(str(dim) for dim in self.shape)
        return f'{self.dtype}[{dims}]'

    @builtin
    def __add__(self, other, _builder=None):
        rhs = _to_tensor(other, _builder)
        return semantic.add(self, rhs, _builder)

    @builtin
    def __radd__(self, other, _builder=None):
        return self.__add__(other, _builder=_builder)

    @builtin
    def __sub__(self, other, _builder=None):
        rhs = _to_tensor(other, _builder)
        return semantic.sub(self, rhs, _builder)

    @builtin
    def __rsub__(self, other, _builder=None):
        lhs = _to_tensor(other, _builder)
        return semantic.sub(lhs, self, _builder)

    @builtin
    def __mul__(self, other, _builder=None):
        rhs = _to_tensor(other, _builder)
        return semantic.mul(self, rhs, _builder)

    @builtin
    def __rmul__(self, other, _builder=None):
        return self.__mul__(other, _builder=_builder)

    @builtin
    def __truediv__(self, other, _builder=None):
        rhs = _to_tensor(other, _builder)
        return semantic.truediv(self, rhs, _builder)

    @builtin
    def __rtruediv__(self, other, _builder=None):
        lhs = _to_tensor(other, _builder)
        return semantic.truediv(lhs, self, _builder)
+ + :code:`tensor` also defines most of the magic/dunder methods, so you can + write :code:`x+y`, :code:`x << 2`, etc. + + .. rubric:: Constructors + .. + For some reason Sphinx includes __init__ before printing the full table + of methods. Not what I want, but I can't figure out how to fix it. Give + it its own section so it looks intentional. :) + """ + + def __init__(self, handle, type: dtype): + """Not called by user code.""" + # IR handle + self.handle = handle + # Block shape + self.shape = type.shape if type.is_block() else () + self.numel = 1 + for s in self.shape: + self.numel *= s + self.numel = constexpr(self.numel) + self.type = type # Tensor type (can be block_type) + # Following the practice in pytorch, dtype is scalar type + self.dtype = type.scalar + self.shape = [constexpr(s) for s in self.shape] + + def __str__(self) -> str: + # ex. "float32[16, 32]" + return str(self.dtype) + '[' + ', '.join(str(s) for s in self.shape) + ']' + + @builtin + def __add__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.add(self, other, _builder) + + @builtin + def __radd__(self, other, _builder=None): + return self.__add__(other, _builder=_builder) + + @builtin + def __sub__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.sub(self, other, _builder) + + @builtin + def __rsub__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.sub(other, self, _builder) + + @builtin + def __mul__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mul(self, other, _builder) + + @builtin + def __rmul__(self, other, _builder=None): + return self.__mul__(other, _builder=_builder) + + @builtin + def __truediv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.truediv(self, other, _builder) + + @builtin + def __rtruediv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return 
semantic.truediv(other, self, _builder) + + @builtin + def __floordiv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.floordiv(self, other, _builder) + + @builtin + def __rfloordiv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.floordiv(other, self, _builder) + + @builtin + def __mod__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mod(self, other, _builder) + + @builtin + def __rmod__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mod(other, self, _builder) + + # unary operators + @builtin + def __neg__(self, _builder=None): + return semantic.minus(self, _builder) + + @builtin + def __invert__(self, _builder=None): + return semantic.invert(self, _builder) + + # bitwise operators + + @builtin + def __and__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.and_(self, other, _builder) + + @builtin + def __rand__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.and_(other, self, _builder) + + @builtin + def __or__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.or_(self, other, _builder) + + @builtin + def __ror__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.or_(other, self, _builder) + + @builtin + def __xor__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.xor_(self, other, _builder) + + @builtin + def __rxor__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.xor_(other, self, _builder) + + @builtin + def __lshift__(self, other, _builder=None): + check_bit_width(self, other) + other = _to_tensor(other, _builder) + return semantic.shl(self, other, _builder) + + @builtin + def __rlshift__(self, other, _builder=None): + check_bit_width(other, self) + other = _to_tensor(other, _builder) + return 
semantic.shl(other, self, _builder) + + @builtin + def __rshift__(self, other, _builder=None): + check_bit_width(self, other) + other = _to_tensor(other, _builder) + if self.dtype.is_int_signed(): + return semantic.ashr(self, other, _builder) + else: + return semantic.lshr(self, other, _builder) + + @builtin + def __rrshift__(self, other, _builder=None): + check_bit_width(other, self) + other = _to_tensor(other, _builder) + if self.dtype.is_int_signed(): + return semantic.ashr(other, self, _builder) + else: + return semantic.lshr(other, self, _builder) + + # > + @builtin + def __gt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_than(self, other, _builder) + + @builtin + def __rgt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_than(other, self, _builder) + + # >= + @builtin + def __ge__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_equal(self, other, _builder) + + @builtin + def __rge__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_equal(other, self, _builder) + + # < + @builtin + def __lt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_than(self, other, _builder) + + @builtin + def __rlt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_than(other, self, _builder) + + # <= + @builtin + def __le__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_equal(self, other, _builder) + + @builtin + def __rle__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_equal(other, self, _builder) + + # == + @builtin + def __eq__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.equal(self, other, _builder) + + @builtin + def __req__(self, other, _builder=None): + other = _to_tensor(other, _builder) + 
return semantic.equal(other, self, _builder) + + @builtin + def __ne__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.not_equal(self, other, _builder) + + @builtin + def __rne__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.not_equal(other, self, _builder) + + @builtin + def logical_and(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_and(self, other, _builder) + + @builtin + def logical_or(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_or(self, other, _builder) + + # note: __not__ isn't actually a magic method in python + # but it's ok because our ASTVisitor handles it + @builtin + def __not__(self, _builder=None): + return semantic.not_(self, _builder) + + @builtin + def __getitem__(self, slices, _builder=None): + if isinstance(slices, (slice, constexpr)) or slices is None: + slices = [slices] + ret = self + for dim, sl in enumerate(slices): + if sl is None or isinstance(sl, constexpr) and sl.value is None: + ret = semantic.expand_dims(ret, dim, _builder) + elif isinstance(sl, slice) and sl.start is None and sl.stop is None and sl.step is None: + pass + else: + raise ValueError(f"unsupported tensor index: {sl}") + return ret + + @property + def T(self): + """Transposes a 2D tensor.""" + assert False, "Transposition must be created by the AST Visitor" + + @builtin + def to(self, dtype: dtype, fp_downcast_rounding: Optional[str] = None, bitcast: bool = False, _builder=None): + """ + Alias for :py:func:`tensor.cast`. + """ + # Triton doesn't like core functions calling other core functions, so we + # just copy-paste the implementation of cast here. It's not too bad. 
+ if isinstance(bitcast, constexpr): + bitcast = bitcast.value + if bitcast: + return semantic.bitcast(self, dtype, _builder) + return semantic.cast(self, dtype, _builder, fp_downcast_rounding) + + # Type stubs for functions added by the _tensor_member_fn decorator. + # (Unfortunately these can't be created automatically.) + # + # We couldn't write these definitions out even if we wanted to, because some + # of these functions are defined in standard.py. + def broadcast_to(self, *shape) -> tensor: + ... + + def trans(self, *dims) -> tensor: + ... + + def permute(self, *dims) -> tensor: + ... + + def split(self) -> tuple[tensor, tensor]: + ... + + def view(self, *shape) -> tensor: + ... + + def reshape(self, *shape) -> tensor: + ... + + def expand_dims(self, axis) -> tensor: + ... + + def cast(self, dtype, fp_downcast_rounding=None, bitcast=False) -> tensor: + ... + + def store(self, value, mask=None, boundary_check=(), cache_modifier="", eviction_policy="") -> tensor: + ... + + def advance(self, offsets) -> tensor: + ... + + def atomic_cas(self, cmp, val, sem=None, scope=None) -> tensor: + ... + + def atomic_xchg(self, val, mask=None, sem=None, scope=None) -> tensor: + ... + + def atomic_add(self, val, mask=None, sem=None, scope=None) -> tensor: + ... + + def atomic_max(self, val, mask=None, sem=None, scope=None) -> tensor: + ... + + def atomic_min(self, val, mask=None, sem=None, scope=None) -> tensor: + ... + + def atomic_and(self, val, mask=None, sem=None, scope=None) -> tensor: + ... + + def atomic_or(self, val, mask=None, sem=None, scope=None) -> tensor: + ... + + def atomic_xor(self, val, mask=None, sem=None, scope=None) -> tensor: + ... + + def exp(self) -> tensor: + ... + + def log(self) -> tensor: + ... + + def cos(self) -> tensor: + ... + + def sin(self) -> tensor: + ... + + def sqrt(self) -> tensor: + ... + + def rsqrt(self) -> tensor: + ... + + def abs(self) -> tensor: + ... + + def reduce(self, axis, combine_fn, keep_dims=False) -> tensor: + ... 
+ + def associative_scan(self, axis, combine_fn, reverse=False) -> tensor: + ... + + def histogram(self, num_bins) -> tensor: + ... + + def cdiv(self, div) -> tensor: + ... + + def sigmoid(self) -> tensor: + ... + + def softmax(self, ieee_rounding=False) -> tensor: + ... + + def ravel(self) -> tensor: + ... + + def max(self, axis=None, return_indices=False, return_indices_tie_break_left=True, keep_dims=False) -> tensor: + ... + + def argmax(self, axis, tie_break_left=True, keep_dims=False) -> tensor: + ... + + def min(self, axis=None, return_indices=False, return_indices_tie_break_left=True, keep_dims=False) -> tensor: + ... + + def argmin(self, axis, tie_break_left=True, keep_dims=False) -> tensor: + ... + + def sum(self, axis=None, keep_dims=False) -> tensor: + ... + + def xor_sum(self, axis=None, keep_dims=False) -> tensor: + ... + + def cumsum(self, axis=0, reverse=False) -> tensor: + ... + + def cumprod(self, axis=0, reverse=False) -> tensor: + ... + + def sort(self, dim: constexpr = None, descending: constexpr = CONSTEXPR_0) -> tensor: + ... + + def flip(self, dim=None) -> tensor: + ... + + +def get_bool_env_var(var_name): + v = os.getenv(var_name, "0") + return v == "1" or v == "true" or v == "on" + + +# ----------------------- +# SPMD Programming Model +# ----------------------- +def _constexpr_to_value(v): + if isinstance(v, constexpr): + return v.value + return v + + +@builtin +def program_id(axis, _builder=None): + """ + Returns the id of the current program instance along the given :code:`axis`. + + :param axis: The axis of the 3D launch grid. Must be 0, 1 or 2. 
+ :type axis: int + """ + # if axis == -1: + # pid0 = program_id(0, _builder) + # pid1 = program_id(1, _builder) + # pid2 = program_id(2, _builder) + # npg0 = num_programs(0, _builder) + # npg1 = num_programs(0, _builder) + # return pid0 + pid1*npg0 + pid2*npg0*npg1 + axis = _constexpr_to_value(axis) + return semantic.program_id(axis, _builder) + + +@builtin +def num_programs(axis, _builder=None): + """ + Returns the number of program instances launched along the given :code:`axis`. + + :param axis: The axis of the 3D launch grid. Must be 0, 1 or 2. + :type axis: int + """ + axis = _constexpr_to_value(axis) + return semantic.num_programs(axis, _builder) + + +# ----------------------- +# Block Initialization +# ----------------------- + + +@builtin +def arange(start, end, _builder=None): + """ + Returns contiguous values within the half-open interval :code:`[start, + end)`. :code:`end - start` must be less than or equal to + :code:`TRITON_MAX_TENSOR_NUMEL = 131072` + + :param start: Start of the interval. Must be a power of two. + :type start: int32 + :param end: End of the interval. Must be a power of two greater than + :code:`start`. + :type end: int32 + """ + start = _constexpr_to_value(start) + end = _constexpr_to_value(end) + return semantic.arange(start, end, _builder) + + +def _shape_check_impl(shape): + shape = _constexpr_to_value(shape) + for i, d in enumerate(shape): + if isinstance(d, int): + d = constexpr(d) + if not isinstance(d, constexpr): + raise TypeError(f"Shape element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + if d.value & (d.value - 1) != 0: + raise ValueError(f"Shape element {i} must be a power of 2") + return [_constexpr_to_value(x) for x in shape] + + +@builtin +def full(shape, value, dtype, _builder=None): + """ + Returns a tensor filled with the scalar value for the given :code:`shape` and :code:`dtype`. 
    :param shape: Shape of the new array, e.g., (8, 16) or (8, )
    :type shape: tuple of ints
    :param value: A scalar value to fill the array with
    :param dtype: Data-type of the new array, e.g., :code:`tl.float16`
    :type dtype: DType
    """
    shape = _shape_check_impl(shape)
    value = _constexpr_to_value(value)
    dtype = _constexpr_to_value(dtype)
    return semantic.full(shape, value, dtype, _builder)


# -----------------------
# Shape Manipulation
# -----------------------


@builtin
def broadcast(input, other, _builder=None):
    """
    Tries to broadcast the two given blocks to a common compatible shape.

    :param input: The first input tensor.
    :type input: Block
    :param other: The second input tensor.
    :type other: Block
    """
    return semantic.broadcast_impl_value(input, other, _builder)


@_tensor_member_fn
@builtin
def broadcast_to(input, *shape, _builder=None):
    """
    Tries to broadcast the given tensor to a new :code:`shape`.

    :param input: The input tensor.
    :type input: Block
    :param shape: The desired shape.
    :type shape: tuple of ints

    :code:`shape` can be passed as a tuple or as individual parameters: ::

        # These are equivalent
        broadcast_to(x, (32, 32))
        broadcast_to(x, 32, 32)
    """
    shape = _shape_check_impl(_unwrap_iterable(shape))
    return semantic.broadcast_impl_shape(input, shape, _builder)


@_tensor_member_fn
@builtin
def trans(input: tensor, *dims, _builder=None):
    """
    Permutes the dimensions of a tensor.

    If no permutation is specified, tries to do a (1,0) permutation, i.e. tries
    to transpose a 2D tensor.

    :param input: The input tensor.
    :param dims: The desired ordering of dimensions.  For example,
        :code:`(2, 1, 0)` reverses the order dims in a 3D tensor.
    :code:`dims` can be passed as a tuple or as individual parameters: ::

        # These are equivalent
        trans(x, (2, 1, 0))
        trans(x, 2, 1, 0)

    :py:func:`permute` is equivalent to this function, except it doesn't
    have the special case when no permutation is specified.
    """
    if not dims:
        dims = (1, 0)
    return semantic.permute(input, dims, _builder)


@_tensor_member_fn
@builtin
def permute(input, *dims, _builder=None):
    """
    Permutes the dimensions of a tensor.

    :param input: The input tensor.
    :type input: Block
    :param dims: The desired ordering of dimensions.  For example,
        :code:`(2, 1, 0)` reverses the order dims in a 3D tensor.

    :code:`dims` can be passed as a tuple or as individual parameters: ::

        # These are equivalent
        permute(x, (2, 1, 0))
        permute(x, 2, 1, 0)

    :py:func:`trans` is equivalent to this function, except when
    :code:`dims` is empty, it tries to do a (1,0) permutation.
    """
    dims = _unwrap_iterable(dims)
    return semantic.permute(input, dims, _builder)


@builtin
def cat(input, other, can_reorder=False, _builder=None):
    """
    Concatenate the given blocks

    :param input: The first input tensor.
    :type input: Block
    :param other: The second input tensor.
    :type other: Block
    :param can_reorder: Compiler hint. If true, the compiler is
        allowed to reorder elements while concatenating inputs.  Only use if the
        order does not matter (e.g., result is only used in reduction ops)
    """
    return semantic.cat(input, other, can_reorder, _builder)


@builtin
def join(a, b, _builder=None):
    """
    Join the given tensors in a new, minor dimension.

    For example, given two tensors of shape (4,8), produces a new tensor of
    shape (4,8,2).  Given two scalars, returns a tensor of shape (2).

    The two inputs are broadcasted to be the same shape.

    If you want to join more than two elements, you can use multiple calls to
    this function.  This reflects the constraint in Triton that tensors must
    have power-of-two sizes.
+ + join is the inverse of split. + + :param a: The first input tensor. + :type a: Tensor + :param b: The second input tensor. + :type b: Tensor + """ + return semantic.join(a, b, _builder) + + +@jit +def _take_first(a, b): + return a + + +@_tensor_member_fn +@builtin +def split(a, _builder=None, _generator=None) -> tuple[tensor, tensor]: + """ + Split a tensor in two along its last dim, which must have size 2. + + For example, given a tensor of shape (4,8,2), produces two tensors of shape + (4,8). Given a tensor of shape (2), returns two scalars. + + If you want to split into more than two pieces, you can use multiple calls + to this function (probably plus calling reshape). This reflects the + constraint in Triton that tensors must have power-of-two sizes. + + split is the inverse of join. + + :param a: The tensor to split. + :type a: Tensor + """ + # If len(a.shape) == 1, i.e. a.shape == [2], we should return two scalars. + # But semantic.split can only handle returning tensors. Work around this by + # expanding the input to shape [1,2] and then reducing the result. + was_rank_1 = len(a.shape) == 1 + if was_rank_1: + a = semantic.expand_dims(a, 0, _builder) + + out_lhs, out_rhs = semantic.split(a, _builder) + + if was_rank_1: + # Currently `reduce` is the best way to convert a tensor of shape [1] to a scalar. + out_lhs = typing.cast(tensor, reduce(out_lhs, None, _take_first, _builder=_builder, _generator=_generator)) + out_rhs = typing.cast(tensor, reduce(out_rhs, None, _take_first, _builder=_builder, _generator=_generator)) + + return out_lhs, out_rhs + + +@_tensor_member_fn +@builtin +def view(input, *shape, _builder=None): + """ + Returns a tensor with the same elements as `input` but a different shape. + The order of the elements may not be preserved. + + :param input: The input tensor. + :type input: Block + :param shape: The desired shape. 
+ + :code:`shape` can be passed as a tuple or as individual parameters: :: + + # These are equivalent + view(x, (32, 32)) + view(x, 32, 32) + """ + warn("view is deprecated, please use reshape with can_reorder being true.") + shape = _shape_check_impl(_unwrap_iterable(shape)) + return semantic.reshape(input, shape, can_reorder=True, builder=_builder) + + +@_tensor_member_fn +@builtin +def reshape(input, *shape, can_reorder=False, _builder=None): + """ + Returns a tensor with the same number of elements as input but with the + provided shape. + + :param input: The input tensor. + :type input: Block + :param shape: The new shape. + + :code:`shape ` can be passed as a tuple or as individual parameters: :: + + # These are equivalent + reshape(x, (32, 32)) + reshape(x, 32, 32) + """ + shape = _shape_check_impl(_unwrap_iterable(shape)) + return semantic.reshape(input, shape, can_reorder, _builder) + + +def _wrap_axis(axis, ndim): + if not (-ndim <= axis < ndim): + raise ValueError(f"invalid axis {axis}. Expected {-ndim} <= axis < {ndim}") + + return axis if axis >= 0 else axis + ndim + + +@_tensor_member_fn +@builtin +def expand_dims(input, axis, _builder=None): + """ + Expand the shape of a tensor, by inserting new length-1 dimensions. + + Axis indices are with respect to the resulting tensor, so + ``result.shape[axis]`` will be 1 for each axis. + + :param input: The input tensor. 
+ :type input: tl.tensor + :param axis: The indices to add new axes + :type axis: int | Sequence[int] + + """ + input = _to_tensor(input, _builder) + axis = _constexpr_to_value(axis) + axes = list(axis) if isinstance(axis, Sequence) else [axis] + new_ndim = len(input.shape) + len(axes) + axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes] + + if len(set(axes)) != len(axes): + raise ValueError(f"expand_dims received duplicate axes, normalized axes = {axes}") + + ret = input + for a in sorted(axes): + ret = semantic.expand_dims(ret, a, _builder) + return ret + + +@_tensor_member_fn +@builtin +def cast(input, dtype: dtype, fp_downcast_rounding: Optional[str] = None, bitcast: bool = False, _builder=None): + """ + Casts a tensor to the given :code:`dtype`. + + :param dtype: The target data type. + :param fp_downcast_rounding: The rounding mode for downcasting + floating-point values. This parameter is only used when self is a + floating-point tensor and dtype is a floating-point type with a + smaller bitwidth. Supported values are :code:`"rtne"` (round to + nearest, ties to even) and :code:`"rtz"` (round towards zero). + :param bitcast: If true, the tensor is bitcasted to the given + :code:`dtype`, instead of being numerically casted. + """ + input = _to_tensor(input, _builder) + if isinstance(bitcast, constexpr): + bitcast = bitcast.value + if bitcast: + return semantic.bitcast(input, dtype, _builder) + return semantic.cast(input, dtype, _builder, fp_downcast_rounding) + + +# ----------------------- +# Linear Algebra +# ----------------------- + + +@builtin +def dot(input, other, acc=None, input_precision=None, allow_tf32=None, max_num_imprecise_acc=None, out_dtype=float32, + _builder=None): + """ + Returns the matrix product of two blocks. + + The two blocks must be two-dimensional and have compatible inner dimensions. + + :param input: The first tensor to be multiplied. 
    :type input: 2D tensor of scalar-type in {:code:`int8`, :code:`float8_e5m2`, :code:`float16`, :code:`bfloat16`, :code:`float32`}
    :param other: The second tensor to be multiplied.
    :type other: 2D tensor of scalar-type in {:code:`int8`, :code:`float8_e5m2`, :code:`float16`, :code:`bfloat16`, :code:`float32`}
    :param input_precision: How to exercise the Tensor Cores for f32 x f32. If
        the device does not have Tensor Cores or the inputs are not of dtype f32,
        this option is ignored.  For devices that do have tensor cores, the
        default precision is tf32.
    :type input_precision: string. Available options for nvidia: :code:`"tf32"`, :code:`"tf32x3"`, :code:`"ieee"`. Default: :code:`"tf32"`. Available options for amd: :code:`"ieee"`.
    :param allow_tf32: *Deprecated.*  If true, input_precision is set to "tf32".
        Only one of :code:`input_precision` and :code:`allow_tf32` can be
        specified (i.e. at least one must be :code:`None`).
    """
    assert input_precision is None or allow_tf32 is None, "Only one of input_precision and allow_tf32 can be specified"
    if input_precision is None:
        # The deprecated allow_tf32 flag only influences the default; an
        # explicit TRITON_F32_DEFAULT env var overrides it.
        supports_tf32 = _builder and "tf32" in _builder.options.allowed_dot_input_precisions
        default_precision = "tf32" if (supports_tf32 and (allow_tf32 or allow_tf32 is None)) else "ieee"
        input_precision = os.getenv("TRITON_F32_DEFAULT", default_precision)

    input_precision = _constexpr_to_value(input_precision)
    out_dtype = _constexpr_to_value(out_dtype)
    max_num_imprecise_acc = _constexpr_to_value(max_num_imprecise_acc)
    return semantic.dot(input, other, acc, input_precision, max_num_imprecise_acc, out_dtype, _builder)


# -----------------------
# Non-Atomic Memory Operations
# -----------------------


@builtin
def load(pointer, mask=None, other=None, boundary_check=(), padding_option="", cache_modifier="", eviction_policy="",
         volatile=False, _builder=None):
    """
    Return a tensor of data whose values are loaded from memory at location defined by `pointer`:
    (1) If `pointer` is a single element pointer, a scalar is loaded.  In
        this case:

        - `mask` and `other` must also be scalars,
        - `other` is implicitly typecast to `pointer.dtype.element_ty`, and
        - `boundary_check` and `padding_option` must be empty.

    (2) If `pointer` is an N-dimensional tensor of pointers, an
        N-dimensional tensor is loaded.  In this case:

        - `mask` and `other` are implicitly broadcast to `pointer.shape`,
        - `other` is implicitly typecast to `pointer.dtype.element_ty`, and
        - `boundary_check` and `padding_option` must be empty.

    (3) If `pointer` is a block pointer defined by `make_block_ptr`, a
        tensor is loaded.  In this case:

        - `mask` and `other` must be None, and
        - `boundary_check` and `padding_option` can be specified to control
          the behavior of out-of-bound access.

    :param pointer: Pointer to the data to be loaded
    :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType`
    :param mask: if `mask[idx]` is false, do not load the data at address `pointer[idx]`
        (must be `None` with block pointers)
    :type mask: Block of `triton.int1`, optional
    :param other: if `mask[idx]` is false, return `other[idx]`
    :type other: Block, optional
    :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check
    :type boundary_check: tuple of ints, optional
    :param padding_option: should be one of {"", "zero", "nan"}, do padding while out of bound
    :param cache_modifier: changes cache option in NVIDIA PTX
    :type cache_modifier: str, optional
    :param eviction_policy: changes eviction policy in NVIDIA PTX
    :type eviction_policy: str, optional
    :param volatile: changes volatile option in NVIDIA PTX
    :type volatile: bool, optional
    """
    # `mask` and `other` can be constexpr
    mask = _constexpr_to_value(mask)
    other = _constexpr_to_value(other)
    if mask is not None:
        mask = _to_tensor(mask, _builder)
    if other is not None:
        other = _to_tensor(other, _builder)
    padding_option = _constexpr_to_value(padding_option)
    cache_modifier = _constexpr_to_value(cache_modifier)
    eviction_policy = _constexpr_to_value(eviction_policy)
    volatile = _constexpr_to_value(volatile)
    return semantic.load(pointer, mask, other, boundary_check, padding_option, cache_modifier, eviction_policy,
                         volatile, _builder)


@builtin
def _experimental_descriptor_load(desc_pointer, offsets, shape, dtype, _builder=None):
    """
    Experimental feature to access TMA descriptor loads. This is an escape hatch to easily exercise TTGIR operations.
    This will be removed in the future and shouldn't be used in production code.

    This loads a tensor of data based on the descriptor and offsets.
    """
    # NOTE(review): `type` shadows the builtin of the same name; it is local to
    # this function so it is harmless, but worth renaming on the next touch.
    type = block_type(dtype, shape)
    return semantic.descriptor_load(desc_pointer, offsets, "", "", type, _builder)


@builtin
def _experimental_descriptor_store(desc_pointer, value, offsets, _builder=None):
    """
    Experimental feature to access TMA descriptor stores. This is an escape hatch to easily exercise TTGIR operations.
    This will be removed in the future and shouldn't be used in production code.

    This stores a tensor of data based on the descriptor and offsets.
    """
    return semantic.descriptor_store(desc_pointer, value, offsets, _builder)


@_tensor_member_fn
@builtin
def store(pointer, value, mask=None, boundary_check=(), cache_modifier="", eviction_policy="", _builder=None):
    """
    Store a tensor of data into memory locations defined by `pointer`.

    (1) If `pointer` is a single element pointer, a scalar is stored.  In
        this case:

        - `mask` must also be scalar, and
        - `boundary_check` must be empty (unlike `load`, `store` has no
          padding option).

    (2) If `pointer` is an N-dimensional tensor of pointers, an
        N-dimensional block is stored.  In this case:

        - `mask` is implicitly broadcast to `pointer.shape`, and
        - `boundary_check` must be empty.

    (3) If `pointer` is a block pointer defined by `make_block_ptr`, a block
        of data is stored.
In this case: + + - `mask` must be None, and + - `boundary_check` can be specified to control the behavior of out-of-bound access. + + `value` is implicitly broadcast to `pointer.shape` and typecast to `pointer.dtype.element_ty`. + + :param pointer: The memory location where the elements of `value` are stored + :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` + :param value: The tensor of elements to be stored + :type value: Block + :param mask: If `mask[idx]` is false, do not store `value[idx]` at `pointer[idx]` + :type mask: Block of triton.int1, optional + :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check + :type boundary_check: tuple of ints, optional + :param cache_modifier: changes cache option in NVIDIA PTX + :type cache_modifier: str, optional + :param eviction_policy: changes eviction policy in NVIDIA PTX + :type eviction_policy: str, optional + """ + # `value` can be constexpr + value = _to_tensor(value, _builder) + mask = _constexpr_to_value(mask) + if mask is not None: + mask = _to_tensor(mask, _builder) + cache_modifier = _constexpr_to_value(cache_modifier) + eviction_policy = _constexpr_to_value(eviction_policy) + return semantic.store(pointer, value, mask, boundary_check, cache_modifier, eviction_policy, _builder) + + +@builtin +def make_block_ptr(base: tensor, shape, strides, offsets, block_shape, order, _builder=None): + """ + Returns a pointer to a block in a parent tensor + + :param base: The base pointer to the parent tensor + :param shape: The shape of the parent tensor + :param strides: The strides of the parent tensor + :param offsets: The offsets to the block + :param block_shape: The shape of the block + :param order: The order of the original data format + """ + return semantic.make_block_ptr(base, shape, strides, offsets, block_shape, order, _builder) + + +@_tensor_member_fn +@builtin +def advance(base, offsets, _builder=None): + """ + Advance a block pointer 
+ + :param base: the block pointer to advance + :param offsets: the offsets to advance, a tuple by dimension + """ + return semantic.advance(base, offsets, _builder) + + +# ----------------------- +# Atomic Memory Operations +# ----------------------- + + +def _add_atomic_docstr(name: str, has_cmp: bool = False) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = f""" + Performs an atomic {name} at the memory location specified by :code:`pointer`. + + Return the data stored at :code:`pointer` before the atomic operation. + + :param pointer: The memory locations to operate on + :type pointer: Block of dtype=triton.PointerDType""" + if has_cmp: + docstr += """ + :param cmp: The values expected to be found in the atomic object + :type cmp: Block of dtype=pointer.dtype.element_ty""" + docstr += """ + :param val: The values with which to perform the atomic operation + :type val: Block of dtype=pointer.dtype.element_ty + :param sem: Memory semantics to use ("ACQUIRE_RELEASE" (default), + "ACQUIRE", "RELEASE", or "RELAXED") + :type sem: str + :param scope: Scope of threads that observe synchronizing effect of the + atomic operation ("GPU" (default), "CTA", or "SYSTEM") + :type scope: str + """ + func.__doc__ = docstr + return func + + return _decorator + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("compare-and-swap", has_cmp=True) +def atomic_cas(pointer, cmp, val, sem=None, scope=None, _builder=None): + cmp = _to_tensor(cmp, _builder) + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_cas(pointer, cmp, val, sem, scope, _builder) + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("exchange") +def atomic_xchg(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + mask = _constexpr_to_value(mask) + return semantic.atomic_xchg(pointer, val, mask, sem, scope, 
_builder) + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("add") +def atomic_add(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + mask = _constexpr_to_value(mask) + return semantic.atomic_add(pointer, val, mask, sem, scope, _builder) + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("max") +def atomic_max(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + mask = _constexpr_to_value(mask) + return semantic.atomic_max(pointer, val, mask, sem, scope, _builder) + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("min") +def atomic_min(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + mask = _constexpr_to_value(mask) + return semantic.atomic_min(pointer, val, mask, sem, scope, _builder) + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("logical and") +def atomic_and(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + mask = _constexpr_to_value(mask) + return semantic.atomic_and(pointer, val, mask, sem, scope, _builder) + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("logical or") +def atomic_or(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + mask = _constexpr_to_value(mask) + return semantic.atomic_or(pointer, val, mask, sem, scope, _builder) + + +@_tensor_member_fn +@builtin +@_add_atomic_docstr("logical xor") +def atomic_xor(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + 
scope = _constexpr_to_value(scope) + mask = _constexpr_to_value(mask) + return semantic.atomic_xor(pointer, val, mask, sem, scope, _builder) + + +# ----------------------- +# Conditioning +# ----------------------- + + +@builtin +def where(condition, x, y, _builder=None): + """ + Returns a tensor of elements from either :code:`x` or :code:`y`, depending on :code:`condition`. + + Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`. + + If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead. + + The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`. + :code:`x` and :code:`y` must have the same data type. + + :param condition: When True (nonzero), yield x, otherwise yield y. + :type condition: Block of triton.bool + :param x: values selected at indices where condition is True. + :param y: values selected at indices where condition is False. + """ + condition = _to_tensor(condition, _builder) + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + return semantic.where(condition, x, y, _builder) + + +# ----------------------- +# Math +# ----------------------- + + +@builtin +def minimum(x, y, propagate_nan: constexpr = PropagateNan.NONE, _builder=None): + """ + Computes the element-wise minimum of :code:`x` and :code:`y`. + + :param x: the first input tensor + :type x: Block + :param y: the second input tensor + :type y: Block + :param propagate_nan: whether to propagate NaN values. + :type propagate_nan: tl.PropagateNan + + .. 
seealso:: :class:`tl.PropagateNan` + """ + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + x = _promote_bfloat16_to_float32(x, _builder=_builder) + y = _promote_bfloat16_to_float32(y, _builder=_builder) + propagate_nan = _constexpr_to_value(propagate_nan) + return semantic.minimum(x, y, propagate_nan, _builder) + + +@builtin +def maximum(x, y, propagate_nan: constexpr = PropagateNan.NONE, _builder=None): + """ + Computes the element-wise maximum of :code:`x` and :code:`y`. + + :param x: the first input tensor + :type x: Block + :param y: the second input tensor + :type y: Block + :param propagate_nan: whether to propagate NaN values. + :type propagate_nan: tl.PropagateNan + + .. seealso:: :class:`tl.PropagateNan` + """ + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + x = _promote_bfloat16_to_float32(x, _builder=_builder) + y = _promote_bfloat16_to_float32(y, _builder=_builder) + propagate_nan = _constexpr_to_value(propagate_nan) + return semantic.maximum(x, y, propagate_nan, _builder) + + +@builtin +def clamp(x, min, max, propagate_nan: constexpr = PropagateNan.NONE, _builder=None): + """ + Clamps the input tensor :code:`x` within the range [min, max]. + Behavior when :code:`min` > :code:`max` is undefined. + + :param x: the input tensor + :type x: Block + :param min: the lower bound for clamping + :type min: Block + :param max: the upper bound for clamping + :type max: Block + :param propagate_nan: whether to propagate NaN values. Applies only to the :code:`x` tensor. + If either :code:`min` or :code:`max` is NaN, the result is undefined. + :type propagate_nan: tl.PropagateNan + + .. 
seealso:: :class:`tl.PropagateNan` + """ + x = _to_tensor(x, _builder) + min = _to_tensor(min, _builder) + max = _to_tensor(max, _builder) + x = _promote_bfloat16_to_float32(x, _builder=_builder) + min = _promote_bfloat16_to_float32(min, _builder=_builder) + max = _promote_bfloat16_to_float32(max, _builder=_builder) + + propagate_nan = _constexpr_to_value(propagate_nan) + + return semantic.clamp(x, min, max, propagate_nan, _builder) + + +# ----------------------- +# Reductions +# ----------------------- + + +def _add_reduction_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` + + :param input: the input values + :param axis: the dimension along which the reduction should be done + :param keep_dims: if true, keep the reduced dimensions with length 1""" + if return_indices_arg is not None: + docstr += f""" + :param {return_indices_arg}: if true, return index corresponding to the {name} value""" + if tie_break_arg is not None: + docstr += f""" + :param {tie_break_arg}: if true, return the left-most indices in case of ties for values that aren't NaN""" + + func.__doc__ = docstr.format(name=name) + return func + + return _decorator + + +@contextmanager +def _insertion_guard(builder): + ip = builder.get_insertion_point() + yield + builder.restore_insertion_point(ip) + + +@_tensor_member_fn +@builtin +def reduce(input, axis, combine_fn, keep_dims=False, _builder=None, _generator=None): + """Applies the combine_fn to all elements in :code:`input` tensors along the provided :code:`axis` + + :param input: the input tensor, or tuple of tensors + :param axis: the dimension along which the reduction should be done. 
If None, reduce all dimensions + :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) + :param keep_dims: if true, keep the reduced dimensions with length 1 + + """ + if isinstance(input, tensor): + return reduce((input, ), axis, combine_fn, keep_dims=keep_dims, _builder=_builder, _generator=_generator)[0] + + def make_combine_region(reduce_op): + in_scalar_tys = [t.type.scalar for t in input] + prototype = function_type(in_scalar_tys, in_scalar_tys * 2) + + region = reduce_op.get_region(0) + with _insertion_guard(_builder): + param_types = [ty.to_ir(_builder) for ty in prototype.param_types] + block = _builder.create_block_with_parent(region, param_types) + args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] + results = _generator.call_JitFunction(combine_fn, args, kwargs={}) + if isinstance(results, tensor): + handles = [results.handle] + else: + handles = [r.handle for r in results] + _builder.create_reduce_ret(*handles) + + def expand_ndims(t, ndims): + for _ in builtins.range(ndims): + t = expand_dims(t, 0, _builder=_builder) + return t + + axis = _constexpr_to_value(axis) + keep_dims = _constexpr_to_value(keep_dims) + if axis is not None: + axis = _wrap_axis(axis, len(input[0].shape)) + ret = semantic.reduction(input, axis, make_combine_region, _builder) + if keep_dims: + if axis is not None: + ret = tuple(expand_dims(t, axis, _builder=_builder) for t in ret) + else: + ret = tuple(expand_ndims(t, len(input[0].shape)) for t in ret) + return ret + + +@builtin +def _promote_bfloat16_to_float32(t, _builder=None): + scalar_ty = t.type.scalar + + # hardware doesn't support FMAX, FMIN, CMP for bfloat16 + if scalar_ty is bfloat16: + return t.to(float32, _builder=_builder) + return t + + +@builtin +def _reduce_with_indices(input, axis, combine_fn, keep_dims=False, _builder=None, _generator=None): + axis = _constexpr_to_value(axis) + n = input.shape[axis] + index = arange(0, n, 
_builder=_builder) + + if len(input.shape) > 1: + # Broadcast index across the non-reduced axes + axes_to_expand = [constexpr(d) for d in builtins.range(len(input.shape))] + del axes_to_expand[axis] + index = expand_dims(index, axes_to_expand, _builder=_builder) + index = broadcast_to(index, input.shape, _builder=_builder) + + rvalue, rindices = reduce((input, index), axis, combine_fn, keep_dims=keep_dims, _builder=_builder, + _generator=_generator) + return rvalue, rindices + + +# ----------------------- +# Scans +# ----------------------- + + +def _add_scan_docstr(name: str) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` + + :param input: the input values + :param axis: the dimension along which the scan should be done""" + func.__doc__ = docstr.format(name=name) + return func + + return _decorator + + +@_tensor_member_fn +@builtin +def associative_scan(input, axis, combine_fn, reverse=False, _builder=None, _generator=None): + """Applies the combine_fn to each elements with a carry in :code:`input` tensors along the provided :code:`axis` and update the carry + + :param input: the input tensor, or tuple of tensors + :param axis: the dimension along which the reduction should be done + :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) + :param reverse: apply the associative scan in the reverse direction along axis. 
+ + """ + if isinstance(input, tensor): + return associative_scan((input, ), axis, combine_fn, reverse, _builder=_builder, _generator=_generator)[0] + + def make_combine_region(scan_op): + in_scalar_tys = [t.type.scalar for t in input] + prototype = function_type(in_scalar_tys, in_scalar_tys * 2) + + region = scan_op.get_region(0) + with _insertion_guard(_builder): + param_types = [ty.to_ir(_builder) for ty in prototype.param_types] + block = _builder.create_block_with_parent(region, param_types) + args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] + results = _generator.call_JitFunction(combine_fn, args, kwargs={}) + if isinstance(results, tensor): + handles = [results.handle] + else: + handles = [r.handle for r in results] + _builder.create_scan_ret(*handles) + + axis = _constexpr_to_value(axis) + if axis is not None: + axis = _wrap_axis(axis, len(input[0].shape)) + return semantic.associative_scan(input, axis, make_combine_region, reverse, _builder) + + +@_tensor_member_fn +@builtin +def histogram(input, num_bins, _builder=None, _generator=None): + """computes an histogram based on input tensor with num_bins bins, the bins have a width of 1 and start at 0. + + :param input: the input tensor + :param num_bins: number of histogram bins + + """ + num_bins = _constexpr_to_value(num_bins) + return semantic.histogram(input, num_bins, _builder) + + +# ----------------------- +# Compiler Hint Ops +# ----------------------- + + +@builtin +def debug_barrier(_builder=None): + ''' + Insert a barrier to synchronize all threads in a block. + ''' + return semantic.debug_barrier(_builder) + + +@builtin +def multiple_of(input, values, _builder=None): + """ + Let the compiler know that the values in :code:`input` are all multiples of :code:`value`. 
+ """ + if isinstance(values, constexpr): + values = [values] + for i, d in enumerate(values): + if not isinstance(d, constexpr): + raise TypeError(f"values element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + values = [x.value for x in values] + return semantic.multiple_of(input, values) + + +@builtin +def max_contiguous(input, values, _builder=None): + """ + Let the compiler know that the `value` first values in :code:`input` are contiguous. + """ + if isinstance(values, constexpr): + values = [values] + for i, d in enumerate(values): + if not isinstance(d, constexpr): + raise TypeError(f"values element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + values = [x.value for x in values] + return semantic.max_contiguous(input, values) + + +@builtin +def max_constancy(input, values, _builder=None): + """ + Let the compiler know that the `value` first values in :code:`input` are constant. + + e.g. if :code:`values` is [4], then each group of 4 values in :code:`input` should all be equal, + for example [0, 0, 0, 0, 1, 1, 1, 1]. + """ + if isinstance(values, constexpr): + values = [values] + for i, d in enumerate(values): + if not isinstance(d, constexpr): + raise TypeError(f"values element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + values = [x.value for x in values] + return semantic.max_constancy(input, values) + + +# ----------------------- +# Debugging functions +# ----------------------- + + +@builtin +def static_print(*values, sep: str = " ", end: str = "\n", file=None, flush=False, _builder=None): + ''' + Print the values at compile time. 
The parameters are the same as the builtin :code:`print`. + + NOTE: Calling the Python builtin :code:`print` is not the same as calling this, it instead maps to :code:`device_print`, + which has special requirements for the arguments. + + .. highlight:: python + .. code-block:: python + + tl.static_print(f"{BLOCK_SIZE=}") + ''' + pass + + +@builtin +def static_assert(cond, msg="", _builder=None): + ''' + Assert the condition at compile time. Does not require that the :code:`TRITON_DEBUG` environment variable + is set. + + .. highlight:: python + .. code-block:: python + + tl.static_assert(BLOCK_SIZE == 1024) + ''' + pass + + +@builtin +def device_print(prefix, *args, hex=False, _builder=None): + ''' + Print the values at runtime from the device. String formatting does not work for runtime values, so you should + provide the values you want to print as arguments. The first value must be a string, all following values must + be scalars or tensors. + + Calling the Python builtin :code:`print` is the same as calling this function, and the requirements for the arguments will match + this function (not the normal requirements for :code:`print`). + + .. highlight:: python + .. code-block:: python + + tl.device_print("pid", pid) + print("pid", pid) + + On CUDA, printfs are streamed through a buffer of limited size (on one host, + we measured the default as 6912 KiB, but this may not be consistent across + GPUs and CUDA versions). If you notice some printfs are being dropped, you + can increase the buffer size by calling + + .. highlight:: python + .. code-block:: python + + triton.runtime.driver.active.utils.set_printf_fifo_size(size_bytes) + + CUDA may raise an error if you try to change this value after running a + kernel that uses printfs. The value set here may only affect the current + device (so if you have multiple GPUs, you'd need to call it multiple times). + + :param prefix: a prefix to print before the values. This is required to be a string literal. 
+ :param args: the values to print. They can be any tensor or scalar. + :param hex: print all values as hex instead of decimal + ''' + import string + prefix = _constexpr_to_value(prefix) + assert isinstance(prefix, str), f"{prefix} is not string" + b_ascii = True + for ch in prefix: + if ch not in string.printable: + b_ascii = False + break + assert b_ascii, f"{prefix} is not an ascii string" + new_args = [] + for arg in args: + new_args.append(_to_tensor(arg, _builder)) + return semantic.device_print(prefix, new_args, hex, _builder) + + +@builtin +def device_assert(cond, msg="", _builder=None): + ''' + Assert the condition at runtime from the device. Requires that the environment variable :code:`TRITON_DEBUG` + is set to a value besides :code:`0` in order for this to have any effect. + + Using the Python :code:`assert` statement is the same as calling this function, except that the second argument + must be provided and must be a string, e.g. :code:`assert pid == 0, "pid != 0"`. The environment variable must + be set for this :code:`assert` statement to have any effect. + + .. highlight:: python + .. code-block:: python + + tl.device_assert(pid == 0) + assert pid == 0, f"pid != 0" + + :param cond: the condition to assert. This is required to be a boolean tensor. + :param msg: the message to print if the assertion fails. This is required to be a string literal. + ''' + msg = _constexpr_to_value(msg) + import inspect + frame = inspect.currentframe() + module = inspect.getmodule(frame) + # The triton function module doesn't have the name attribute. + # We use this trick to find the caller. 
+ while hasattr(module, "__name__"): + frame = frame.f_back + module = inspect.getmodule(frame) + lineno = 0 + func_name = 'unknown' + file_name = 'unknown' + if frame is not None and frame.f_back is not None: + func_name = frame.f_code.co_name + file_name = frame.f_back.f_code.co_filename + # TODO: The line number currently indicates the line + # where the triton function is called but not where the + # device_assert is called. Need to enhance this. + lineno = frame.f_back.f_lineno + return semantic.device_assert(_to_tensor(cond, _builder), msg, file_name, func_name, lineno, _builder) + + +@builtin +def inline_asm_elementwise(asm: str, constraints: str, args: Sequence, dtype: Union[dtype, Sequence[dtype]], + is_pure: bool, pack: int, _builder=None): + ''' + Execute inline assembly over a tensor. Essentially, this is :code:`map` + where the function is inline assembly. + + The input tensors :code:`args` are implicitly broadcasted to the same shape. + + :code:`dtype` can be a tuple of types, in which case the output is a + tuple of tensors. + + Each invocation of the inline asm processes :code:`pack` elements at a + time. Exactly which set of inputs a block receives is unspecified. + Input elements of size less than 4 bytes are packed into 4-byte + registers. + + This op does not support empty :code:`dtype` -- the inline asm must + return at least one tensor, even if you don't need it. You can work + around this by returning a dummy tensor of arbitrary type; it shouldn't + cost you anything if you don't use it. + + Example using + [PTX](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html) + assembly: + + .. highlight:: python + .. code-block:: python + + @triton.jit + def kernel(A, B, C, D, BLOCK: tl.constexpr): + a = tl.load(A + tl.arange(0, BLOCK)) # uint8 tensor + b = tl.load(B + tl.arange(0, BLOCK)) # float32 tensor + + # For each (a,b) in zip(a,b), perform the following: + # - Let ai be `a` converted to int32. 
+ # - Let af be `a` converted to float. + # - Let m be the max of ai and b. + # - Return ai and mi. + # Do the above 4 elements at a time. + (c, d) = tl.inline_asm_elementwise( + asm=""" + { + // Unpack `a` into `ai`. + .reg .b8 tmp<4>; + mov.b32 {tmp0, tmp1, tmp2, tmp3}, $8; + cvt.u32.u8 $0, tmp0; + cvt.u32.u8 $1, tmp1; + cvt.u32.u8 $2, tmp2; + cvt.u32.u8 $3, tmp3; + } + // Convert `ai` to float. + cvt.rn.f32.s32 $4, $0; + cvt.rn.f32.s32 $5, $1; + cvt.rn.f32.s32 $6, $2; + cvt.rn.f32.s32 $7, $3; + // Take max of `ai` and `b`. + max.f32 $4, $4, $9; + max.f32 $5, $5, $10; + max.f32 $6, $6, $11; + max.f32 $7, $7, $12; + """, + constraints=( + # 8 output registers, namely + # $0=ai0, $1=ai1, $2=ai2, $3=ai3, + # $4=m0, $5=m1, $6=m2, $7=m3. + "=r,=r,=r,=r,=r,=r,=r,=r," + # 5 input registers, namely + # $8=ai, + # $9=b0, $10=b1, $11=b2, $12=b3. + # The four elements from `a` are all packed into one register. + "r,r,r,r,r"), + args=[a, b], + dtype=(tl.int32, tl.float32), + is_pure=True, + pack=4, + ) + tl.store(C + tl.arange(0, BLOCK), c) + tl.store(D + tl.arange(0, BLOCK), d) + + :param asm: assembly to run. Must match target's assembly format. + :param constraints: asm constraints in + [LLVM format](https://llvm.org/docs/LangRef.html#inline-asm-constraint-string) + :param args: the input tensors, whose values are passed to the asm block + :param dtype: the element type(s) of the returned tensor(s) + :param is_pure: if true, the compiler assumes the asm block has no side-effects + :param pack: the number of elements to be processed by one instance of inline assembly + :param _builder: the builder + :return: one tensor or a tuple of tensors of the given dtypes + ''' + asm = _constexpr_to_value(asm) + constraints = _constexpr_to_value(constraints) + pack = _constexpr_to_value(pack) + is_pure = _constexpr_to_value(is_pure) + + # Wrap `dtype` in a tuple if it's not already. 
+ try: + iter(dtype) # type: ignore + has_multiple_outputs = True + except TypeError: + has_multiple_outputs = False + dtype = (dtype, ) # type: ignore + + dtype = typing.cast(Sequence[_DtypeClass], dtype) + + res_tys = dtype + if dispatch_args := [_to_tensor(arg, _builder) for arg in args]: + bin_op_type_checking = partial( + semantic.binary_op_type_checking_impl, + builder=_builder, + arithmetic_check=False, + allow_lhs_ptr=True, + allow_rhs_ptr=True, + ) + broadcast_arg = dispatch_args[0] + # Get the broadcast shape over all the arguments + for item in dispatch_args: + _, broadcast_arg = bin_op_type_checking(item, broadcast_arg) + if broadcast_arg.shape: + # Change the shape of each argument based on the broadcast shape + for i, item in enumerate(dispatch_args): + dispatch_args[i], _ = bin_op_type_checking(item, broadcast_arg) + res_tys = [block_type(dt, broadcast_arg.shape) for dt in dtype] + handles = [t.handle for t in dispatch_args] + call = _builder.create_inline_asm(asm, constraints, handles, [ty.to_ir(_builder) for ty in res_tys], is_pure, pack) + + if not has_multiple_outputs: + return tensor(call.get_result(0), res_tys[0]) + return tuple(tensor(call.get_result(i), ty) for i, ty in enumerate(res_tys)) + + +# ----------------------- +# Iterators +# ----------------------- + + +class static_range: + """ + Iterator that counts upward forever. + + .. highlight:: python + .. code-block:: python + + @triton.jit + def kernel(...): + for i in tl.static_range(10): + ... + :note: This is a special iterator used to implement similar semantics to Python's :code:`range` in the context of + :code:`triton.jit` functions. In addition, it also guides the compiler to unroll the loop aggressively. + :param arg1: the start value. + :param arg2: the end value. + :param step: the step value. 
+ """ + + def __init__(self, arg1, arg2=None, step=None): + assert isinstance(arg1, constexpr) + if step is None: + self.step = constexpr(1) + else: + assert isinstance(step, constexpr) + self.step = step + if arg2 is None: + self.start = constexpr(0) + self.end = arg1 + else: + assert isinstance(arg2, constexpr) + self.start = arg1 + self.end = arg2 + + def __iter__(self): + raise RuntimeError("static_range can only be used in @triton.jit'd functions") + + def __next__(self): + raise RuntimeError("static_range can only be used in @triton.jit'd functions") + + +class range: + """ + Iterator that counts upward forever. + + .. highlight:: python + .. code-block:: python + + @triton.jit + def kernel(...): + for i in tl.range(10, num_stages=3): + ... + :note: This is a special iterator used to implement similar semantics to Python's :code:`range` in the context of + :code:`triton.jit` functions. In addition, it allows user to pass extra attributes to the compiler. + :param arg1: the start value. + :param arg2: the end value. + :param step: the step value. + :param num_stages: pipeline the loop into this many stages (so there are + :code:`num_stages` iterations of the loop in flight at once). + + Note this is subtly different than passing :code:`num_stages` as a + kernel argument. The kernel argument only pipelines loads that feed + into :code:`dot` operations, while this attribute tries to pipeline most + (though not all) loads in this loop. 
+ """ + + def __init__(self, arg1, arg2=None, step=None, num_stages=None): + if step is None: + self.step = constexpr(1) + else: + self.step = step + if arg2 is None: + self.start = constexpr(0) + self.end = arg1 + else: + self.start = arg1 + self.end = arg2 + self.num_stages = num_stages + + def __iter__(self): + raise RuntimeError("tl.range can only be used in @triton.jit'd functions") + + def __next__(self): + raise RuntimeError("tl.range can only be used in @triton.jit'd functions") + + +# ----------------------- +# Extern functions +# ----------------------- + + +def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple, + is_pure: bool, _builder=None): + ''' + Dispatch a function to a library + :param func: the function to dispatch + :param lib_name: the name of the library + :param lib_path: the path of the library + :param args: the arguments of the function + :param arg_type_symbol_dict: the type of the arguments + :param ret_shape: the shape of the return value + :param _builder: the builder + :return: the return value of the function + ''' + if len(arg_type_symbol_dict) == 0: + raise ValueError("arg_type_symbol_dict is empty") + + num_args = len(list(arg_type_symbol_dict.keys())[0]) + if len(args) != num_args: + raise ValueError(f"length of input args does not match." + f"Expect {len(args)}, got {num_args}") + + arg_types = [] + arg_list = [] + for arg in args: + if isinstance(arg, tensor): + arg_types.append(arg.dtype) + arg_list.append(arg.handle) + else: + arg_types.append(type(arg)) + arg_list.append(arg) + arg_types = tuple(arg_types) + + if arg_types not in arg_type_symbol_dict: + raise ValueError(f"input arg type does not match." 
+ f"Expect one of {arg_type_symbol_dict.keys()}, got {arg_types}") + else: + symbol = arg_type_symbol_dict[arg_types][0] + ret_type = arg_type_symbol_dict[arg_types][1] + if ret_shape: + ret_type = block_type(ret_type, ret_shape) + return tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder), is_pure), ret_type) + + +@builtin +def extern_elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, is_pure: bool, + _builder=None): + ''' + Dispatch an elementwise function to a library + :param lib_name: the name of the library + :param lib_path: the path of the library + :param args: the arguments of the function + :param arg_type_symbol_dict: the type of the arguments + :param is_pure: whether the function is pure + :param _builder: the builder + :return: the return value of the function + ''' + dispatch_args = args.copy() + all_scalar = True + ret_shape = None + arg_types = [] + for i in builtins.range(len(dispatch_args)): + dispatch_args[i] = _to_tensor(dispatch_args[i], _builder) + arg_types.append(dispatch_args[i].dtype) + if dispatch_args[i].type.is_block(): + all_scalar = False + if len(arg_types) > 0: + arg_types = tuple(arg_types) + arithmetic_check = True + # If there's a type tuple that is not supported by the library, we will do arithmetic check + if arg_types in arg_type_symbol_dict: + arithmetic_check = False + broadcast_arg = dispatch_args[0] + # Get the broadcast shape over all the arguments + for item in dispatch_args: + _, broadcast_arg = semantic.binary_op_type_checking_impl(item, broadcast_arg, _builder, + arithmetic_check=arithmetic_check) + # Change the shape of each argument based on the broadcast shape + for i in builtins.range(len(dispatch_args)): + dispatch_args[i], _ = semantic.binary_op_type_checking_impl(dispatch_args[i], broadcast_arg, _builder, + arithmetic_check=arithmetic_check) + if not all_scalar: + ret_shape = broadcast_arg.shape + func = _builder.create_extern_elementwise + return 
dispatch(func, lib_name, lib_path, dispatch_args, arg_type_symbol_dict, ret_shape, is_pure, _builder) + + +def binary_op_type_legalization(lhs, rhs, builder): + ''' + Convert both operands to a single common type + :param lhs: the left operand + :param rhs: the right operand + :param builder: the builder + ''' + return semantic.binary_op_type_checking_impl(lhs, rhs, builder) + + +def extern(fn): + """A decorator for external functions.""" + return builtin(fn) diff --git a/vllm/lib/python3.10/site-packages/triton/language/extra/__init__.py b/vllm/lib/python3.10/site-packages/triton/language/extra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14e1778d2fdc2a265575573b6fa4aed026e11b9a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/triton/language/extra/__init__.py @@ -0,0 +1,4 @@ +from . import cuda +from . import hip + +__all__ = ['cuda', 'hip'] diff --git a/vllm/lib/python3.10/site-packages/triton/language/extra/cuda/utils.py b/vllm/lib/python3.10/site-packages/triton/language/extra/cuda/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..01bc040b289feea13190672677571b6e7dffa08f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/triton/language/extra/cuda/utils.py @@ -0,0 +1,109 @@ +from triton.language import core + + +@core.extern +def globaltimer(_builder=None): + return core.inline_asm_elementwise("mov.u64 $0, %globaltimer;", "=l", [], dtype=core.int64, is_pure=False, pack=1, + _builder=_builder) + + +@core.extern +def smid(_builder=None): + return core.inline_asm_elementwise("mov.u32 $0, %smid;", "=r", [], dtype=core.int32, is_pure=True, pack=1, + _builder=_builder) + + +@core.builtin +def num_threads(_builder=None): + return core.constexpr(_builder.options.num_warps * 32) + + +@core.builtin +def num_warps(_builder=None): + return core.constexpr(_builder.options.num_warps) + + +# ----- FP8E4M3B15 ------ +# This data-type is a variant of the standard FP8E4M3 format. 
+# It was designed for fast software conversion to FP16 on
+# nvidia GPUs that do not support it natively.
+# This is the same format as FP8E4M3Nv, but:
+# - the exponent bias is 15 instead of 7
+# - 0xff and 0x7f are mapped to +-1.750 instead of +-nan
+@core.builtin
+def convert_fp8e4b15_to_float16(arg, _builder=None):
+    # Upcast four packed fp8e4b15 values to fp16 in one inline-PTX sequence:
+    # prmt shuffles the input bytes into position, the and-masks separate
+    # sign from exponent+mantissa bits, and the final lop3/shl reassemble
+    # two fp16 pairs ($0 and $1).  pack=4: one asm call handles 4 elements.
+    return core.inline_asm_elementwise(
+        "{ \n"
+        ".reg .b32 a<2>, b<2>; \n"
+        "prmt.b32 a0, 0, $2, 0x5746; \n"
+        "and.b32 b0, a0, 0x7f007f00; \n"
+        "and.b32 b1, a0, 0x00ff00ff; \n"
+        "and.b32 a1, a0, 0x00800080; \n"
+        "shr.b32 b0, b0, 1; \n"
+        "add.u32 b1, b1, a1; \n"
+        "lop3.b32 $0, b0, 0x80008000, a0, 0xf8; \n"
+        "shl.b32 $1, b1, 7; \n"
+        "} \n", "=r,=r,r", [arg], dtype=core.float16, is_pure=True, pack=4,
+        _builder=_builder)
+
+
+@core.builtin
+def convert_float16_to_fp8e4b15(arg, has_minx2, _builder=None):
+    # Downcast four packed fp16 values to fp8e4b15.  The magnitude is first
+    # clamped to 0x3F00 (largest representable value); `has_minx2` selects
+    # the single-instruction min.f16x2 clamp, with a predicated selp
+    # fallback for targets that lack min.f16x2.
+    asm = """{
+    .reg .pred p<4>;
+    .reg .b32 a<2>, b<2>;
+    .reg .b16 c<4>;
+    .reg .b16 max_val_f16;
+    .reg .b32 max_val_f16x2;
+    mov.b16 max_val_f16, 0x3F00;
+    mov.b32 max_val_f16x2, 0x3F003F00;
+    and.b32 a0, $1, 0x7fff7fff;
+    and.b32 a1, $2, 0x7fff7fff;"""
+    if has_minx2:
+        asm += """min.f16x2 a0, a0, max_val_f16x2;
+    min.f16x2 a1, a1, max_val_f16x2;"""
+    else:
+        asm += """setp.lt.f16x2 p0|p1, a0, max_val_f16x2;
+    setp.lt.f16x2 p2|p3, a1, max_val_f16x2;
+    mov.b32 {c0, c1}, a0;
+    mov.b32 {c2, c3}, a1;
+    selp.b16 c0, c0, max_val_f16, p0;
+    selp.b16 c1, c1, max_val_f16, p1;
+    selp.b16 c2, c2, max_val_f16, p2;
+    selp.b16 c3, c3, max_val_f16, p3;
+    mov.b32 a0, {c0, c1};
+    mov.b32 a1, {c2, c3};"""
+    asm += """mad.lo.u32 a0, a0, 2, 0x00800080;
+    mad.lo.u32 a1, a1, 2, 0x00800080;
+    lop3.b32 b0, $1, 0x80008000, a0, 0xea;
+    lop3.b32 b1, $2, 0x80008000, a1, 0xea;
+    prmt.b32 $0, b0, b1, 0x7531;
+    }"""
+    return core.inline_asm_elementwise(asm, "=r,r,r", [arg], dtype=core.float8e4b15, is_pure=True, pack=4,
+                                       _builder=_builder)
+
+
+@core.builtin
+def convert_custom_float8(arg, dst_ty, fp_downcast_rounding, has_minx2, _builder=None):
+    # Software conversion between fp8e4b15 and fp16/fp32, in either
+    # direction depending on the source scalar type.
+    if arg.type.scalar.is_fp8e4b15():
+        # Upcast path: fp8e4b15 -> fp16, then optionally widen to fp32.
+        upcast_val = convert_fp8e4b15_to_float16(arg, _builder=_builder)
+        if dst_ty.scalar.is_fp32():
+            upcast_val = upcast_val.to(core.float32, _builder=_builder)
+        return upcast_val
+
+    assert arg.type.scalar.is_fp16() or arg.type.scalar.is_fp32()
+    # Downcast path: fp32 is first truncated to fp16, then packed to fp8.
+    # NOTE(review): the caller-supplied fp_downcast_rounding is not used on
+    # this path; "rtz" is hard-coded for the fp32 -> fp16 step — confirm
+    # this is intentional.
+    downcast_val = arg
+    if arg.type.scalar.is_fp32():
+        downcast_val = downcast_val.to(core.float16, fp_downcast_rounding="rtz", _builder=_builder)
+    downcast_val = convert_float16_to_fp8e4b15(downcast_val, has_minx2=has_minx2, _builder=_builder)
+    return downcast_val
+
+
+@core.builtin
+def convert_custom_float8_sm80(arg, dst_ty, fp_downcast_rounding=None, _builder=None):
+    # sm80 variant: target supports min.f16x2, use the fast clamp path.
+    return convert_custom_float8(arg, dst_ty, fp_downcast_rounding, has_minx2=True, _builder=_builder)
+
+
+@core.builtin
+def convert_custom_float8_sm70(arg, dst_ty, fp_downcast_rounding=None, _builder=None):
+    # sm70 variant: no min.f16x2, use the predicated selp clamp path.
+    return convert_custom_float8(arg, dst_ty, fp_downcast_rounding, has_minx2=False, _builder=_builder)
diff --git a/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__init__.py b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..229b57d87d6580c73f74dcb1556d33c0a1a2df60
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__init__.py
@@ -0,0 +1,3 @@
+from . import libdevice
+
+__all__ = ["libdevice"]
diff --git a/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57154cd0f20253158d3b0ec58b3f442c7f12496c
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__pycache__/libdevice.cpython-310.pyc b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__pycache__/libdevice.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ca86e80c527c5a1ae1936d053d0841ff9330e42
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/__pycache__/libdevice.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/triton/language/extra/hip/libdevice.py b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/libdevice.py
new file mode 100644
index 0000000000000000000000000000000000000000..02e5d2d0b211386838644edcecc4c77b65591f5c
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/triton/language/extra/hip/libdevice.py
@@ -0,0 +1,468 @@
+from triton.language import core
+
+
+# HIP device-library bindings: each wrapper maps an overload signature
+# (tuple of argument dtypes) to the matching OCML / triton-HIP symbol and
+# its result dtype; resolution happens in core.extern_elementwise.
+@core.extern
+def abs(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("int32"), ): ("__triton_hip_iabs", core.dtype("int32")),
+            (core.dtype("int64"), ): ("__triton_hip_iabs", core.dtype("int64")),
+            (core.dtype("fp32"), ): ("__triton_hip_fabs", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__triton_hip_fabs", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def floor(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_floor_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_floor_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def rsqrt(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_rsqrt_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_rsqrt_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def ceil(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_ceil_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_ceil_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def trunc(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_trunc_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_trunc_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def exp2(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_exp2_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_exp2_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def exp(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_exp_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_exp_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def fast_dividef(arg0, arg1, _builder=None):
+    # fp32-only fast division; no fp64 overload is provided.
+    return core.extern_elementwise("", "", [arg0, arg1], {
+        (core.dtype("fp32"), core.dtype("fp32")): ("__triton_hip_fast_fdividef", core.dtype("fp32")),
+    }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def sqrt(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_sqrt_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_sqrt_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+# Rounding, classification, trig, log/exp, Bessel and error functions.
+# Same binding pattern as above: dtype-tuple -> (device symbol, result dtype).
+@core.extern
+def llrint(arg0, _builder=None):
+    # Round to nearest integer; both float overloads return int64.
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__triton_hip_llrint", core.dtype("int64")),
+            (core.dtype("fp64"), ): ("__triton_hip_llrint", core.dtype("int64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def nearbyint(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [
+            arg0,
+        ], {
+            (core.dtype("fp32"), ): ("__ocml_nearbyint_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_nearbyint_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def isnan(arg0, _builder=None):
+    # Classification predicates return int32 (0 / nonzero), not bool.
+    return core.extern_elementwise(
+        "", "", [
+            arg0,
+        ], {
+            (core.dtype("fp32"), ): ("__ocml_isnan_f32", core.dtype("int32")),
+            (core.dtype("fp64"), ): ("__ocml_isnan_f64", core.dtype("int32")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def signbit(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [
+            arg0,
+        ], {
+            (core.dtype("fp32"), ): ("__ocml_signbit_f32", core.dtype("int32")),
+            (core.dtype("fp64"), ): ("__ocml_signbit_f64", core.dtype("int32")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def copysign(arg0, arg1, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0, arg1], {
+            (core.dtype("fp32"), core.dtype("fp32")): ("__ocml_copysign_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("fp64")): ("__ocml_copysign_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def isinf(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_isinf_f32", core.dtype("int32")),
+            (core.dtype("fp64"), ): ("__ocml_isinf_f64", core.dtype("int32")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def nextafter(arg0, arg1, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0, arg1], {
+            (core.dtype("fp32"), core.dtype("fp32")): ("__ocml_nextafter_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("fp64")): ("__ocml_nextafter_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def sin(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_sin_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_sin_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def cos(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_cos_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_cos_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def tan(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_tan_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_tan_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def log2(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_log2_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_log2_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def cosh(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_cosh_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_cosh_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def sinh(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_sinh_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_sinh_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def tanh(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_tanh_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_tanh_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def atan2(arg0, arg1, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0, arg1], {
+            (core.dtype("fp32"), core.dtype("fp32")): ("__ocml_atan2_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("fp64")): ("__ocml_atan2_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def atan(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_atan_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_atan_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def asin(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_asin_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_asin_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def acos(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_acos_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_acos_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def log(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_log_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_log_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def log10(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_log10_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_log10_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def log1p(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_log1p_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_log1p_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def acosh(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_acosh_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_acosh_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def asinh(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_asinh_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_asinh_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def atanh(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_atanh_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_atanh_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def expm1(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_expm1_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_expm1_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def hypot(arg0, arg1, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0, arg1], {
+            (core.dtype("fp32"), core.dtype("fp32")): ("__ocml_hypot_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("fp64")): ("__ocml_hypot_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def j0(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_j0_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_j0_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def j1(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_j1_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_j1_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def y0(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_y0_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_y0_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def y1(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_y1_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_y1_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def cyl_bessel_i0(arg0, _builder=None):
+    # Note: the OCML symbol is __ocml_i0_*, not __ocml_cyl_bessel_i0_*.
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_i0_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_i0_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def cyl_bessel_i1(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_i1_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_i1_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def erf(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_erf_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_erf_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def erfinv(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_erfinv_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_erfinv_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def erfc(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_erfc_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_erfc_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def erfcx(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_erfcx_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_erfcx_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def lgamma(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_lgamma_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), ): ("__ocml_lgamma_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def ldexp(arg0, arg1, _builder=None):
+    # Second operand (the exponent) is int32 for both float overloads.
+    return core.extern_elementwise(
+        "", "", [arg0, arg1], {
+            (core.dtype("fp32"), core.dtype("int32")): ("__ocml_ldexp_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("int32")): ("__ocml_ldexp_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def fmod(arg0, arg1, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0, arg1], {
+            (core.dtype("fp32"), core.dtype("fp32")): ("__ocml_fmod_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("fp64")): ("__ocml_fmod_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def fma(arg0, arg1, arg2, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0, arg1, arg2], {
+            (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__ocml_fma_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__ocml_fma_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def pow(arg0, arg1, _builder=None):
+    # Integer exponents dispatch to pown, float exponents to pow.
+    return core.extern_elementwise(
+        "", "", [arg0, arg1], {
+            (core.dtype("fp32"), core.dtype("int32")): ("__ocml_pown_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("int32")): ("__ocml_pown_f64", core.dtype("fp64")),
+            (core.dtype("fp32"), core.dtype("fp32")): ("__ocml_pow_f32", core.dtype("fp32")),
+            (core.dtype("fp64"), core.dtype("fp64")): ("__ocml_pow_f64", core.dtype("fp64")),
+        }, is_pure=True, _builder=_builder)
+
+
+@core.extern
+def ilogb(arg0, _builder=None):
+    return core.extern_elementwise(
+        "", "", [arg0], {
+            (core.dtype("fp32"), ): ("__ocml_ilogb_f32", core.dtype("int32")),
+            (core.dtype("fp64"), ): ("__ocml_ilogb_f64", core.dtype("int32")),
+        }, is_pure=True, _builder=_builder)
diff --git a/vllm/lib/python3.10/site-packages/triton/language/extra/libdevice.py b/vllm/lib/python3.10/site-packages/triton/language/extra/libdevice.py
new file mode 100644
index 0000000000000000000000000000000000000000..625cf3957e56c0abce80cbaa2f5fa79362079770
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/triton/language/extra/libdevice.py
@@ -0,0 +1,1213 @@
+from .cuda import libdevice as cuda_libdevice
+from .hip import libdevice as hip_libdevice
+from triton.language import core
+from functools import wraps
+from typing import TypeVar
+
+T = TypeVar('T')
+
+
+def dispatch(fn: T) -> T:
+    """Dispatch a function to a correct implementation."""
+    # The decorated stub's name is looked up on the backend-specific
+    # libdevice module at call time; the stub body itself is never run.
+    assert callable(fn)
+
+    @wraps(fn)
+    def wrapper(*args, **kwargs):
+        # Backend is read from the `_builder` keyword argument.
+        # NOTE(review): `_builder` must be passed as a keyword; a call
+        # without it raises KeyError here rather than a friendly error.
+        _backend = kwargs["_builder"].options.backend_name
+        if _backend == 'cuda':
+            _curr_libdevice_module = cuda_libdevice
+        elif _backend == 'hip':
+            _curr_libdevice_module = hip_libdevice
+        else:
+            raise RuntimeError('unknown backend')
+
+        try:
+            _impl = getattr(_curr_libdevice_module, fn.__name__)
+        except AttributeError:
+            # The backend module does not implement this libdevice function.
+            raise RuntimeError(f'`{_backend}` does not provide support for `{fn.__name__}` extra function')
+
+        return _impl(*args, **kwargs)
+
+    return wrapper
+
+
+# Backend-dispatched stubs: bodies are intentionally `...` — the real
+# implementation is resolved by @dispatch from cuda/hip libdevice.
+@core.extern
+@dispatch
+def clz(arg0, _builder=None):
+    ...
+
+
+@core.extern
+@dispatch
+def popc(arg0, _builder=None):
+    ...
+
+
+@core.extern
+@dispatch
+def byte_perm(arg0, arg1, arg2, _builder=None):
+    ...
+
+
+@core.extern
+@dispatch
+def mulhi(arg0, arg1, _builder=None):
+    ...
+
+
+@core.extern
+@dispatch
+def mul24(arg0, arg1, _builder=None):
+    ...
+
+
+@core.extern
+@dispatch
+def brev(arg0, _builder=None):
+    ...
+ + +@core.extern +@dispatch +def sad(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def abs(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def floor(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def rcp64h(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def rsqrt(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ceil(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def trunc(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def exp2(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def saturatef(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fma_rn(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def fma_rz(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def fma_rd(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def fma_ru(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def fast_dividef(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def div_rn(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def div_rz(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def div_rd(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def div_ru(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def rcp_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def rcp_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def rcp_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def rcp_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def sqrt_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def sqrt_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def sqrt_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def sqrt_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def sqrt(arg0, _builder=None): + ... 
+ + +@core.extern +@dispatch +def add_rn(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def add_rz(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def add_rd(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def add_ru(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def mul_rn(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def mul_rz(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def mul_rd(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def mul_ru(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def double2float_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2float_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2float_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2float_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2int_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2int_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2int_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2int_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2uint_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2uint_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2uint_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2uint_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def int2double_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def uint2double_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2int_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2int_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2int_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2int_ru(arg0, _builder=None): + ... 
+ + +@core.extern +@dispatch +def float2uint_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2uint_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2uint_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2uint_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def int2float_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def int2float_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def int2float_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def int2float_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def uint2float_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def uint2float_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def uint2float_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def uint2float_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def hiloint2double(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def double2loint(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2hiint(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ll_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ll_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ll_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ll_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ull_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ull_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ull_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float2ull_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2ll_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2ll_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2ll_rd(arg0, _builder=None): + ... 
+ + +@core.extern +@dispatch +def double2ll_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2ull_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2ull_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2ull_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def double2ull_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2float_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2float_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2float_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2float_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2float_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2float_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2float_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2float_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2double_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2double_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2double_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ll2double_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2double_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2double_rz(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2double_rd(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ull2double_ru(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def int_as_float(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float_as_int(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def uint_as_float(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def float_as_uint(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def longlong_as_double(arg0, _builder=None): + ... 
+ + +@core.extern +@dispatch +def double_as_longlong(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_sinf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_cosf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_log2f(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_logf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_expf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_tanf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_exp10f(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_log10f(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fast_powf(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def hadd(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def rhadd(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def sub_rn(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def sub_rz(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def sub_rd(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def sub_ru(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def rsqrt_rn(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ffs(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def rint(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def llrint(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def nearbyint(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def isnan(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def signbit(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def copysign(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def finitef(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def isinf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def nextafter(arg0, arg1, _builder=None): + ... 
+ + +@core.extern +@dispatch +def sin(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def cos(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def sinpi(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def cospi(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def tan(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def log2(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def exp(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def exp10(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def cosh(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def sinh(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def tanh(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def atan2(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def atan(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def asin(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def acos(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def log(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def log10(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def log1p(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def acosh(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def asinh(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def atanh(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def expm1(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def hypot(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def rhypot(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def norm3d(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def rnorm3d(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def norm4d(arg0, arg1, arg2, arg3, _builder=None): + ... + + +@core.extern +@dispatch +def rnorm4d(arg0, arg1, arg2, arg3, _builder=None): + ... 
+ + +@core.extern +@dispatch +def cbrt(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def rcbrt(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def j0(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def j1(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def y0(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def y1(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def yn(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def jn(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def cyl_bessel_i0(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def cyl_bessel_i1(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def erf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def erfinv(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def erfc(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def erfcx(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def erfcinv(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def normcdfinv(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def normcdf(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def lgamma(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def ldexp(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def scalbn(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def fmod(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def remainder(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def fma(arg0, arg1, arg2, _builder=None): + ... + + +@core.extern +@dispatch +def pow(arg0, arg1, _builder=None): + ... + + +@core.extern +@dispatch +def tgamma(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def round(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def llround(arg0, _builder=None): + ... + + +@core.extern +@dispatch +def fdim(arg0, arg1, _builder=None): + ... 
+
+
+@core.extern
+@dispatch
+def ilogb(arg0, _builder=None):
+    ...
+
+
+@core.extern
+@dispatch
+def logb(arg0, _builder=None):
+    ...
+
+
+@core.extern
+@dispatch
+def isfinited(arg0, _builder=None):
+    ...
diff --git a/vllm/lib/python3.10/site-packages/triton/language/standard.py b/vllm/lib/python3.10/site-packages/triton/language/standard.py
new file mode 100644
index 0000000000000000000000000000000000000000..de30cf260bfa4a49d4b9ca0dcc1529f79992522d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/triton/language/standard.py
@@ -0,0 +1,441 @@
+from __future__ import annotations
+
+from ..runtime.jit import jit
+from . import core
+from . import math
+
+# constexpr utilities (triton metaprogramming sucks)
+
+
+def _unwrap_if_constexpr(o):
+    # Return the raw Python value wrapped by a core.constexpr, or `o` unchanged.
+    return o.value if isinstance(o, core.constexpr) else o
+
+
+def _log2(i: core.constexpr):
+    # floor(log2(i.value)) computed at trace time by repeated right-shift;
+    # result is re-wrapped as a constexpr so it stays a compile-time constant.
+    log2 = 0
+    n = i.value
+    while n > 1:
+        n >>= 1
+        log2 += 1
+    return core.constexpr(log2)
+
+
+def _is_power_of_two(i: core.constexpr):
+    # Classic bit trick: n & (n - 1) clears the lowest set bit, so the result
+    # is zero exactly for powers of two (0 itself is excluded explicitly).
+    n = i.value
+    return core.constexpr((n & (n - 1)) == 0 and n != 0)
+
+
+# -----------------------
+# Standard library
+# -----------------------
+
+
+@core._tensor_member_fn
+@jit
+def cdiv(x, div):
+    """
+    Computes the ceiling division of :code:`x` by :code:`div`
+
+    :param x: the input number
+    :type x: Block
+    :param div: the divisor
+    :type div: Block
+    """
+    return (x + div - 1) // div
+
+
+@core._tensor_member_fn
+@jit
+@math._add_math_1arg_docstr("sigmoid")
+def sigmoid(x):
+    return 1 / (1 + math.exp(-x))
+
+
+@core._tensor_member_fn
+@jit
+@math._add_math_1arg_docstr("softmax")
+def softmax(x, ieee_rounding=False):
+    # `max` and `sum` are the reduction functions defined later in this module
+    # (they shadow the Python builtins). Subtracting the axis-0 max before
+    # exponentiating is the standard numerically stable softmax formulation.
+    z = x - max(x, 0)
+    num = math.exp(z)
+    den = sum(num, 0)
+    return math.fdiv(num, den, ieee_rounding)
+
+
+@core._tensor_member_fn
+@jit
+def ravel(x):
+    """
+    Returns a contiguous flattened view of :code:`x`.
+
+    :param x: the input tensor
+    :type x: Block
+    """
+    return core.reshape(x, [x.numel], can_reorder=True)
+
+
+@jit
+def swizzle2d(i, j, size_i, size_j, size_g):
+    """
+    Transforms indices of a row-major :code:`size_i * size_j` matrix into those
+    of one where the indices are col-major for each group of :code:`size_g`
+    rows.
+
+    For example, for :code:`size_i = size_j = 4` and :code:`size_g = 2`, it will
+    transform ::
+
+        [[0 , 1 , 2 , 3 ],
+         [4 , 5 , 6 , 7 ],
+         [8 , 9 , 10, 11],
+         [12, 13, 14, 15]]
+
+    into ::
+
+        [[0, 2, 4 , 6 ],
+         [1, 3, 5 , 7 ],
+         [8, 10, 12, 14],
+         [9, 11, 13, 15]]
+    """
+    # "unrolled index in array"
+    ij = i * size_j + j
+    # number of elements in `size_g` groups
+    # of `size_j` columns
+    size_gj = size_g * size_j
+    # index of the group in which (i,j) is
+    group_id = ij // size_gj
+    # row-index of the first element of this group
+    off_i = group_id * size_g
+    # last group may have fewer rows
+    size_g = core.minimum(size_i - off_i, size_g)
+    # new row and column indices
+    new_i = off_i + (ij % size_g)
+    new_j = (ij % size_gj) // size_g
+    return new_i, new_j
+
+
+@jit
+def zeros(shape, dtype):
+    """
+    Returns a tensor filled with the scalar value 0 for the given :code:`shape` and :code:`dtype`.
+
+    :param shape: Shape of the new array, e.g., (8, 16) or (8, )
+    :type shape: tuple of ints
+    :param dtype: Data-type of the new array, e.g., :code:`tl.float16`
+    :type dtype: DType
+    """
+    return core.full(shape, 0, dtype)
+
+
+@jit
+def zeros_like(input):
+    """
+    Creates a tensor of zeros with the same shape and type as a given tensor.
+ """ + return zeros(input.shape, input.dtype) + + +# max and argmax + + +@jit +def _argmax_combine(value1, index1, value2, index2, tie_break_left): + if tie_break_left: + tie = value1 == value2 and index1 < index2 + else: + tie = False + gt = value1 > value2 or tie + v_ret = core.where(gt, value1, value2) + i_ret = core.where(gt, index1, index2) + return v_ret, i_ret + + +@jit +def _argmax_combine_tie_break_left(value1, index1, value2, index2): + return _argmax_combine(value1, index1, value2, index2, True) + + +@jit +def _argmax_combine_tie_break_fast(value1, index1, value2, index2): + return _argmax_combine(value1, index1, value2, index2, False) + + +@jit +def _elementwise_max(a, b): + return core.maximum(a, b) + + +@core._tensor_member_fn +@jit +@core._add_reduction_docstr("maximum", return_indices_arg="return_indices", + tie_break_arg="return_indices_tie_break_left") +def max(input, axis=None, return_indices=False, return_indices_tie_break_left=True, keep_dims=False): + input = core._promote_bfloat16_to_float32(input) + if return_indices: + if return_indices_tie_break_left: + return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_left, keep_dims=keep_dims) + else: + return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_fast, keep_dims=keep_dims) + else: + if core.constexpr(input.dtype.primitive_bitwidth) < core.constexpr(32): + if core.constexpr(input.dtype.is_floating()): + input = input.to(core.float32) + else: + assert input.dtype.is_int(), "Expecting input to be integer type" + input = input.to(core.int32) + return core.reduce(input, axis, _elementwise_max, keep_dims=keep_dims) + + +@core._tensor_member_fn +@jit +@core._add_reduction_docstr("maximum index", tie_break_arg="tie_break_left") +def argmax(input, axis, tie_break_left=True, keep_dims=False): + (_, ret) = max(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left, keep_dims=keep_dims) + return ret + + +# min and argmin + + +@jit +def 
_argmin_combine(value1, index1, value2, index2, tie_break_left): + if tie_break_left: + tie = value1 == value2 and index1 < index2 + else: + tie = False + lt = value1 < value2 or tie + value_ret = core.where(lt, value1, value2) + index_ret = core.where(lt, index1, index2) + return value_ret, index_ret + + +@jit +def _argmin_combine_tie_break_left(value1, index1, value2, index2): + return _argmin_combine(value1, index1, value2, index2, True) + + +@jit +def _argmin_combine_tie_break_fast(value1, index1, value2, index2): + return _argmin_combine(value1, index1, value2, index2, False) + + +@jit +def _elementwise_min(a, b): + return core.minimum(a, b) + + +@core._tensor_member_fn +@jit +@core._add_reduction_docstr("minimum", return_indices_arg="return_indices", + tie_break_arg="return_indices_tie_break_left") +def min(input, axis=None, return_indices=False, return_indices_tie_break_left=True, keep_dims=False): + input = core._promote_bfloat16_to_float32(input) + if return_indices: + if return_indices_tie_break_left: + return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_left, keep_dims=keep_dims) + else: + return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_fast, keep_dims=keep_dims) + else: + if core.constexpr(input.dtype.primitive_bitwidth) < 32: + if core.constexpr(input.dtype.is_floating()): + input = input.to(core.float32) + else: + assert input.dtype.is_int(), "Expecting input to be integer type" + input = input.to(core.int32) + return core.reduce(input, axis, _elementwise_min, keep_dims=keep_dims) + + +@core._tensor_member_fn +@jit +@core._add_reduction_docstr("minimum index", tie_break_arg="tie_break_left") +def argmin(input, axis, tie_break_left=True, keep_dims=False): + _, ret = min(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left, keep_dims=keep_dims) + return ret + + +@jit +def _sum_combine(a, b): + return a + b + + +# sum + + +@core._tensor_member_fn +@jit 
+@core._add_reduction_docstr("sum")
+def sum(input, axis=None, keep_dims=False):
+    input = core._promote_bfloat16_to_float32(input)
+    return core.reduce(input, axis, _sum_combine, keep_dims=keep_dims)
+
+
+@jit
+def _xor_combine(a, b):
+    return a ^ b
+
+
+# xor sum
+
+
+@core._tensor_member_fn
+@core.builtin
+@core._add_reduction_docstr("xor sum")
+def xor_sum(input, axis=None, keep_dims=False, _builder=None, _generator=None):
+    # Unlike the other reductions this is a @core.builtin, so the dtype check
+    # runs in the host Python rather than in traced kernel code.
+    scalar_ty = input.type.scalar
+    if not scalar_ty.is_int():
+        raise ValueError("xor_sum only supported for integers")
+
+    input = core._promote_bfloat16_to_float32(input, _builder=_builder)
+    return core.reduce(input, axis, _xor_combine, keep_dims=keep_dims, _builder=_builder, _generator=_generator)
+
+
+# cumsum
+
+
+@core._tensor_member_fn
+@jit
+@core._add_scan_docstr("cumsum")
+def cumsum(input, axis=0, reverse=False):
+    # todo rename this to a generic function name
+    input = core._promote_bfloat16_to_float32(input)
+    return core.associative_scan(input, axis, _sum_combine, reverse)
+
+
+# cumprod
+
+
+@jit
+def _prod_combine(a, b):
+    return a * b
+
+
+@core._tensor_member_fn
+@jit
+@core._add_scan_docstr("cumprod")
+def cumprod(input, axis=0, reverse=False):
+    # todo rename this to a generic function name
+    input = core._promote_bfloat16_to_float32(input)
+    return core.associative_scan(input, axis, _prod_combine, reverse)
+
+
+# sort
+
+
+@jit
+def _compare_and_swap(x, flip, i: core.constexpr, n_dims: core.constexpr):
+    # One compare-and-swap round of the bitonic network: pairs elements that
+    # are 2**(n_dims - i - 1) apart along the last n_dims worth of elements.
+    n_outer: core.constexpr = x.numel >> n_dims
+    shape: core.constexpr = [n_outer * 2**i, 2, 2**(n_dims - i - 1)]
+    y = core.reshape(x, shape)
+    # slice left/right with 'stride' 2**(n_dims - i - 1)
+    mask = core.arange(0, 2)[None, :, None]
+    left = core.broadcast_to(sum(y * (1 - mask), 1)[:, None, :], shape)
+    right = core.broadcast_to(sum(y * mask, 1)[:, None, :], shape)
+    left = core.reshape(left, x.shape)
+    right = core.reshape(right, x.shape)
+    # actual compare-and-swap
+    # Values are bitcast to a same-width integer type so the conditional swap
+    # can be expressed as an XOR: where the predicate holds, x ^ (l ^ r)
+    # exchanges the left/right values; elsewhere the XOR with zero is a no-op.
+    idtype = core.get_int_dtype(bitwidth=x.dtype.primitive_bitwidth, signed=True)
+    ileft = left.to(idtype, bitcast=True)
+    iright = right.to(idtype, bitcast=True)
+    ix = x.to(idtype, bitcast=True)
+    ret = ix ^ core.where((left > right) ^ flip, ileft ^ iright, zeros_like(ix))
+    return ret.to(x.dtype, bitcast=True)
+
+
+@jit
+def _bitonic_merge(x, stage: core.constexpr, order: core.constexpr, n_dims: core.constexpr):
+    '''
+    order_type 0 == ascending
+    order_type 1 == descending
+    order_type 2 == alternating
+    '''
+    n_outer: core.constexpr = x.numel >> n_dims
+    core.static_assert(stage <= n_dims)
+    # flip denotes whether to re-arrange sub-sequences of elements in ascending or
+    # descending order.
+    # if flip = 00000000... then all elements will be re-arranged ascendingly at this stage
+    # if flip = 00110011... then all the elements will be re-arranged alternatingly (with
+    # a stride of 2) at this stage
+    if order == 2:
+        shape: core.constexpr = [n_outer * 2**(n_dims - 1 - stage), 2, 2**stage]
+        flip = core.reshape(core.broadcast_to(core.arange(0, 2)[None, :, None], shape), x.shape)
+    else:
+        flip = order
+    # perform `stage` rounds of `compare-and-swap`
+    for i in core.static_range(stage):
+        x = _compare_and_swap(x, flip, i + (n_dims - stage), n_dims)
+    return x
+
+
+@core._tensor_member_fn
+@jit
+def sort(x, dim: core.constexpr = None, descending: core.constexpr = core.CONSTEXPR_0):
+    # handle default dimension or check that it is the most minor dim
+    _dim: core.constexpr = len(x.shape) - 1 if dim is None else dim
+    core.static_assert(_dim == len(x.shape) - 1, "only minor dimension is currently supported")
+    # iteratively run bitonic merge-sort steps
+    # (intermediate stages use order 2 = alternating; only the final stage
+    # applies the caller's requested direction)
+    n_dims: core.constexpr = _log2(x.shape[_dim])
+    for i in core.static_range(1, n_dims + 1):
+        x = _bitonic_merge(x, i, 2 if i < n_dims else descending, n_dims)
+    return x
+
+
+# flip
+
+
+def _get_flip_dim(dim, shape):
+    # Host-side helper: normalize `dim` (constexpr or plain int, possibly None)
+    # to a concrete constexpr index into `shape`.
+    dim = _unwrap_if_constexpr(dim)
+    shape = _unwrap_if_constexpr(shape)
+    if dim is None:
+        dim = len(shape) - 1
+    assert dim == len(shape) - 1, "Currently only support flipping the last dimension"
+    return core.constexpr(dim)
+
+
+@core._tensor_member_fn
+@jit
+def flip(x, dim=None):
+    """
+    Flips a tensor `x` along the dimension `dim`.
+
+    :param x: the first input tensor
+    :type x: Block
+    :param dim: the dimension to flip along (currently only final dimension supported)
+    :type dim: int
+    """
+    # Both the flipped dimension and the total element count must be powers of
+    # two, since the implementation reshapes into a [2] * steps hypercube.
+    core.static_assert(_is_power_of_two(x.shape[_get_flip_dim(dim, x.shape)]))
+    core.static_assert(_is_power_of_two(x.numel))
+    # # reshape the tensor to have all dimensions be 2.
+    # # TODO: We shouldn't have to change the dimensions not sorted.
+    steps: core.constexpr = _log2(x.numel)
+    start: core.constexpr = _log2(x.numel) - _log2(x.shape[_get_flip_dim(dim, x.shape)])
+    y = core.reshape(x, [2] * steps)
+    y = core.expand_dims(y, start)
+    # 2x2 anti-diagonal 0/1 matrix; multiplying by it and summing reverses a
+    # length-2 axis (a one-hot permutation applied per binary axis).
+    flip = (core.arange(0, 2)[:, None] == 1 - core.arange(0, 2))
+    for i in core.static_range(start, steps):
+        flip2 = flip
+        for j in core.static_range(0, steps + 1):
+            if j != i and j != i + 1:
+                flip2 = core.expand_dims(flip2, j)
+        y = sum(y * flip2, i + 1, keep_dims=True)
+    x = core.reshape(y, x.shape)
+    return x
+
+
+@jit
+def interleave(a, b):
+    """
+    Interleaves the values of two tensors along their last dimension.
+
+    The two tensors must have the same shape.
+
+    Equivalent to `tl.join(a, b).reshape(a.shape[:-1] + [2 * a.shape[-1]])`
+    """
+    c = core.join(a, b)
+
+    assert isinstance(c.shape, list)
+    if len(c.shape) == 1:
+        # We must have interleaved two scalars.
+        return c
+    else:
+        # This `else` is necessary because Triton's AST parser doesn't
+        # understand that if we take the `if` above we definitely don't run this
+        # `else`.
+        return core.reshape(c, c.shape[:-2] + [2 * c.shape[-2]])
diff --git a/vllm/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc b/vllm/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a8e5141b73e844a68e39db4a78893eb9f52065a
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/triton/tools/__pycache__/build_extern.cpython-310.pyc differ