diff --git a/.gitattributes b/.gitattributes index e74f24d7e3a47762ee2512ab854b73a029d0aede..16ba971f4457a872834c51a7f27e1f0ae3055562 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1565,3 +1565,12 @@ vllm/lib/python3.10/site-packages/torio/lib/libtorio_ffmpeg4.so filter=lfs diff= vllm/lib/python3.10/site-packages/torio/lib/libtorio_ffmpeg6.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/torio/lib/_torio_ffmpeg5.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/torio/lib/_torio_ffmpeg4.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..5f27c39598d6f57f5ee68f6d70d6ca7c94dcdeb7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7c4ebb97b90fed30f3c3a62d1b4157dd99a13aad68363e8f4362f30baa1ba3c +size 988040 diff --git a/parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd448a392b7fa2fff1764b95cce7e384535bdee4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f83546aeaf4bb5dc2874f93e104650528f90c4f4bde27ee2be1bed2e4ae85a64 +size 130975 diff --git a/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz new file mode 100644 index 0000000000000000000000000000000000000000..a42748dba14b7ff0d2f53ce4cd5a86a4f08e5d93 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/fftpack/tests/fftw_single_ref.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:276a9141318e6fc36e4ab6ff54a61b64054ef8849b660f17359e5f541b43c526 +size 95144 diff --git a/parrot/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c1aa4d8f736e71129137e5294b5f8115de382001 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fcadead067c87106c923a55256aa79741cf876f62a8ceb441526e7556813071 
+size 295704 diff --git a/parrot/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz b/parrot/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz new file mode 100644 index 0000000000000000000000000000000000000000..8bddf805c36b29dc449556c27a2b489691f841af --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d6803c0b398f2704c236f1d1b9e8e5ede06bd165a0abb0f228281abbd455ae9 +size 2648 diff --git a/parrot/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..646a71195cecbfcd728f17acf6bffd793ae21767 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a46d261a95b0a6e49625a78ec2063eaa154fe030ab29636492125a85b663ef36 +size 147488 diff --git a/parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a7e64ca48207abe5a9eae41f7255af02de5042d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd6efa9a572b995bae93e5a6ce23030d5ed8c517d8b702ad8c97d19f1fe174b5 +size 168151 diff --git a/parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d05bff437f8af4e06cff31a34424fbe591f9d39 --- 
/dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44113792e65f53fe8ead22380c953f676868f2ed811b917d96799d21b28403b8 +size 135055 diff --git a/parrot/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8c2c1ae072a9c7b82d6570627e8a64f6d3a3f62 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbe0dd7464159b234dd57ecf31a35d6759a38b1aa0fac882ab66c1378841661a +size 115571 diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__init__.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_bdtr.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_bdtr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af3da7e79b2980430be5867a164d867137dbc184 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_bdtr.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0114eee138b6fc8adb2ef0c04fc13e208c2b845a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc 
differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdflib.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdflib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d55aa89650eaad4ed820f5b28e48a086b9d23bf8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdflib.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e8730a5eeec4a5d31320149d425a46bdd0ca29f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cephes_intp_cast.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cephes_intp_cast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..befc67a422030db11129485abab8bd2be5b4e011 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cephes_intp_cast.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cosine_distr.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cosine_distr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a21679eadecb7bd20cbc84671d3923c98958c86 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cosine_distr.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cython_special.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cython_special.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c5f504bbd42ecc8a61aaf7b8ad867e3ea201da6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_cython_special.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_data.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c73c54cadde384ccfc9a8689a0905e2b85ed93c9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_data.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_dd.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_dd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5536e4426c0f8f9d91bea8d13dfde5b8a9bcbb8e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_dd.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_digamma.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_digamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6ebda157bda44b420f4c0d86ac2589073546129 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_digamma.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ellip_harm.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ellip_harm.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..445187533de4861362d8eda48e5f96802d44c41c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ellip_harm.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_erfinv.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_erfinv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c93ede22a61837caaa145242fd6fecd48be33a9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_erfinv.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_exponential_integrals.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_exponential_integrals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ded83b9beb885fcc5d1a8a0d89fb65820224d0eb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_exponential_integrals.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_extending.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_extending.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d420d84aea5156f81934ae4a16c33ed606c8a884 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_extending.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..889d0f8df6bf55aa016fe900559d2175db237937 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_gamma.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_gamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3ebed5cbdff796c84e3a6328047595f5e95f840 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_gamma.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_gammainc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_gammainc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..717705ebe9439462e912e1c717628b3125084ee4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_gammainc.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hyp2f1.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hyp2f1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ead35aa3e79bfe287abfee9463c6e2f3a11626b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_hyp2f1.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_iv_ratio.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_iv_ratio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7088c8d97eb04a5bc3a9c8469e9a8a81f1b15f07 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_iv_ratio.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_kolmogorov.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_kolmogorov.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04ef80f45aef1c6942e8b72ba19cd35eda2d9adb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_kolmogorov.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38e01e476d5e16a0e493b24b64a8cbfbff46120b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_loggamma.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_loggamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db515c280a577962bccca4aad8336d3c37d632c3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_loggamma.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logit.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c918d15b30e51ccba2fd53b9a5915d70b89cee6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logit.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57915e7ba93baad7d8e38345d9fd1d5af9c949d3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ndtr.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ndtr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35d89391458f56967691f59b084d1b75c8604380 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ndtr.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ndtri_exp.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ndtri_exp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97f8156883a277e733332894ac8826e3b3c1aa8c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_ndtri_exp.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_orthogonal.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_orthogonal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92bb1e74409fe3dd3e340a575fbb238f6de63570 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_orthogonal.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_orthogonal_eval.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_orthogonal_eval.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..52e01ae01585f89f09d19386aa979f1045591df4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_orthogonal_eval.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_owens_t.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_owens_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06d501716a35f5c319d9fdff65e6b1099af38be9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_owens_t.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pdtr.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pdtr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3da16eef3a6f94ab3b59ea2a0cf243a9614602b4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_pdtr.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_powm1.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_powm1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bec06d97f5382a9f216c0ea881751621a3a2982c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_powm1.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_expn_asy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_expn_asy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8159786480e89d7fe38bde2ac720b59f66ce5cb5 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_expn_asy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_gammainc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_gammainc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cde0d127630c9af9099dd53f5277461e77ad5c12 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_gammainc.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb8e14881bb9f7bc7bec97e3041c63b6ac0e3267 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_precompute_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sf_error.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sf_error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c914df4141b9af131fa66fc760971d008b6e040 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sf_error.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sici.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sici.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1efe76d9e461ac12421123bff21c0baa20ba321 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sici.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_specfun.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_specfun.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd6c227fc8d0ec7d7ffc702232572494bc9717d9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_specfun.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6243e75a7cdf31d13860383ea55fa6afcfcb849c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sph_harm.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sph_harm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da0c9bb87a186717009f47428e0e8e72c1849424 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_sph_harm.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spherical_bessel.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spherical_bessel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a3d78cade3a15f3e0a32e9a4896cb1fe52ac60 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_spherical_bessel.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98c277917cd3093bf7bd3ef1b5190750aa5868b1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_trig.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_trig.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0389727c8aeeb6be2a27c8a7e8f65890f10eab2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_trig.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b7f2c1982256748a4d2551ae2be29e8285aee87 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc0842b3873edd8e6f9d21920f20c7be727b972c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_zeta.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_zeta.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a80d61dac28933ac4c3080e8293af4065487199e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_zeta.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/_cython_examples/extending.pyx b/parrot/lib/python3.10/site-packages/scipy/special/tests/_cython_examples/extending.pyx new file mode 100644 index 0000000000000000000000000000000000000000..ca3bf2167f0f7726f8b0acb60ed8b8798a518d79 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/_cython_examples/extending.pyx @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +#cython: language_level=3 +#cython: boundscheck=False +#cython: wraparound=False + +from scipy.special.cython_special cimport beta, gamma + +cpdef double cy_beta(double a, double b): + return beta(a, b) + +cpdef double complex cy_gamma(double complex z): + return gamma(z) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/_cython_examples/meson.build b/parrot/lib/python3.10/site-packages/scipy/special/tests/_cython_examples/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..2a5e1535a16f840f31ca0207513e7c060767ea12 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/_cython_examples/meson.build @@ -0,0 +1,25 @@ +project('random-build-examples', 'c', 'cpp', 'cython') + +fs = import('fs') + +py3 = import('python').find_installation(pure: false) + +cy = meson.get_compiler('cython') + +if not cy.version().version_compare('>=3.0.8') + error('tests requires Cython >= 3.0.8') +endif + +py3.extension_module( + 'extending', + 'extending.pyx', + install: false, +) + +extending_cpp = fs.copyfile('extending.pyx', 'extending_cpp.pyx') +py3.extension_module( + 'extending_cpp', + extending_cpp, + install: false, + override_options : ['cython_language=cpp'] +) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_bdtr.py 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_bdtr.py new file mode 100644 index 0000000000000000000000000000000000000000..57694becc49b2028f17eac819b80a225ac010795 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_bdtr.py @@ -0,0 +1,112 @@ +import numpy as np +import scipy.special as sc +import pytest +from numpy.testing import assert_allclose, assert_array_equal, suppress_warnings + + +class TestBdtr: + def test(self): + val = sc.bdtr(0, 1, 0.5) + assert_allclose(val, 0.5) + + def test_sum_is_one(self): + val = sc.bdtr([0, 1, 2], 2, 0.5) + assert_array_equal(val, [0.25, 0.75, 1.0]) + + def test_rounding(self): + double_val = sc.bdtr([0.1, 1.1, 2.1], 2, 0.5) + int_val = sc.bdtr([0, 1, 2], 2, 0.5) + assert_array_equal(double_val, int_val) + + @pytest.mark.parametrize('k, n, p', [ + (np.inf, 2, 0.5), + (1.0, np.inf, 0.5), + (1.0, 2, np.inf) + ]) + def test_inf(self, k, n, p): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + val = sc.bdtr(k, n, p) + assert np.isnan(val) + + def test_domain(self): + val = sc.bdtr(-1.1, 1, 0.5) + assert np.isnan(val) + + +class TestBdtrc: + def test_value(self): + val = sc.bdtrc(0, 1, 0.5) + assert_allclose(val, 0.5) + + def test_sum_is_one(self): + val = sc.bdtrc([0, 1, 2], 2, 0.5) + assert_array_equal(val, [0.75, 0.25, 0.0]) + + def test_rounding(self): + double_val = sc.bdtrc([0.1, 1.1, 2.1], 2, 0.5) + int_val = sc.bdtrc([0, 1, 2], 2, 0.5) + assert_array_equal(double_val, int_val) + + @pytest.mark.parametrize('k, n, p', [ + (np.inf, 2, 0.5), + (1.0, np.inf, 0.5), + (1.0, 2, np.inf) + ]) + def test_inf(self, k, n, p): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + val = sc.bdtrc(k, n, p) + assert np.isnan(val) + + def test_domain(self): + val = sc.bdtrc(-1.1, 1, 0.5) + val2 = sc.bdtrc(2.1, 1, 0.5) + assert np.isnan(val2) + assert_allclose(val, 1.0) + + def test_bdtr_bdtrc_sum_to_one(self): + bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5) + bdtrc_vals = 
sc.bdtrc([0, 1, 2], 2, 0.5) + vals = bdtr_vals + bdtrc_vals + assert_allclose(vals, [1.0, 1.0, 1.0]) + + +class TestBdtri: + def test_value(self): + val = sc.bdtri(0, 1, 0.5) + assert_allclose(val, 0.5) + + def test_sum_is_one(self): + val = sc.bdtri([0, 1], 2, 0.5) + actual = np.asarray([1 - 1/np.sqrt(2), 1/np.sqrt(2)]) + assert_allclose(val, actual) + + def test_rounding(self): + double_val = sc.bdtri([0.1, 1.1], 2, 0.5) + int_val = sc.bdtri([0, 1], 2, 0.5) + assert_allclose(double_val, int_val) + + @pytest.mark.parametrize('k, n, p', [ + (np.inf, 2, 0.5), + (1.0, np.inf, 0.5), + (1.0, 2, np.inf) + ]) + def test_inf(self, k, n, p): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + val = sc.bdtri(k, n, p) + assert np.isnan(val) + + @pytest.mark.parametrize('k, n, p', [ + (-1.1, 1, 0.5), + (2.1, 1, 0.5) + ]) + def test_domain(self, k, n, p): + val = sc.bdtri(k, n, p) + assert np.isnan(val) + + def test_bdtr_bdtri_roundtrip(self): + bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5) + roundtrip_vals = sc.bdtri([0, 1, 2], 2, bdtr_vals) + assert_allclose(roundtrip_vals, [0.5, 0.5, np.nan]) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cephes_intp_cast.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cephes_intp_cast.py new file mode 100644 index 0000000000000000000000000000000000000000..05f3d1ae5c101ff50c75d1065e5e234063d192e4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_cephes_intp_cast.py @@ -0,0 +1,29 @@ +import pytest +import numpy as np +from scipy.special._ufuncs import ( + _smirnovc, _smirnovci, _smirnovp, + _struve_asymp_large_z, _struve_bessel_series, _struve_power_series, + bdtr, bdtrc, bdtri, expn, kn, nbdtr, nbdtrc, nbdtri, pdtri, + smirnov, smirnovi, yn +) + + +# +# For each ufunc here, verify that the default integer type, np.intp, +# can be safely cast to the integer type found in the input type signatures. 
+# For this particular set of functions, the code expects to find just one +# integer type among the input signatures. +# +@pytest.mark.parametrize( + 'ufunc', + [_smirnovc, _smirnovci, _smirnovp, + _struve_asymp_large_z, _struve_bessel_series, _struve_power_series, + bdtr, bdtrc, bdtri, expn, kn, nbdtr, nbdtrc, nbdtri, pdtri, + smirnov, smirnovi, yn], +) +def test_intp_safe_cast(ufunc): + int_chars = {'i', 'l', 'q'} + int_input = [set(sig.split('->')[0]) & int_chars for sig in ufunc.types] + int_char = ''.join(s.pop() if s else '' for s in int_input) + assert len(int_char) == 1, "More integer types in the signatures than expected" + assert np.can_cast(np.intp, np.dtype(int_char)) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_exponential_integrals.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_exponential_integrals.py new file mode 100644 index 0000000000000000000000000000000000000000..8332a83267e2f75dded04e80443c150c832676c8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_exponential_integrals.py @@ -0,0 +1,118 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose +import scipy.special as sc + + +class TestExp1: + + def test_branch_cut(self): + assert np.isnan(sc.exp1(-1)) + assert sc.exp1(complex(-1, 0)).imag == ( + -sc.exp1(complex(-1, -0.0)).imag + ) + + assert_allclose( + sc.exp1(complex(-1, 0)), + sc.exp1(-1 + 1e-20j), + atol=0, + rtol=1e-15 + ) + assert_allclose( + sc.exp1(complex(-1, -0.0)), + sc.exp1(-1 - 1e-20j), + atol=0, + rtol=1e-15 + ) + + def test_834(self): + # Regression test for #834 + a = sc.exp1(-complex(19.9999990)) + b = sc.exp1(-complex(19.9999991)) + assert_allclose(a.imag, b.imag, atol=0, rtol=1e-15) + + +class TestScaledExp1: + + @pytest.mark.parametrize('x, expected', [(0, 0), (np.inf, 1)]) + def test_limits(self, x, expected): + y = sc._ufuncs._scaled_exp1(x) + assert y == expected + + # The expected values were computed with 
mpmath, e.g.: + # + # from mpmath import mp + # mp.dps = 80 + # x = 1e-25 + # print(float(x*mp.exp(x)*np.expint(1, x))) + # + # prints 5.698741165994961e-24 + # + # The method used to compute _scaled_exp1 changes at x=1 + # and x=1250, so values at those inputs, and values just + # above and below them, are included in the test data. + @pytest.mark.parametrize('x, expected', + [(1e-25, 5.698741165994961e-24), + (0.1, 0.20146425447084518), + (0.9995, 0.5962509885831002), + (1.0, 0.5963473623231941), + (1.0005, 0.5964436833238044), + (2.5, 0.7588145912149602), + (10.0, 0.9156333393978808), + (100.0, 0.9901942286733019), + (500.0, 0.9980079523802055), + (1000.0, 0.9990019940238807), + (1249.5, 0.9992009578306811), + (1250.0, 0.9992012769377913), + (1250.25, 0.9992014363957858), + (2000.0, 0.9995004992514963), + (1e4, 0.9999000199940024), + (1e10, 0.9999999999), + (1e15, 0.999999999999999), + ]) + def test_scaled_exp1(self, x, expected): + y = sc._ufuncs._scaled_exp1(x) + assert_allclose(y, expected, rtol=2e-15) + + +class TestExpi: + + @pytest.mark.parametrize('result', [ + sc.expi(complex(-1, 0)), + sc.expi(complex(-1, -0.0)), + sc.expi(-1) + ]) + def test_branch_cut(self, result): + desired = -0.21938393439552027368 # Computed using Mpmath + assert_allclose(result, desired, atol=0, rtol=1e-14) + + def test_near_branch_cut(self): + lim_from_above = sc.expi(-1 + 1e-20j) + lim_from_below = sc.expi(-1 - 1e-20j) + assert_allclose( + lim_from_above.real, + lim_from_below.real, + atol=0, + rtol=1e-15 + ) + assert_allclose( + lim_from_above.imag, + -lim_from_below.imag, + atol=0, + rtol=1e-15 + ) + + def test_continuity_on_positive_real_axis(self): + assert_allclose( + sc.expi(complex(1, 0)), + sc.expi(complex(1, -0.0)), + atol=0, + rtol=1e-15 + ) + + +class TestExpn: + + def test_out_of_domain(self): + assert all(np.isnan([sc.expn(-1, 1.0), sc.expn(1, -1.0)])) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_extending.py 
b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_extending.py new file mode 100644 index 0000000000000000000000000000000000000000..57ab39a9d489c2e5a624106a708b5b7f882a7d46 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_extending.py @@ -0,0 +1,24 @@ +import os +import platform + +import pytest + +from scipy._lib._testutils import IS_EDITABLE,_test_cython_extension, cython +from scipy.special import beta, gamma + + +@pytest.mark.fail_slow(20) +# essential per https://github.com/scipy/scipy/pull/20487#discussion_r1567057247 +@pytest.mark.skipif(IS_EDITABLE, + reason='Editable install cannot find .pxd headers.') +@pytest.mark.skipif(platform.machine() in ["wasm32", "wasm64"], + reason="Can't start subprocess") +@pytest.mark.skipif(cython is None, reason="requires cython") +def test_cython(tmp_path): + srcdir = os.path.dirname(os.path.dirname(__file__)) + extensions, extensions_cpp = _test_cython_extension(tmp_path, srcdir) + # actually test the cython c-extensions + assert extensions.cy_beta(0.5, 0.1) == beta(0.5, 0.1) + assert extensions.cy_gamma(0.5 + 1.0j) == gamma(0.5 + 1.0j) + assert extensions_cpp.cy_beta(0.5, 0.1) == beta(0.5, 0.1) + assert extensions_cpp.cy_gamma(0.5 + 1.0j) == gamma(0.5 + 1.0j) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_iv_ratio.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_iv_ratio.py new file mode 100644 index 0000000000000000000000000000000000000000..d814d8d0669559f140b200361da74f38ec65d8db --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_iv_ratio.py @@ -0,0 +1,136 @@ +# This file contains unit tests for the iv_ratio() function. 
+ +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy.special._ufuncs import _iv_ratio as iv_ratio # type: ignore[attr-defined] + + +class TestIvRatio: + + @pytest.mark.parametrize('v,x,r', [ + (1, 0.3380952380952381, 0.1666773049170313), + (1, 0.7083333333333333, 0.33366443586989925), + (1, 1.1666666666666667, 0.5023355231537423), + (1, 1.8666666666666665, 0.674616572252164), + (1, 3.560606060606061, 0.844207659503163), + (2.34, 0.7975238095238094, 0.16704903081553285), + (2.34, 1.7133333333333334, 0.3360215931268845), + (2.34, 2.953333333333333, 0.50681909317803), + (2.34, 5.0826666666666656, 0.6755252698800679), + (2.34, 10.869696969696973, 0.8379351104498762), + (56.789, 19.46575238095238, 0.1667020505391409), + (56.789, 42.55008333333333, 0.33353809996933026), + (56.789, 75.552, 0.5003932381177826), + (56.789, 135.76026666666667, 0.6670528221946127), + (56.789, 307.8642424242425, 0.8334999441460798), + ]) + def test_against_reference_values(self, v, x, r): + """The reference values are computed using mpmath as follows. + + from mpmath import mp + mp.dps = 100 + + def iv_ratio_mp(v, x): + return mp.besseli(v, x) / mp.besseli(v - 1, x) + + def _sample(n, *, v): + '''Return n positive real numbers x such that iv_ratio(v, x) are + roughly evenly spaced over (0, 1). The formula is taken from [1]. + + [1] Banerjee A., Dhillon, I. S., Ghosh, J., Sra, S. (2005). + "Clustering on the Unit Hypersphere using von Mises-Fisher + Distributions." Journal of Machine Learning Research, + 6(46):1345-1382. 
+ ''' + r = np.arange(1, n+1) / (n+1) + return r * (2*v-r*r) / (1-r*r) + + for v in (1, 2.34, 56.789): + xs = _sample(5, v=v) + for x in xs: + print(f"({v}, {x}, {float(iv_ratio_mp_float(v,x))}),") + """ + assert_allclose(iv_ratio(v, x), r, rtol=4e-16, atol=0) + + @pytest.mark.parametrize('v,x,r', [ + (1, np.inf, 1), + (np.inf, 1, 0), + ]) + def test_inf(self, v, x, r): + """If exactly one of v or x is inf and the other is within domain, + should return 0 or 1 accordingly. + + Also check that the function + never returns -0.0.""" + assert_equal(iv_ratio(v, x), r) + + @pytest.mark.parametrize('v', [np.nextafter(1, 0), -np.inf, np.nan, np.inf]) + @pytest.mark.parametrize('x', [-np.finfo(float).smallest_normal, + -np.finfo(float).smallest_subnormal, + -np.inf, np.nan, np.inf]) + def test_nan(self, v, x): + """If at least one argument is out of domain, or if v = x = inf, + the function should return nan.""" + assert_equal(iv_ratio(v, x), np.nan) + + @pytest.mark.parametrize('v', [1, np.finfo(float).max, np.inf]) + def test_zero_x(self, v): + """If x is +/-0.0, return x to agree with the limiting behavior.""" + assert_equal(iv_ratio(v, 0.0), 0.0) + assert_equal(iv_ratio(v, -0.0), -0.0) + + @pytest.mark.parametrize('v,x', [ + (1, np.finfo(float).smallest_normal), + (1, np.finfo(float).smallest_subnormal), + (1, np.finfo(float).smallest_subnormal*2), + (1e20, 123), + (np.finfo(float).max, 1), + (np.finfo(float).max, np.sqrt(np.finfo(float).max)), + ]) + def test_tiny_x(self, v, x): + """If x is much less than v, the bounds + + x x + --------------------------- <= R <= ----------------------- + v-0.5+sqrt(x**2+(v+0.5)**2) v-1+sqrt(x**2+(v+1)**2) + + collapses to R ~= x/2v. Test against this asymptotic expression. 
+ """ + assert_equal(iv_ratio(v, x), (0.5*x)/v) + + @pytest.mark.parametrize('v,x', [ + (1, 1e16), + (1e20, 1e40), + (np.sqrt(np.finfo(float).max), np.finfo(float).max), + ]) + def test_huge_x(self, v, x): + """If x is much greater than v, the bounds + + x x + --------------------------- <= R <= ----------------------- + v-0.5+sqrt(x**2+(v+0.5)**2) v-1+sqrt(x**2+(v+1)**2) + + collapses to R ~= 1. Test against this asymptotic expression. + """ + assert_equal(iv_ratio(v, x), 1.0) + + @pytest.mark.parametrize('v,x', [ + (np.finfo(float).max, np.finfo(float).max), + (np.finfo(float).max / 3, np.finfo(float).max), + (np.finfo(float).max, np.finfo(float).max / 3), + ]) + def test_huge_v_x(self, v, x): + """If both x and v are very large, the bounds + + x x + --------------------------- <= R <= ----------------------- + v-0.5+sqrt(x**2+(v+0.5)**2) v-1+sqrt(x**2+(v+1)**2) + + collapses to R ~= x/(v+sqrt(x**2+v**2). Test against this asymptotic + expression, and in particular that no numerical overflow occurs during + intermediate calculations. 
+ """ + t = x / v + expected = t / (1 + np.hypot(1, t)) + assert_allclose(iv_ratio(v, x), expected, rtol=4e-16, atol=0) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_kolmogorov.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_kolmogorov.py new file mode 100644 index 0000000000000000000000000000000000000000..bc427b0584ab87307c50ffb120fb8bc66a26df5a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_kolmogorov.py @@ -0,0 +1,495 @@ +import itertools +import sys +import pytest + +import numpy as np +from numpy.testing import assert_ +from scipy.special._testutils import FuncData + +from scipy.special import kolmogorov, kolmogi, smirnov, smirnovi +from scipy.special._ufuncs import (_kolmogc, _kolmogci, _kolmogp, + _smirnovc, _smirnovci, _smirnovp) + +_rtol = 1e-10 + +class TestSmirnov: + def test_nan(self): + assert_(np.isnan(smirnov(1, np.nan))) + + def test_basic(self): + dataset = [(1, 0.1, 0.9), + (1, 0.875, 0.125), + (2, 0.875, 0.125 * 0.125), + (3, 0.875, 0.125 * 0.125 * 0.125)] + + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_0(self): + dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_1(self): + dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, 
dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_0point5(self): + dataset = [(1, 0.5, 0.5), + (2, 0.5, 0.25), + (3, 0.5, 0.166666666667), + (4, 0.5, 0.09375), + (5, 0.5, 0.056), + (6, 0.5, 0.0327932098765), + (7, 0.5, 0.0191958707681), + (8, 0.5, 0.0112953186035), + (9, 0.5, 0.00661933257355), + (10, 0.5, 0.003888705)] + + dataset = np.asarray(dataset) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_1(self): + x = np.linspace(0, 1, 101, endpoint=True) + dataset = np.column_stack([[1]*len(x), x, 1-x]) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_2(self): + x = np.linspace(0.5, 1, 101, endpoint=True) + p = np.power(1-x, 2) + n = np.array([2] * len(x)) + dataset = np.column_stack([n, x, p]) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_3(self): + x = np.linspace(0.7, 1, 31, endpoint=True) + p = np.power(1-x, 3) + n = np.array([3] * len(x)) + dataset = np.column_stack([n, x, p]) + FuncData( + smirnov, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData( + _smirnovc, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_large(self): + # test for large values of n + # Probabilities should go down as n goes up + x = 0.4 + pvals = np.array([smirnov(n, x) for n in range(400, 1100, 20)]) + dfs = np.diff(pvals) + assert_(np.all(dfs <= 0), msg='Not all 
diffs negative %s' % dfs) + + +class TestSmirnovi: + def test_nan(self): + assert_(np.isnan(smirnovi(1, np.nan))) + + def test_basic(self): + dataset = [(1, 0.4, 0.6), + (1, 0.6, 0.4), + (1, 0.99, 0.01), + (1, 0.01, 0.99), + (2, 0.125 * 0.125, 0.875), + (3, 0.125 * 0.125 * 0.125, 0.875), + (10, 1.0 / 16 ** 10, 1 - 1.0 / 16)] + + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_0(self): + dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_1(self): + dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_1(self): + pp = np.linspace(0, 1, 101, endpoint=True) + # dataset = np.array([(1, p, 1-p) for p in pp]) + dataset = np.column_stack([[1]*len(pp), pp, 1-pp]) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_2(self): + x = np.linspace(0.5, 1, 101, endpoint=True) + p = np.power(1-x, 2) + n = np.array([2] * len(x)) + dataset = np.column_stack([n, p, x]) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 
1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_n_equals_3(self): + x = np.linspace(0.7, 1, 31, endpoint=True) + p = np.power(1-x, 3) + n = np.array([3] * len(x)) + dataset = np.column_stack([n, p, x]) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_round_trip(self): + def _sm_smi(n, p): + return smirnov(n, smirnovi(n, p)) + + def _smc_smci(n, p): + return _smirnovc(n, _smirnovci(n, p)) + + dataset = [(1, 0.4, 0.4), + (1, 0.6, 0.6), + (2, 0.875, 0.875), + (3, 0.875, 0.875), + (3, 0.125, 0.125), + (10, 0.999, 0.999), + (10, 0.0001, 0.0001)] + + dataset = np.asarray(dataset) + FuncData( + _sm_smi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + FuncData( + _smc_smci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_x_equals_0point5(self): + dataset = [(1, 0.5, 0.5), + (2, 0.5, 0.366025403784), + (2, 0.25, 0.5), + (3, 0.5, 0.297156508177), + (4, 0.5, 0.255520481121), + (5, 0.5, 0.234559536069), + (6, 0.5, 0.21715965898), + (7, 0.5, 0.202722580034), + (8, 0.5, 0.190621765256), + (9, 0.5, 0.180363501362), + (10, 0.5, 0.17157867006)] + + dataset = np.asarray(dataset) + FuncData( + smirnovi, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData( + _smirnovci, dataset, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + +class TestSmirnovp: + def test_nan(self): + assert_(np.isnan(_smirnovp(1, np.nan))) + + def test_basic(self): + # Check derivative at endpoints + n1_10 = np.arange(1, 10) + dataset0 = np.column_stack([n1_10, + np.full_like(n1_10, 0), + np.full_like(n1_10, -1)]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + 
n2_10 = np.arange(2, 10) + dataset1 = np.column_stack([n2_10, + np.full_like(n2_10, 1.0), + np.full_like(n2_10, 0)]) + FuncData( + _smirnovp, dataset1, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_oneminusoneovern(self): + # Check derivative at x=1-1/n + n = np.arange(1, 20) + x = 1.0/n + xm1 = 1-1.0/n + pp1 = -n * x**(n-1) + pp1 -= (1-np.sign(n-2)**2) * 0.5 # n=2, x=0.5, 1-1/n = 0.5, need to adjust + dataset1 = np.column_stack([n, xm1, pp1]) + FuncData( + _smirnovp, dataset1, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_oneovertwon(self): + # Check derivative at x=1/2n (Discontinuous at x=1/n, so check at x=1/2n) + n = np.arange(1, 20) + x = 1.0/2/n + pp = -(n*x+1) * (1+x)**(n-2) + dataset0 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + def test_oneovern(self): + # Check derivative at x=1/n + # (Discontinuous at x=1/n, hard to tell if x==1/n, only use n=power of 2) + n = 2**np.arange(1, 10) + x = 1.0/n + pp = -(n*x+1) * (1+x)**(n-2) + 0.5 + dataset0 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + @pytest.mark.xfail(sys.maxsize <= 2**32, + reason="requires 64-bit platform") + def test_oneovernclose(self): + # Check derivative at x=1/n + # (Discontinuous at x=1/n, test on either side: x=1/n +/- 2epsilon) + n = np.arange(3, 20) + + x = 1.0/n - 2*np.finfo(float).eps + pp = -(n*x+1) * (1+x)**(n-2) + dataset0 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset0, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + x = 1.0/n + 2*np.finfo(float).eps + pp = -(n*x+1) * (1+x)**(n-2) + 1 + dataset1 = np.column_stack([n, x, pp]) + FuncData( + _smirnovp, dataset1, (0, 1), 2, rtol=_rtol + ).check(dtypes=[int, float, float]) + + +class TestKolmogorov: + def test_nan(self): + assert_(np.isnan(kolmogorov(np.nan))) + + def test_basic(self): + 
dataset = [(0, 1.0), + (0.5, 0.96394524366487511), + (0.8275735551899077, 0.5000000000000000), + (1, 0.26999967167735456), + (2, 0.00067092525577969533)] + + dataset = np.asarray(dataset) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + + def test_linspace(self): + x = np.linspace(0, 2.0, 21) + dataset = [1.0000000000000000, 1.0000000000000000, 0.9999999999994950, + 0.9999906941986655, 0.9971923267772983, 0.9639452436648751, + 0.8642827790506042, 0.7112351950296890, 0.5441424115741981, + 0.3927307079406543, 0.2699996716773546, 0.1777181926064012, + 0.1122496666707249, 0.0680922218447664, 0.0396818795381144, + 0.0222179626165251, 0.0119520432391966, 0.0061774306344441, + 0.0030676213475797, 0.0014636048371873, 0.0006709252557797] + + dataset_c = [0.0000000000000000, 6.609305242245699e-53, 5.050407338670114e-13, + 9.305801334566668e-06, 0.0028076732227017, 0.0360547563351249, + 0.1357172209493958, 0.2887648049703110, 0.4558575884258019, + 0.6072692920593457, 0.7300003283226455, 0.8222818073935988, + 0.8877503333292751, 0.9319077781552336, 0.9603181204618857, + 0.9777820373834749, 0.9880479567608034, 0.9938225693655559, + 0.9969323786524203, 0.9985363951628127, 0.9993290747442203] + + dataset = np.column_stack([x, dataset]) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + dataset_c = np.column_stack([x, dataset_c]) + FuncData(_kolmogc, dataset_c, (0,), 1, rtol=_rtol).check() + + def test_linspacei(self): + p = np.linspace(0, 1.0, 21, endpoint=True) + dataset = [np.inf, 1.3580986393225507, 1.2238478702170823, + 1.1379465424937751, 1.0727491749396481, 1.0191847202536859, + 0.9730633753323726, 0.9320695842357622, 0.8947644549851197, + 0.8601710725555463, 0.8275735551899077, 0.7964065373291559, + 0.7661855555617682, 0.7364542888171910, 0.7067326523068980, + 0.6764476915028201, 0.6448126061663567, 0.6105590999244391, + 0.5711732651063401, 0.5196103791686224, 0.0000000000000000] + + dataset_c = [0.0000000000000000, 0.5196103791686225, 
0.5711732651063401, + 0.6105590999244391, 0.6448126061663567, 0.6764476915028201, + 0.7067326523068980, 0.7364542888171910, 0.7661855555617682, + 0.7964065373291559, 0.8275735551899077, 0.8601710725555463, + 0.8947644549851196, 0.9320695842357622, 0.9730633753323727, + 1.0191847202536859, 1.0727491749396481, 1.1379465424937754, + 1.2238478702170825, 1.3580986393225509, np.inf] + + dataset = np.column_stack([p[1:], dataset[1:]]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + dataset_c = np.column_stack([p[:-1], dataset_c[:-1]]) + FuncData(_kolmogci, dataset_c, (0,), 1, rtol=_rtol).check() + + def test_smallx(self): + epsilon = 0.1 ** np.arange(1, 14) + x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217, + 0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254, + 0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658, + 0.19487060742]) + + dataset = np.column_stack([x, 1-epsilon]) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + + def test_round_trip(self): + def _ki_k(_x): + return kolmogi(kolmogorov(_x)) + + def _kci_kc(_x): + return _kolmogci(_kolmogc(_x)) + + x = np.linspace(0.0, 2.0, 21, endpoint=True) + # Exclude 0.1, 0.2. 0.2 almost makes succeeds, but 0.1 has no chance. 
+ x02 = x[(x == 0) | (x > 0.21)] + dataset02 = np.column_stack([x02, x02]) + FuncData(_ki_k, dataset02, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([x, x]) + FuncData(_kci_kc, dataset, (0,), 1, rtol=_rtol).check() + + +class TestKolmogi: + def test_nan(self): + assert_(np.isnan(kolmogi(np.nan))) + + def test_basic(self): + dataset = [(1.0, 0), + (0.96394524366487511, 0.5), + (0.9, 0.571173265106), + (0.5000000000000000, 0.8275735551899077), + (0.26999967167735456, 1), + (0.00067092525577969533, 2)] + + dataset = np.asarray(dataset) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + def test_smallpcdf(self): + epsilon = 0.5 ** np.arange(1, 55, 3) + # kolmogi(1-p) == _kolmogci(p) if 1-(1-p) == p, but not necessarily otherwise + # Use epsilon s.t. 1-(1-epsilon)) == epsilon, + # so can use same x-array for both results + + x = np.array([0.8275735551899077, 0.5345255069097583, 0.4320114038786941, + 0.3736868442620478, 0.3345161714909591, 0.3057833329315859, + 0.2835052890528936, 0.2655578150208676, 0.2506869966107999, + 0.2380971058736669, 0.2272549289962079, 0.2177876361600040, + 0.2094254686862041, 0.2019676748836232, 0.1952612948137504, + 0.1891874239646641, 0.1836520225050326, 0.1785795904846466]) + + dataset = np.column_stack([1-epsilon, x]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([epsilon, x]) + FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check() + + def test_smallpsf(self): + epsilon = 0.5 ** np.arange(1, 55, 3) + # kolmogi(p) == _kolmogci(1-p) if 1-(1-p) == p, but not necessarily otherwise + # Use epsilon s.t. 
1-(1-epsilon)) == epsilon, + # so can use same x-array for both results + + x = np.array([0.8275735551899077, 1.3163786275161036, 1.6651092133663343, + 1.9525136345289607, 2.2027324540033235, 2.4272929437460848, + 2.6327688477341593, 2.8233300509220260, 3.0018183401530627, + 3.1702735084088891, 3.3302184446307912, 3.4828258153113318, + 3.6290214150152051, 3.7695513262825959, 3.9050272690877326, + 4.0359582187082550, 4.1627730557884890, 4.2858371743264527]) + + dataset = np.column_stack([epsilon, x]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([1-epsilon, x]) + FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check() + + def test_round_trip(self): + def _k_ki(_p): + return kolmogorov(kolmogi(_p)) + + p = np.linspace(0.1, 1.0, 10, endpoint=True) + dataset = np.column_stack([p, p]) + FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check() + + +class TestKolmogp: + def test_nan(self): + assert_(np.isnan(_kolmogp(np.nan))) + + def test_basic(self): + dataset = [(0.000000, -0.0), + (0.200000, -1.532420541338916e-10), + (0.400000, -0.1012254419260496), + (0.600000, -1.324123244249925), + (0.800000, -1.627024345636592), + (1.000000, -1.071948558356941), + (1.200000, -0.538512430720529), + (1.400000, -0.2222133182429472), + (1.600000, -0.07649302775520538), + (1.800000, -0.02208687346347873), + (2.000000, -0.005367402045629683)] + + dataset = np.asarray(dataset) + FuncData(_kolmogp, dataset, (0,), 1, rtol=_rtol).check() diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_owens_t.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_owens_t.py new file mode 100644 index 0000000000000000000000000000000000000000..8d15aead25302023c5f07d8392c0931995764ced --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_owens_t.py @@ -0,0 +1,53 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose + +import scipy.special as sc + + +def test_symmetries(): + 
np.random.seed(1234) + a, h = np.random.rand(100), np.random.rand(100) + assert_equal(sc.owens_t(h, a), sc.owens_t(-h, a)) + assert_equal(sc.owens_t(h, a), -sc.owens_t(h, -a)) + + +def test_special_cases(): + assert_equal(sc.owens_t(5, 0), 0) + assert_allclose(sc.owens_t(0, 5), 0.5*np.arctan(5)/np.pi, + rtol=5e-14) + # Target value is 0.5*Phi(5)*(1 - Phi(5)) for Phi the CDF of the + # standard normal distribution + assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07, + rtol=5e-14) + + +def test_nans(): + assert_equal(sc.owens_t(20, np.nan), np.nan) + assert_equal(sc.owens_t(np.nan, 20), np.nan) + assert_equal(sc.owens_t(np.nan, np.nan), np.nan) + + +def test_infs(): + h, a = 0, np.inf + # T(0, a) = 1/2Ï€ * arctan(a) + res = 1/(2*np.pi) * np.arctan(a) + assert_allclose(sc.owens_t(h, a), res, rtol=5e-14) + assert_allclose(sc.owens_t(h, -a), -res, rtol=5e-14) + + h = 1 + # Refer Owens T function definition in Wikipedia + # https://en.wikipedia.org/wiki/Owen%27s_T_function + # Value approximated through Numerical Integration + # using scipy.integrate.quad + # quad(lambda x: 1/(2*pi)*(exp(-0.5*(1*1)*(1+x*x))/(1+x*x)), 0, inf) + res = 0.07932762696572854 + assert_allclose(sc.owens_t(h, np.inf), res, rtol=5e-14) + assert_allclose(sc.owens_t(h, -np.inf), -res, rtol=5e-14) + + assert_equal(sc.owens_t(np.inf, 1), 0) + assert_equal(sc.owens_t(-np.inf, 1), 0) + + assert_equal(sc.owens_t(np.inf, np.inf), 0) + assert_equal(sc.owens_t(-np.inf, np.inf), 0) + assert_equal(sc.owens_t(np.inf, -np.inf), -0.0) + assert_equal(sc.owens_t(-np.inf, -np.inf), -0.0) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c42aa688081fb58f79ad2c8ea932d03b33523b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_pcf.py @@ -0,0 +1,24 @@ +"""Tests for parabolic cylinder functions. 
+ +""" +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import scipy.special as sc + + +def test_pbwa_segfault(): + # Regression test for https://github.com/scipy/scipy/issues/6208. + # + # Data generated by mpmath. + # + w = 1.02276567211316867161 + wp = -0.48887053372346189882 + assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0) + + +def test_pbwa_nan(): + # Check that NaN's are returned outside of the range in which the + # implementation is accurate. + pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)] + for p in pts: + assert_equal(sc.pbwa(*p), (np.nan, np.nan)) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_powm1.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_powm1.py new file mode 100644 index 0000000000000000000000000000000000000000..3d809963f64ddaedf6b59de80dcd5f7ca8fa18a9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_powm1.py @@ -0,0 +1,65 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose +from scipy.special import powm1 + + +# Expected values were computed with mpmath, e.g. 
+# +# >>> import mpmath +# >>> mpmath.np.dps = 200 +# >>> print(float(mpmath.powm1(2.0, 1e-7)) +# 6.931472045825965e-08 +# +powm1_test_cases = [ + (1.25, 0.75, 0.18217701125396976, 1e-15), + (2.0, 1e-7, 6.931472045825965e-08, 1e-15), + (25.0, 5e-11, 1.6094379125636148e-10, 1e-15), + (0.99996, 0.75, -3.0000150002530058e-05, 1e-15), + (0.9999999999990905, 20, -1.81898940353014e-11, 1e-15), + (-1.25, 751.0, -6.017550852453444e+72, 2e-15) +] + + +@pytest.mark.parametrize('x, y, expected, rtol', powm1_test_cases) +def test_powm1(x, y, expected, rtol): + p = powm1(x, y) + assert_allclose(p, expected, rtol=rtol) + + +@pytest.mark.parametrize('x, y, expected', + [(0.0, 0.0, 0.0), + (0.0, -1.5, np.inf), + (0.0, 1.75, -1.0), + (-1.5, 2.0, 1.25), + (-1.5, 3.0, -4.375), + (np.nan, 0.0, 0.0), + (1.0, np.nan, 0.0), + (1.0, np.inf, 0.0), + (1.0, -np.inf, 0.0), + (np.inf, 7.5, np.inf), + (np.inf, -7.5, -1.0), + (3.25, np.inf, np.inf), + (np.inf, np.inf, np.inf), + (np.inf, -np.inf, -1.0), + (np.inf, 0.0, 0.0), + (-np.inf, 0.0, 0.0), + (-np.inf, 2.0, np.inf), + (-np.inf, 3.0, -np.inf), + (-1.0, float(2**53 - 1), -2.0)]) +def test_powm1_exact_cases(x, y, expected): + # Test cases where we have an exact expected value. + p = powm1(x, y) + assert p == expected + + +@pytest.mark.parametrize('x, y', + [(-1.25, 751.03), + (-1.25, np.inf), + (np.nan, np.nan), + (-np.inf, -np.inf), + (-np.inf, 2.5)]) +def test_powm1_return_nan(x, y): + # Test cases where the expected return value is nan. 
+ p = powm1(x, y) + assert np.isnan(p) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_precompute_utils.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_precompute_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..89616b92329691ca76039fe11a7e08f7f3db1150 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_precompute_utils.py @@ -0,0 +1,36 @@ +import pytest + +from scipy.special._testutils import MissingModule, check_version +from scipy.special._mptestutils import mp_assert_allclose +from scipy.special._precompute.utils import lagrange_inversion + +try: + import sympy +except ImportError: + sympy = MissingModule('sympy') + +try: + import mpmath as mp +except ImportError: + mp = MissingModule('mpmath') + + +@pytest.mark.slow +@check_version(sympy, '0.7') +@check_version(mp, '0.19') +class TestInversion: + @pytest.mark.xfail_on_32bit("rtol only 2e-9, see gh-6938") + def test_log(self): + with mp.workdps(30): + logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10) + expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10) + invlogcoeffs = lagrange_inversion(logcoeffs) + mp_assert_allclose(invlogcoeffs, expcoeffs) + + @pytest.mark.xfail_on_32bit("rtol only 1e-15, see gh-6938") + def test_sin(self): + with mp.workdps(30): + sincoeffs = mp.taylor(mp.sin, 0, 10) + asincoeffs = mp.taylor(mp.asin, 0, 10) + invsincoeffs = lagrange_inversion(sincoeffs) + mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_specfun.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_specfun.py new file mode 100644 index 0000000000000000000000000000000000000000..d096f60dd65380a9847d5d5beb7380523445fc5c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_specfun.py @@ -0,0 +1,40 @@ +""" +Various made-up tests to hit different branches of the code in specfun.c +""" + +import numpy as 
np +from numpy.testing import assert_allclose +from scipy import special + + +def test_cva2_cv0_branches(): + res, resp = special.mathieu_cem([40, 129], [13, 14], [30, 45]) + assert_allclose(res, np.array([-0.3741211, 0.74441928])) + assert_allclose(resp, np.array([-37.02872758, -86.13549877])) + + res, resp = special.mathieu_sem([40, 129], [13, 14], [30, 45]) + assert_allclose(res, np.array([0.92955551, 0.66771207])) + assert_allclose(resp, np.array([-14.91073448, 96.02954185])) + + +def test_chgm_branches(): + res = special.eval_genlaguerre(-3.2, 3, 2.5) + assert_allclose(res, -0.7077721935779854) + + +def test_hygfz_branches(): + """(z == 1.0) && (c-a-b > 0.0)""" + res = special.hyp2f1(1.5, 2.5, 4.5, 1.+0.j) + assert_allclose(res, 10.30835089459151+0j) + """(cabs(z+1) < eps) && (fabs(c-a+b - 1.0) < eps)""" + res = special.hyp2f1(5+5e-16, 2, 2, -1.0 + 5e-16j) + assert_allclose(res, 0.031249999999999986+3.9062499999999994e-17j) + + +def test_pro_rad1(): + # https://github.com/scipy/scipy/issues/21058 + # Reference values taken from WolframAlpha + # SpheroidalS1(1, 1, 30, 1.1) + # SpheroidalS1Prime(1, 1, 30, 1.1) + res = special.pro_rad1(1, 1, 30, 1.1) + assert_allclose(res, (0.009657872296166435, 3.253369651472877), rtol=2e-5) diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spence.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spence.py new file mode 100644 index 0000000000000000000000000000000000000000..fbb26ac281dff81ea71b30318731065fe5a78f94 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_spence.py @@ -0,0 +1,32 @@ +import numpy as np +from numpy import sqrt, log, pi +from scipy.special._testutils import FuncData +from scipy.special import spence + + +def test_consistency(): + # Make sure the implementation of spence for real arguments + # agrees with the implementation of spence for imaginary arguments. 
+ + x = np.logspace(-30, 300, 200) + dataset = np.vstack((x + 0j, spence(x))).T + FuncData(spence, dataset, 0, 1, rtol=1e-14).check() + + +def test_special_points(): + # Check against known values of Spence's function. + + phi = (1 + sqrt(5))/2 + dataset = [(1, 0), + (2, -pi**2/12), + (0.5, pi**2/12 - log(2)**2/2), + (0, pi**2/6), + (-1, pi**2/4 - 1j*pi*log(2)), + ((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2), + ((3 - sqrt(5))/2, pi**2/10 - log(phi)**2), + (phi, -pi**2/15 + log(phi)**2/2), + # Corrected from Zagier, "The Dilogarithm Function" + ((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)] + + dataset = np.asarray(dataset) + FuncData(spence, dataset, 0, 1, rtol=1e-14).check() diff --git a/parrot/lib/python3.10/site-packages/scipy/special/tests/test_ufunc_signatures.py b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_ufunc_signatures.py new file mode 100644 index 0000000000000000000000000000000000000000..6bc3ffae15ab4620c4e752df166721825cb7449c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/special/tests/test_ufunc_signatures.py @@ -0,0 +1,46 @@ +"""Test that all ufuncs have float32-preserving signatures. + +This was once guaranteed through the code generation script for +generating ufuncs, `scipy/special/_generate_pyx.py`. Starting with +gh-20260, SciPy developers have begun moving to generate ufuncs +through direct use of the NumPy C API (through C++). Existence of +float32 preserving signatures must now be tested since it is no +longer guaranteed. 
+""" + +import numpy as np +import pytest +import scipy.special._ufuncs +import scipy.special._gufuncs + +_ufuncs = [] +for funcname in dir(scipy.special._ufuncs): + _ufuncs.append(getattr(scipy.special._ufuncs, funcname)) +for funcname in dir(scipy.special._gufuncs): + _ufuncs.append(getattr(scipy.special._gufuncs, funcname)) + +# Not all module members are actually ufuncs +_ufuncs = [func for func in _ufuncs if isinstance(func, np.ufunc)] + +@pytest.mark.parametrize("ufunc", _ufuncs) +def test_ufunc_signatures(ufunc): + + # From _generate_pyx.py + # "Don't add float32 versions of ufuncs with integer arguments, as this + # can lead to incorrect dtype selection if the integer arguments are + # arrays, but float arguments are scalars. + # For instance sph_harm(0,[0],0,0).dtype == complex64 + # This may be a NumPy bug, but we need to work around it. + # cf. gh-4895, https://github.com/numpy/numpy/issues/5895" + types = set(sig for sig in ufunc.types + if not ("l" in sig or "i" in sig or "q" in sig or "p" in sig)) + + # Generate the full expanded set of signatures which should exist. There + # should be matching float and double versions of any existing signature. 
+ expanded_types = set() + for sig in types: + expanded_types.update( + [sig.replace("d", "f").replace("D", "F"), + sig.replace("f", "d").replace("F", "D")] + ) + assert types == expanded_types diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d132871135e72f46172326d4d316c30acc89122e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43c4accafa80076ab0261a902011d20b792a10db7e2ea1faba4f83d66254dce9 +size 124305 diff --git a/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3e6b95e9bf01f8224b5289cd7b530be5b15f607 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:539512de2d73fa90cbfb11dd98f04856c6b14a3ff710712e8ec26f41224d97db +size 368146 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7930f0898c0521da266ebbe79d0ec5cd23fb5bbd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _empty_per_channel_affine_quantized { + using schema = at::Tensor (c10::SymIntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_empty_per_channel_affine_quantized") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor") + static at::Tensor call(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); +}; + +struct TORCH_API _empty_per_channel_affine_quantized_out { + using schema = at::Tensor & (c10::SymIntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_empty_per_channel_affine_quantized") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + 
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional memory_format, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b87f4ea671785326b61c63e983bf4357c7fc8e5b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API void _foreach_floor_out(at::TensorList out, at::TensorList self); +TORCH_API void _foreach_floor_outf(at::TensorList self, at::TensorList out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ff2a91c12644e69fd44ab8f252c262f57610d561 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _lstm_mps { + using schema = ::std::tuple (const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_lstm_mps") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +}; + +struct TORCH_API _lstm_mps_out { + using schema = ::std::tuple (const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_lstm_mps") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) 
out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))") + static ::std::tuple call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..406098009966baf37efea23e78407119aacdd936 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & _nested_tensor_strides_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & _nested_tensor_strides_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d5c48829c5b9e271af872659253938357dcbb2b9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _prelu_kernel { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_prelu_kernel") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_prelu_kernel(Tensor self, Tensor weight) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..a7130019366087af497a9d1dde6b4b50552b48ee --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward.h @@ -0,0 +1,47 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? 
scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) +inline ::std::tuple _scaled_dot_product_flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); +} +namespace symint { + template ::value>> + ::std::tuple _scaled_dot_product_flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } +} + +// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? 
scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) +inline ::std::tuple _scaled_dot_product_flash_attention_backward_symint(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); +} +namespace symint { + template ::value>> + ::std::tuple _scaled_dot_product_flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale=c10::nullopt) { + return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h new file mode 100644 index 0000000000000000000000000000000000000000..62f812925f15c4ff5556f1bb3f7eb648df15a61e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _to_dense_out(const at::Tensor & self, c10::optional dtype, c10::optional masked_grad, at::Tensor & out); +TORCH_API at::Tensor sparse_to_dense(const at::Tensor & self, c10::optional dtype=c10::nullopt, c10::optional masked_grad=c10::nullopt); +TORCH_API at::Tensor sparse_compressed_to_dense(const at::Tensor & self, c10::optional dtype=c10::nullopt, c10::optional masked_grad=c10::nullopt); +TORCH_API at::Tensor mkldnn_to_dense(const at::Tensor & self, c10::optional dtype=c10::nullopt, c10::optional masked_grad=c10::nullopt); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7503e6723adf9f6cd23f8dac5d9cdda29b2be6a1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor as_strided_tensorimpl(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt); +TORCH_API at::Tensor as_strided_tensorimpl_meta_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt); +TORCH_API at::Tensor as_strided_qtensorimpl(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt); +TORCH_API const at::Tensor & as_strided__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt); +} // 
namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f7930cf8f1730d785d335f5e4ca2bd9aec9c56dd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fill_diagonal_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill_diagonal_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill_diagonal_(Tensor(a!) 
self, Scalar fill_value, bool wrap=False) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & fill_value, bool wrap); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & fill_value, bool wrap); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6fcd9d57a143ee5564ce762bcdb7bfb3d24f8c52 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor index_put(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_out(at::Tensor & out, const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_outf(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & index_put_(at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fe65643c17f8337f74995bc6491175664d66c6f2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple mps_convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask); +TORCH_API ::std::tuple mps_convolution_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple mps_convolution_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask); +TORCH_API ::std::tuple mps_convolution_backward_symint_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/result_type_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/result_type_native.h new file mode 100644 index 0000000000000000000000000000000000000000..65afcffbbbb12c753476be7708674d2aa133cb1f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/result_type_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::ScalarType result_type(const at::Tensor & tensor, const at::Tensor & other); +TORCH_API at::ScalarType result_type(const at::Tensor & tensor, const at::Scalar & other); +TORCH_API at::ScalarType result_type(const at::Scalar & scalar, const at::Tensor & tensor); +TORCH_API at::ScalarType result_type(const at::Scalar & scalar1, const at::Scalar & scalar2); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/triton/compiler/__pycache__/errors.cpython-310.pyc b/vllm/lib/python3.10/site-packages/triton/compiler/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df90b5e7a13fbff858b95e5c97d0a29025233768 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/triton/compiler/__pycache__/errors.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/triton/compiler/code_generator.py b/vllm/lib/python3.10/site-packages/triton/compiler/code_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..6903052ca21966fcf4edb29d8429458457c51051 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/triton/compiler/code_generator.py @@ -0,0 +1,1302 @@ +import ast +import inspect +import re +import sys +import warnings +import os +import textwrap +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union +from .. 
import language +from .._C.libtriton import ir +from ..language import constexpr, tensor, str_to_ty +from ..runtime.jit import _normalize_ty +# ideally we wouldn't need any runtime component +from ..runtime import JITFunction +from .errors import (CompilationError, CompileTimeAssertionFailure, UnsupportedLanguageConstruct) +from types import ModuleType + + +def mangle_ty(ty): + if ty.is_ptr(): + return 'P' + mangle_ty(ty.element_ty) + if ty.is_int(): + SIGNED = language.dtype.SIGNEDNESS.SIGNED + prefix = 'i' if ty.int_signedness == SIGNED else 'u' + return prefix + str(ty.int_bitwidth) + if ty.is_floating(): + return str(ty) + if ty.is_block(): + elt = mangle_ty(ty.scalar) + shape = '_'.join(map(str, ty.shape)) + return f'{elt}S{shape}S' + if ty.is_void(): + return 'V' + assert False, "Unsupported type" + + +def mangle_fn(name, arg_tys, constants): + # doesn't mangle ret type, which must be a function of arg tys + mangled_arg_names = '_'.join([mangle_ty(ty) for ty in arg_tys]) + mangled_constants = '_'.join([f'{i}c{repr(constants[i])}' for i in sorted(constants)]) + mangled_constants = mangled_constants.replace('.', '_d_') + mangled_constants = mangled_constants.replace("'", '_sq_') + # [ and ] are not allowed in LLVM identifiers + mangled_constants = mangled_constants.replace('[', '_').replace(']', '_') + ret = f'{name}__{mangled_arg_names}__{mangled_constants}' + return ret + + +def _is_triton_tensor(o: Any) -> bool: + return isinstance(o, tensor) + + +def _is_constexpr(o: Any) -> bool: + return isinstance(o, constexpr) + + +def _is_triton_scalar(o: Any) -> bool: + return _is_triton_tensor(o) and (not o.type.is_block() or o.type.numel == 1) + + +def _is_list_like(o: Any) -> bool: + return isinstance(o, (list, tuple)) + + +def _unwrap_if_constexpr(o: Any): + return o.value if isinstance(o, constexpr) else o + + +def _check_fn_args(node, fn, args): + if fn.noinline: + for idx, arg in enumerate(args): + if not _is_constexpr(arg) and not _is_triton_scalar(arg): + 
raise UnsupportedLanguageConstruct( + fn.src, node, + f'Function {fn.__name__} is marked noinline, but was called with non-scalar argument {fn.arg_names[idx]}:{arg}' + ) + + +def _get_fn_file_line(fn): + base_fn = fn + while not isinstance(base_fn, JITFunction): + base_fn = base_fn.fn + file_name = base_fn.fn.__code__.co_filename + lines, begin_line = inspect.getsourcelines(base_fn.fn) + # Match the following pattern: + # @triton.autotune(...) <- foo.__code__.co_firstlineno + # @triton.heuristics(...) + # @triton.jit + # def foo(...): <- this line is the first line + for idx, line in enumerate(lines): + if line.strip().startswith("def "): + begin_line += idx + break + return file_name, begin_line + + +_condition_types = {bool, int, type(None)} # Python types accepted for conditionals inside kernels + + +class enter_sub_region: + + def __init__(self, generator): + self.generator = generator + + def __enter__(self): + # record lscope & local_defs in the parent scope + self.liveins = self.generator.lscope.copy() + self.prev_defs = self.generator.local_defs.copy() + self.generator.local_defs = {} + self.insert_block = self.generator.builder.get_insertion_block() + self.insert_point = self.generator.builder.get_insertion_point() + return self.liveins, self.insert_block + + def __exit__(self, *args, **kwargs): + self.generator.builder.restore_insertion_point(self.insert_point) + self.generator.lscope = self.liveins + self.generator.local_defs = self.prev_defs + + +# Check if the given syntax node has an "early" return +class ContainsReturnChecker(ast.NodeVisitor): + + def __init__(self, gscope): + self.gscope = gscope + + def _visit_stmts(self, body) -> bool: + for s in body: + if self.visit(s): + return True + return False + + def _visit_function(self, fn) -> bool: + # Currently we only support JITFunctions defined in the global scope + if isinstance(fn, JITFunction) and not fn.noinline: + fn_node = fn.parse() + return ContainsReturnChecker(self.gscope).visit(fn_node) + 
return False + + def generic_visit(self, node) -> bool: + ret = False + for _, value in ast.iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, ast.AST): + ret = ret or self.visit(item) + elif isinstance(value, ast.AST): + ret = ret or self.visit(value) + return ret + + def visit_Attribute(self, node: ast.Attribute) -> bool: + # If the left part is a name, it's possible that + # we call triton native function or a jit function from another module. + # If the left part is not a name, it must return a tensor or a constexpr + # whose methods do not contain return statements + # e.g., (tl.load(x)).to(y) + # So we only check if the expressions within value have return or not + if isinstance(node.value, ast.Name): + if node.value.id in self.gscope: + value = self.gscope[node.value.id] + fn = getattr(value, node.attr) + return self._visit_function(fn) + return False + return self.visit(node.value) + + def visit_Name(self, node: ast.Name) -> bool: + if type(node.ctx) == ast.Store: + return False + if node.id in self.gscope: + fn = self.gscope[node.id] + return self._visit_function(fn) + return False + + def visit_Return(self, node: ast.Return) -> bool: + return True + + def visit_Assign(self, node: ast.Assign) -> bool: + # There couldn't be an early return + # x = ... + return False + + def visit_AugAssign(self, node: ast.AugAssign) -> bool: + # There couldn't be an early return + # x += ... 
+ return False + + def visit_Module(self, node: ast.Module) -> bool: + return self._visit_stmts(node.body) + + def visit_FunctionDef(self, node: ast.FunctionDef) -> bool: + return self._visit_stmts(node.body) + + def visit_If(self, node: ast.If) -> bool: + # TODO: optimize the following case in which we actually don't have + # a return when static_cond is false: + # if dynamic_cond + # if static_cond + # func_with_return + # else + # func_without_return + ret = self._visit_stmts(node.body) + if node.orelse: + ret = ret or self._visit_stmts(node.orelse) + return ret + + def visit_IfExp(self, node: ast.IfExp) -> bool: + return self.visit(node.body) or self.visit(node.orelse) + + def visit_Call(self, node: ast.Call) -> bool: + return self.visit(node.func) + + +class CodeGenerator(ast.NodeVisitor): + + def __init__(self, context, prototype, gscope, attributes, constants, function_name, jit_fn: JITFunction, options, + codegen_fns, debug=None, module=None, is_kernel=False, function_types: Optional[Dict] = None, + noinline=False, file_name: Optional[str] = None, begin_line=0): + self.context = context + self.builder = ir.builder(context) + self.file_name = file_name + # node.lineno starts from 1, so we need to subtract 1 + self.begin_line = begin_line - 1 + self.builder.set_loc(file_name, begin_line, 0) + self.builder.options = options + # dict of functions provided by the backend. Below are the list of possible functions: + # Convert custom types not natively supported on HW. 
+ # convert_custom_types(intput_tensor, dtype, fp_downcast_rounding=None, _builder=None) + self.builder.codegen_fns = codegen_fns + self.module = self.builder.create_module() if module is None else module + self.function_ret_types = {} if function_types is None else function_types + self.prototype = prototype + self.gscope = gscope + self.lscope = dict() + self.attributes = attributes + self.constants = constants + self.jit_fn = jit_fn + self.function_name = function_name + self.is_kernel = is_kernel + self.cur_node = None + self.debug = options.debug if debug is None else debug + self.noinline = noinline + self.scf_stack = [] + self.ret_type = None + # SSA-construction + # name => language.tensor + self.local_defs: Dict[str, tensor] = {} + self.dereference_name: Callable[[str], Any] = self._define_name_lookup() + self.fn = None + # Are we currently visiting an ast.arg's default value? These have some + # special handling. + self.visiting_arg_default_value = False + + builtin_namespace: Dict[str, Any] = {_.__name__: _ for _ in (len, list, range, float, int, isinstance, getattr)} + builtin_namespace.update(( + ('print', language.core.device_print), + ('min', language.minimum), + ('max', language.maximum), + )) + + def _unsupported(self, node, message): + return UnsupportedLanguageConstruct(self.jit_fn.src, node, message) + + def _is_constexpr_global(self, name): + absent_marker = object() + val = self.gscope.get(name, absent_marker) + if val is absent_marker: + return False + + if _is_constexpr(val): + return True + + if a := self.gscope.get("__annotations__", {}).get(name): + return _normalize_ty(a) == "constexpr" + + return False + + def _define_name_lookup(self): + + def local_lookup(name: str, absent): + # this needs to be re-fetched from `self` every time, because it gets switched occasionally + return self.lscope.get(name, absent) + + def global_lookup(name: str, absent): + val = self.gscope.get(name, absent) + # The high-level rule is that only constexpr 
globals are allowed. + # But actually a bunch of other things, such as module imports, are + # technically Python globals. We have to allow these too! + if (val is absent # + or name in self.builtin_namespace # + or type(val) == ModuleType # + or isinstance(val, JITFunction) # + or getattr(val, "__triton_builtin__", False) # + or getattr(val, "__module__", "").startswith("triton.language") # + or isinstance(val, language.dtype) # + or self._is_constexpr_global(name) # + # Allow accesses to globals while visiting an ast.arg + # because you should be able to do + # @triton.jit def fn(x: tl.constexpr = GLOBAL): ... + or self.visiting_arg_default_value # + or os.environ.get("TRITON_ALLOW_NON_CONSTEXPR_GLOBALS", "0") == "1"): + return val + raise NameError( + textwrap.dedent(f"""\ + Cannot access global variable {name} from within @jit'ed + function. Triton kernels can only access global variables that + are annotated as constexpr (`x: triton.language.constexpr = 42` + or `x = triton.language.constexpr(42)`). Alternatively, set the + envvar TRITON_ALLOW_NON_CONSTEXPR_GLOBALS=1, but we do not + promise to support this forever.""").replace("\n", " ")) + + absent_marker = object() + + def name_lookup(name: str) -> Any: + absent = absent_marker + for lookup_function in local_lookup, global_lookup, self.builtin_namespace.get: + value = lookup_function(name, absent) + if value is not absent: + return value + raise NameError(f'{name} is not defined') + + return name_lookup + + def set_value(self, name: str, value: Union[tensor, constexpr]) -> None: + ''' This function: + called by visit_Assign() & visit_FunctionDef() to store left value (lvalue) + 1. record local defined name (FIXME: should consider control flow) + 2. store tensor in self.lvalue + ''' + self.lscope[name] = value + self.local_defs[name] = value + + def _get_insertion_point_and_loc(self): + # XXX: this is a hack to get the location of the insertion point. 
+ # The insertion point's location could be invalid sometimes, + # so we need to explicitly set the location + loc = self.builder.get_loc() + ip = self.builder.get_insertion_point() + return ip, loc + + def _set_insertion_point_and_loc(self, ip, loc): + self.builder.restore_insertion_point(ip) + self.builder.set_loc(loc) + + # + # AST visitor + # + def visit_compound_statement(self, stmts): + # Ensure that stmts is iterable + if not _is_list_like(stmts): + stmts = [stmts] + for stmt in stmts: + self.visit(stmt) + + # Stop parsing as soon as we hit a `return` statement; everything + # after this is dead code. + if isinstance(stmt, ast.Return): + break + + def visit_Module(self, node): + ast.NodeVisitor.generic_visit(self, node) + + def visit_List(self, node): + ctx = self.visit(node.ctx) + assert ctx is None + elts = [self.visit(elt) for elt in node.elts] + return elts + + # By design, only non-kernel functions can return + def visit_Return(self, node): + ret_value = self.visit(node.value) + # ret_block = self.builder.create_block() + # post_ret_block = self.builder.create_block() + # self.builder.create_branch(ret_block) + # self.builder.set_insertion_point_to_end(ret_block) + if ret_value is None: + self.builder.ret([]) + ret_ty = language.void + elif isinstance(ret_value, tuple): + ret_values = [language.core._to_tensor(v, self.builder) for v in ret_value] + ret_types = [v.type for v in ret_values] + self.builder.ret([v.handle for v in ret_values]) + ret_ty = tuple(ret_types) + else: + ret = language.core._to_tensor(ret_value, self.builder) + self.builder.ret([ret.handle]) + ret_ty = ret.type + # self.builder.create_branch(post_ret_block) + # self.builder.set_insertion_point_to_end(post_ret_block) + + if self.ret_type is None: + self.ret_type = ret_ty + elif self.ret_type != ret_ty: + raise TypeError(f'Inconsistent return types: {self.ret_type} and {ret_ty}') + + def visit_FunctionDef(self, node): + arg_names, kwarg_names = self.visit(node.args) + if self.fn: + 
raise self._unsupported(node, "nested function definition is not supported.") + # initialize defaults + for i, default_value in enumerate(node.args.defaults): + arg_node = node.args.args[-i - 1] + annotation = arg_node.annotation + name = arg_node.arg + st_target = ast.Name(id=name, ctx=ast.Store()) + if annotation is None: + init_node = ast.Assign(targets=[st_target], value=default_value) + else: + init_node = ast.AnnAssign(target=st_target, value=default_value, annotation=annotation) + + try: + assert not self.visiting_arg_default_value + self.visiting_arg_default_value = True + self.visit(init_node) + finally: + self.visiting_arg_default_value = False + + # initialize function + visibility = "public" if self.is_kernel else "private" + self.fn = self.builder.get_or_insert_function(self.module, self.function_name, + self.prototype.to_ir(self.builder), visibility, self.noinline) + self.module.push_back(self.fn) + entry = self.fn.add_entry_block() + arg_values = [] + idx = 0 + for i, arg_name in enumerate(arg_names): + if i in self.constants: + cst = self.constants[i] + if not _is_constexpr(cst): + cst = constexpr(self.constants[i]) + arg_values.append(cst) + continue + else: + if i in self.attributes: + for name, value in self.attributes[i]: + self.fn.set_arg_attr(idx, name, value) + arg_values.append(tensor(self.fn.args(idx), self.prototype.param_types[idx])) + idx += 1 + + insert_pt = self.builder.get_insertion_block() + for arg_name, arg_value in zip(arg_names, arg_values): + self.set_value(arg_name, arg_value) + self.builder.set_insertion_point_to_start(entry) + # visit function body + self.visit_compound_statement(node.body) + # finalize function + if self.ret_type is None or self.ret_type == language.void: + self.ret_type = language.void + self.builder.ret([]) + else: + # update return type + if isinstance(self.ret_type, tuple): + self.prototype.ret_types = list(self.ret_type) + self.fn.reset_type(self.prototype.to_ir(self.builder)) + else: + 
self.prototype.ret_types = [self.ret_type] + self.fn.reset_type(self.prototype.to_ir(self.builder)) + if insert_pt: + self.builder.set_insertion_point_to_end(insert_pt) + # Remove dead code + self.fn.finalize() + + def visit_arguments(self, node): + arg_names = [] + for arg in node.args: + arg_names += [self.visit(arg)] + kwarg_names = self.visit(node.kwarg) + return arg_names, kwarg_names + + def visit_arg(self, node): + ast.NodeVisitor.generic_visit(self, node) + return node.arg + + def visit_AnnAssign(self, node): + # extract attributes + annotation = self.visit(node.annotation) + target = self.visit(node.target) + value = self.visit(node.value) + # constexpr + if annotation == constexpr: + if target in self.lscope: + raise ValueError(f'{target} is already defined.' + f' constexpr cannot be reassigned.') + if not _is_constexpr(value): + value = constexpr(value) + self.lscope[target] = value + return self.lscope[target] + # default: call visit_Assign + return self.visit_Assign(node) + + def visit_Assign(self, node): + _names = [] + for target in node.targets: + _names += [self.visit(target)] + if len(_names) > 1: + raise self._unsupported(node, "simultaneous multiple assignment is not supported.") + names = _names[0] + values = self.visit(node.value) + if not _is_list_like(names): + names = [names] + if not _is_list_like(values): + values = [values] + native_nontensor_types = (language.dtype, ) + for name, value in zip(names, values): + # by default, constexpr are assigned into python variable + value = _unwrap_if_constexpr(value) + if value is not None and \ + not _is_triton_tensor(value) and \ + not isinstance(value, native_nontensor_types): + value = language.core._to_tensor(value, self.builder) + self.set_value(name, value) + + def visit_AugAssign(self, node): + name = node.target.id + lhs = ast.Name(id=name, ctx=ast.Load()) + rhs = ast.BinOp(lhs, node.op, node.value) + assign = ast.Assign(targets=[node.target], value=rhs) + self.visit(assign) + return 
self.dereference_name(name) + + def visit_Name(self, node): + if type(node.ctx) == ast.Store: + return node.id + return self.dereference_name(node.id) + + def visit_Store(self, node): + ast.NodeVisitor.generic_visit(self, node) + + def visit_Load(self, node): + ast.NodeVisitor.generic_visit(self, node) + + def visit_Tuple(self, node): + args = [self.visit(x) for x in node.elts] + return tuple(args) + + def _apply_binary_method(self, method_name, lhs, rhs): + # TODO: raise something meaningful if getattr fails below, esp for reverse method + if _is_triton_tensor(lhs): + return getattr(lhs, method_name)(rhs, _builder=self.builder) + if _is_triton_tensor(rhs): + reverse_method_name = re.sub(r"__(.*)__", r"__r\1__", method_name) + return getattr(rhs, reverse_method_name)(lhs, _builder=self.builder) + return getattr(lhs, method_name)(rhs) + + def visit_BinOp(self, node): + lhs = self.visit(node.left) + rhs = self.visit(node.right) + method_name = self._method_name_for_bin_op.get(type(node.op)) + if method_name is None: + raise self._unsupported(node, + "AST binary operator '{}' is not (currently) implemented.".format(node.op.__name__)) + return self._apply_binary_method(method_name, lhs, rhs) + + _method_name_for_bin_op: Dict[Type[ast.operator], str] = { + ast.Add: '__add__', + ast.Sub: '__sub__', + ast.Mult: '__mul__', + ast.Div: '__truediv__', + ast.FloorDiv: '__floordiv__', + ast.Mod: '__mod__', + ast.Pow: '__pow__', + ast.LShift: '__lshift__', + ast.RShift: '__rshift__', + ast.BitAnd: '__and__', + ast.BitOr: '__or__', + ast.BitXor: '__xor__', + } + + def visit_then_else_blocks(self, node, liveins, then_block, else_block): + # then block + self.builder.set_insertion_point_to_start(then_block) + self.visit_compound_statement(node.body) + then_block = self.builder.get_insertion_block() + then_defs = self.local_defs.copy() + # else block + else_defs = {} + if node.orelse: + self.builder.set_insertion_point_to_start(else_block) + self.lscope = liveins.copy() + 
self.local_defs = {} + self.visit_compound_statement(node.orelse) + else_defs = self.local_defs.copy() + else_block = self.builder.get_insertion_block() + + # update block arguments + names = [] + ret_types = [] + ir_ret_types = [] + # variables in livein whose value is updated in `if` + for name in liveins: + # check type + for defs, block_name in [(then_defs, 'then'), (else_defs, 'else')]: + if name in defs: + assert defs[name].type == liveins[name].type, \ + f'initial value for `{name}` is of type {liveins[name].type}, '\ + f'but the {block_name} block redefines it as {defs[name].type}' + if name in then_defs or name in else_defs: + names.append(name) + ret_types.append(then_defs[name].type if name in then_defs else else_defs[name].type) + ir_ret_types.append(then_defs[name].handle.get_type() if name in + then_defs else else_defs[name].handle.get_type()) + # variable defined in then but not in else + if name in then_defs and name not in else_defs: + else_defs[name] = liveins[name] + # variable defined in else but not in then + if name in else_defs and name not in then_defs: + then_defs[name] = liveins[name] + # variables that are both in then and else but not in liveins + # TODO: could probably be cleaned up + for name in then_defs.keys() & else_defs.keys(): + if name in names: + continue + then_ty = then_defs[name].type + else_ty = else_defs[name].type + assert then_ty == else_ty, \ + f'mismatched type for {name} between then block ({then_ty}) '\ + f'and else block ({else_ty})' + names.append(name) + ret_types.append(then_ty) + ir_ret_types.append(then_defs[name].handle.get_type()) + + return then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types + + def visit_if_top_level(self, cond, node): + has_endif_block = True + with enter_sub_region(self) as sr: + liveins, ip_block = sr + then_block = self.builder.create_block() + else_block = self.builder.create_block() + # create basic-block after conditional + endif_block = 
self.builder.create_block() + # create branch + self.builder.set_insertion_point_to_end(ip_block) + self.builder.create_cond_branch(cond.handle, then_block, else_block) + # visit then and else blocks + then_defs, else_defs, then_block, else_block, names, ret_types, ir_ret_types = \ + self.visit_then_else_blocks(node, liveins, then_block, else_block) + # then terminator + self.builder.set_insertion_point_to_end(then_block) + if then_block.has_return() and else_block.has_return(): + has_endif_block = False + endif_block.erase() + if not then_block.has_terminator() and has_endif_block: + self.builder.create_branch(endif_block, [then_defs[n].handle for n in names]) + # else terminator + self.builder.set_insertion_point_to_end(else_block) + if not else_block.has_terminator() and has_endif_block: + self.builder.create_branch(endif_block, [else_defs[n].handle for n in names]) + if has_endif_block: + for ty in ir_ret_types: + endif_block.add_argument(ty) + if has_endif_block: + # change block + self.builder.set_insertion_point_to_start(endif_block) + # update value + for i, name in enumerate(names): + new_tensor = language.core.tensor(endif_block.arg(i), ret_types[i]) + self.set_value(name, new_tensor) + + # TODO: refactor + def visit_if_scf(self, cond, node): + with enter_sub_region(self) as sr: + liveins, _ = sr + ip, last_loc = self._get_insertion_point_and_loc() + then_block = self.builder.create_block() + else_block = self.builder.create_block() if node.orelse else None + then_defs, else_defs, then_block, else_block, names, ret_types, _ = \ + self.visit_then_else_blocks(node, liveins, then_block, else_block) + # create if op + self._set_insertion_point_and_loc(ip, last_loc) + if_op = self.builder.create_if_op([ty.to_ir(self.builder) for ty in ret_types], cond.handle, True) + then_block.merge_block_before(if_op.get_then_block()) + self.builder.set_insertion_point_to_end(if_op.get_then_block()) + if len(names) > 0: + self.builder.create_yield_op([then_defs[n].handle for 
n in names]) + if not node.orelse: + else_block = if_op.get_else_block() + else: + else_block.merge_block_before(if_op.get_else_block()) + self.builder.set_insertion_point_to_end(if_op.get_else_block()) + if len(names) > 0: + self.builder.create_yield_op([else_defs[n].handle for n in names]) + # update values + for i, name in enumerate(names): + new_tensor = language.core.tensor(if_op.get_result(i), ret_types[i]) + self.set_value(name, new_tensor) + + def visit_If(self, node): + cond = self.visit(node.test) + if _is_triton_tensor(cond): + cond = cond.to(language.int1, _builder=self.builder) + contains_return = ContainsReturnChecker(self.gscope).visit(node) + if self.scf_stack and contains_return: + raise self._unsupported( + node, "Cannot have `return` statements inside `while` or `for` statements in triton " + "(note that this also applies to `return` statements that are inside functions " + "transitively called from within `while`/`for` statements)") + elif self.scf_stack or not contains_return: + self.visit_if_scf(cond, node) + else: + self.visit_if_top_level(cond, node) + else: + cond = _unwrap_if_constexpr(cond) + # not isinstance - we insist the real thing, no subclasses and no ducks + if type(cond) not in _condition_types: + raise self._unsupported( + node, "`if` conditionals can only accept values of type {{{}}}, not objects of type {}".format( + ', '.join(_.__name__ for _ in _condition_types), + type(cond).__name__)) + if cond: + self.visit_compound_statement(node.body) + else: + self.visit_compound_statement(node.orelse) + + def visit_IfExp(self, node): + cond = self.visit(node.test) + if _is_triton_tensor(cond): + cond = cond.to(language.int1, _builder=self.builder) + # TODO: Deal w/ more complicated return types (e.g tuple) + with enter_sub_region(self): + ip, last_loc = self._get_insertion_point_and_loc() + + then_block = self.builder.create_block() + self.builder.set_insertion_point_to_start(then_block) + then_val = 
language.core._to_tensor(self.visit(node.body), self.builder) + then_block = self.builder.get_insertion_block() + + else_block = self.builder.create_block() + self.builder.set_insertion_point_to_start(else_block) + # do not need to reset lscope since + # ternary expressions cannot define new variables + else_val = language.core._to_tensor(self.visit(node.orelse), self.builder) + else_block = self.builder.get_insertion_block() + + self._set_insertion_point_and_loc(ip, last_loc) + + assert then_val.type == else_val.type, \ + f'ternary expression with dynamic condition has inconsistent types {then_val.type} and {else_val.type}' + ret_type = then_val.type + + ret_type_ir = [ret_type.to_ir(self.builder)] if ret_type != language.void else [] + if_op = self.builder.create_if_op(ret_type_ir, cond.handle, True) + then_block.merge_block_before(if_op.get_then_block()) + if ret_type_ir: + self.builder.set_insertion_point_to_end(if_op.get_then_block()) + self.builder.create_yield_op([then_val.handle]) + + self.builder.set_insertion_point_to_end(if_op.get_then_block()) + else_block.merge_block_before(if_op.get_else_block()) + if ret_type_ir: + self.builder.set_insertion_point_to_end(if_op.get_else_block()) + self.builder.create_yield_op([else_val.handle]) + return language.core.tensor(if_op.get_result(0), ret_type) if ret_type_ir else None + else: + cond = _unwrap_if_constexpr(cond) + + # not isinstance - we insist the real thing, no subclasses and no ducks + if type(cond) not in _condition_types: + raise self._unsupported( + node, "`if` conditionals can only accept values of type {{{}}}, not objects of type {}".format( + ', '.join(_.__name__ for _ in _condition_types), + type(cond).__name__)) + if cond: + return self.visit(node.body) + else: + return self.visit(node.orelse) + + def visit_Pass(self, node): + pass + + def visit_Compare(self, node): + if not (len(node.comparators) == 1 and len(node.ops) == 1): + raise self._unsupported(node, "simultaneous multiple comparison is 
not supported") + lhs = self.visit(node.left) + rhs = self.visit(node.comparators[0]) + lhs_value = _unwrap_if_constexpr(lhs) + rhs_value = _unwrap_if_constexpr(rhs) + if type(node.ops[0]) == ast.Is: + return constexpr(lhs_value is rhs_value) + if type(node.ops[0]) == ast.IsNot: + return constexpr(lhs_value is not rhs_value) + method_name = self._method_name_for_comp_op.get(type(node.ops[0])) + if method_name is None: + raise self._unsupported( + node, "AST comparison operator '{}' is not (currently) implemented.".format(node.ops[0].__name__)) + return self._apply_binary_method(method_name, lhs, rhs) + + _method_name_for_comp_op: Dict[Type[ast.cmpop], str] = { + ast.Eq: '__eq__', ast.NotEq: '__ne__', ast.Lt: '__lt__', ast.LtE: '__le__', ast.Gt: '__gt__', ast.GtE: '__ge__' + } + + def visit_UnaryOp(self, node): + operand = self.visit(node.operand) + fn = self._method_name_for_unary_op.get(type(node.op)) + if fn is None: + raise self._unsupported(node, f"AST unary operator '{node.op.__name__}' is not (currently) implemented.") + if _is_triton_tensor(operand): + return getattr(operand, fn)(_builder=self.builder) + try: + return getattr(operand, fn)() + except AttributeError: + raise self._unsupported( + node, f"AST unary operator '{fn}' is not (currently) implemented on type {type(operand).__name__}") + + _method_name_for_unary_op: Dict[Type[ast.unaryop], str] = { + ast.USub: '__neg__', ast.UAdd: '__pos__', ast.Not: '__not__', ast.Invert: '__invert__' + } + + def visit_While(self, node): + with enter_sub_region(self) as sr: + liveins, insert_block = sr + ip, last_loc = self._get_insertion_point_and_loc() + + # loop body (the after region) + # loop_block = self.builder.create_block() + dummy = self.builder.create_block() + self.builder.set_insertion_point_to_start(dummy) + self.scf_stack.append(node) + self.visit_compound_statement(node.body) + self.scf_stack.pop() + loop_defs = self.local_defs + dummy.erase() + + # collect loop-carried values + names = [] + ret_types 
= [] + init_args = [] + for name in loop_defs: + if name in liveins: + # We should not def new constexpr + assert _is_triton_tensor(loop_defs[name]), f'cannot reassign constxpr {name} in the loop' + assert _is_triton_tensor(liveins[name]), f'cannot reasign constexpr {name} in the loop' + assert loop_defs[name].type == liveins[name].type, \ + f'Loop-carried variable {name} has initial type {liveins[name].type} '\ + f'but is re-assigned to {loop_defs[name].type} in loop! '\ + f'Please make sure that the type stays consistent.' + + # these are loop-carried values + names.append(name) + ret_types.append(loop_defs[name].type) + init_args.append(liveins[name]) + + self._set_insertion_point_and_loc(ip, last_loc) + while_op = self.builder.create_while_op([ty.to_ir(self.builder) for ty in ret_types], + [arg.handle for arg in init_args]) + # merge the condition region + before_block = self.builder.create_block_with_parent(while_op.get_before(), + [ty.to_ir(self.builder) for ty in ret_types]) + self.builder.set_insertion_point_to_start(before_block) + for i, name in enumerate(names): + self.lscope[name] = language.core.tensor(before_block.arg(i), ret_types[i]) + self.local_defs[name] = self.lscope[name] + cond = self.visit(node.test) + self.builder.set_insertion_point_to_end(before_block) + # create ConditionOp: e.g., scf.condition(%cond) %arg0, %arg1, ... 
        # --- tail of visit_While (the method's definition begins before this excerpt) ---
        # Terminate the "before" region: the condition op forwards the
        # before-block arguments into the "after" (body) region.
        self.builder.create_condition_op(cond.handle, [before_block.arg(i) for i in range(len(init_args))])
        # merge the loop body
        after_block = self.builder.create_block_with_parent(while_op.get_after(),
                                                            [ty.to_ir(self.builder) for ty in ret_types])

        # generate loop body: rebind every loop-carried name to the
        # corresponding after-block argument before visiting the body.
        self.builder.set_insertion_point_to_start(after_block)
        for i, name in enumerate(names):
            self.lscope[name] = language.core.tensor(after_block.arg(i), ret_types[i])
            self.local_defs[name] = self.lscope[name]
        self.scf_stack.append(node)
        self.visit_compound_statement(node.body)
        self.scf_stack.pop()
        loop_defs = self.local_defs
        # Yield only values that are live-in (loop-carried); purely local
        # definitions do not escape the loop body.
        yields = []
        for name in loop_defs:
            if name in liveins:
                yields.append(loop_defs[name])
        self.builder.create_yield_op([y.handle for y in yields])

        # WhileOp defines new values, update the symbol table (lscope, local_defs)
        for i, name in enumerate(names):
            new_def = language.core.tensor(while_op.get_result(i), ret_types[i])
            self.lscope[name] = new_def
            self.local_defs[name] = new_def

        # `while ... else` is not supported: fail loudly on any orelse stmt.
        for stmt in node.orelse:
            assert False, "Not implemented"
            ast.NodeVisitor.generic_visit(self, stmt)

    def visit_Subscript(self, node):
        """Handle `x[y]` in Load context only (subscript stores are unsupported)."""
        assert node.ctx.__class__.__name__ == "Load"
        lhs = self.visit(node.value)
        slices = self.visit(node.slice)
        if _is_triton_tensor(lhs):
            # Tensor subscripting needs the builder to emit IR.
            return lhs.__getitem__(slices, _builder=self.builder)
        return lhs[slices]

    def visit_ExtSlice(self, node):
        # Legacy (pre-3.9) AST node for `x[a, b]`: visit each dimension.
        return [self.visit(dim) for dim in node.dims]

    def visit_For(self, node):
        """Lower a `for` loop.

        `tl.static_range` is fully unrolled at compile time by re-visiting the
        body once per iteration; `tl.range` and the builtin `range` are lowered
        to an SCF-style ForOp whose loop-carried variables are inferred from
        the symbol table via a dry run of the body.
        """
        IteratorClass = self.visit(node.iter.func)
        iter_args = [self.visit(arg) for arg in node.iter.args]
        iter_kwargs = dict(self.visit(keyword) for keyword in node.iter.keywords)
        if IteratorClass == language.static_range:
            # Compile-time unrolling: bind the induction variable to a
            # constexpr and visit the body for each static value.
            iterator = IteratorClass(*iter_args, **iter_kwargs)
            static_range = range(iterator.start.value, iterator.end.value, iterator.step.value)
            for i in static_range:
                self.lscope[node.target.id] = constexpr(i)
                self.visit_compound_statement(node.body)
                for stmt in node.orelse:
                    ast.NodeVisitor.generic_visit(self, stmt)
            return

        num_stages = None
        if IteratorClass is language.range:
            iterator = IteratorClass(*iter_args, **iter_kwargs)
            # visit iterator arguments
            # note: only `range` iterator is supported now
            # collect lower bound (lb), upper bound (ub), and step
            lb = iterator.start
            ub = iterator.end
            step = iterator.step
            num_stages = iterator.num_stages
        elif IteratorClass is range:
            # visit iterator arguments
            # note: only `range` iterator is supported now
            # collect lower bound (lb), upper bound (ub), and step
            lb = iter_args[0] if len(iter_args) > 1 else self.visit(ast.Num(0))
            ub = iter_args[1] if len(iter_args) > 1 else self.visit(node.iter.args[0])
            step = iter_args[2] if len(iter_args) > 2 else self.visit(ast.Num(1))
        else:
            raise RuntimeError('Only `range` and `static_range` iterators are currently supported')
        # handle negative constant step (not supported by scf.for in MLIR):
        # iterate the mirrored range with a positive step, and recover the
        # real induction value below as iv = ub - iv + lb.
        negative_step = False
        if _is_constexpr(step) and step.value < 0:
            step = constexpr(-step.value)
            negative_step = True
            lb, ub = ub, lb
        lb = language.core._to_tensor(lb, self.builder)
        ub = language.core._to_tensor(ub, self.builder)
        step = language.core._to_tensor(step, self.builder)
        # induction variable type: pairwise-promote the three operand types
        if not lb.dtype.is_int() or not ub.dtype.is_int() or not step.dtype.is_int():
            raise TypeError(f"For loop bounds and step must all be ints, are ({lb.dtype}, {ub.dtype}, {step.dtype})")
        iv_type = language.semantic.integer_promote_impl(lb.dtype, ub.dtype)
        iv_type = language.semantic.integer_promote_impl(iv_type, step.dtype)
        iv_ir_type = iv_type.to_ir(self.builder)
        iv_is_signed = iv_type.int_signedness == language.core.dtype.SIGNEDNESS.SIGNED
        # lb/ub/step might be constexpr, we need to cast them to tensor
        lb = lb.handle
        ub = ub.handle
        step = step.handle
        # ForOp can only accept IndexType as lb/ub/step. Cast integer to Index
        lb = self.builder.create_int_cast(lb, iv_ir_type, iv_is_signed)
        ub = self.builder.create_int_cast(ub, iv_ir_type, iv_is_signed)
        step = self.builder.create_int_cast(step, iv_ir_type, iv_is_signed)
        # Create placeholder for the loop induction variable; its uses are
        # rewired to the real induction value once the ForOp exists.
        iv = self.builder.create_undef(iv_ir_type)
        self.set_value(node.target.id, language.core.tensor(iv, iv_type))

        with enter_sub_region(self) as sr:
            liveins, insert_block = sr
            ip, last_loc = self._get_insertion_point_and_loc()

            # create loop body block
            block = self.builder.create_block()
            self.builder.set_insertion_point_to_start(block)
            # dry visit loop body: discover which names are loop-carried
            # without keeping any generated IR (the block is erased below).
            self.scf_stack.append(node)
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            block.erase()

            # If a variable (name) is defined in both its parent & itself, then it's
            # a loop-carried variable. (They must be of the same type)
            init_args = []
            yields = []
            names = []
            for name in self.local_defs:
                if name in liveins:
                    assert _is_triton_tensor(self.local_defs[name]), f'{name} is not tensor'
                    assert _is_triton_tensor(liveins[name])
                    assert self.local_defs[name].type == liveins[name].type, \
                        f'Loop-carried variable {name} has initial type {liveins[name].type} '\
                        f'but is re-assigned to {self.local_defs[name].type} in loop! '\
                        f'Please make sure that the type stays consistent.'

                    names.append(name)
                    init_args.append(language.core._to_tensor(liveins[name], self.builder))
                    yields.append(language.core._to_tensor(self.local_defs[name], self.builder))

            # create ForOp
            self._set_insertion_point_and_loc(ip, last_loc)
            for_op = self.builder.create_for_op(lb, ub, step, [arg.handle for arg in init_args])
            if num_stages is not None:
                # Software-pipelining hint consumed by later compiler passes.
                for_op.set_attr("tt.num_stages", self.builder.get_int32_attr(num_stages))

            self.scf_stack.append(node)
            self.builder.set_insertion_point_to_start(for_op.get_body(0))
            # reset local scope to not pick up local defs from the previous dry run.
            self.lscope = liveins.copy()
            self.local_defs = {}
            # arg(0) of the body block is the induction variable, hence i + 1.
            for i, name in enumerate(names):
                self.set_value(name, language.core.tensor(for_op.get_body(0).arg(i + 1), yields[i].type))
            self.visit_compound_statement(node.body)
            self.scf_stack.pop()
            yields = []
            for name in self.local_defs:
                if name in liveins:
                    yields.append(language.core._to_tensor(self.local_defs[name], self.builder))

            # create YieldOp
            if len(yields) > 0:
                self.builder.create_yield_op([y.handle for y in yields])
            for_op_region = for_op.get_body(0).get_parent()
            assert for_op_region.size() == 1, "We use SCF, so the loop body should only have one block"

            # update induction variable with actual value, and replace all uses
            self.builder.set_insertion_point_to_start(for_op.get_body(0))
            iv = for_op.get_induction_var()
            if negative_step:
                # Undo the lb/ub mirroring performed above for negative steps.
                iv = self.builder.create_sub(ub, iv)
                iv = self.builder.create_add(iv, lb)
            self.lscope[node.target.id].handle.replace_all_uses_with(iv)
            self.set_value(node.target.id, language.core.tensor(iv, iv_type))

            # update lscope & local_defs (ForOp defines new values)
            for i, name in enumerate(names):
                self.set_value(name, language.core.tensor(for_op.get_result(i), yields[i].type))

        # `for ... else` is not supported.
        for stmt in node.orelse:
            assert False, "Don't know what to do with else after for"
            ast.NodeVisitor.generic_visit(self, stmt)

    def visit_Slice(self, node):
        # Build a plain Python slice; any bound may be None or a constexpr.
        lower = self.visit(node.lower)
        upper = self.visit(node.upper)
        step = self.visit(node.step)
        return slice(lower, upper, step)

    def visit_Index(self, node):
        # Legacy (pre-3.9) wrapper around subscript values.
        return self.visit(node.value)

    def visit_keyword(self, node) -> Tuple[str, Any]:
        # Return a (keyword name, evaluated value) pair, ready for dict().
        return node.arg, self.visit(node.value)

    def visit_Assert(self, node) -> Any:
        # Asserts are only compiled in debug mode; otherwise they are no-ops.
        if not self.debug:
            return
        test = self.visit(node.test)
        msg = self.visit(node.msg) if node.msg is not None else ""
        # Convert assert to triton's device_assert which happens on the device
        return language.core.device_assert(test, msg, _builder=self.builder)

    # (a `def` fragment follows at the next source seam: `call_JitFunction`)
call_JitFunction(self, fn: JITFunction, args, kwargs): + args = inspect.getcallargs(fn.fn, *args, **kwargs) + args = [args[name] for name in fn.arg_names] + args = [arg if _is_triton_tensor(arg) else constexpr(arg) for arg in args] + # generate function def + attributes = dict() + constexprs = [i for i, arg in enumerate(args) if _is_constexpr(arg)] + constants = {i: args[i] for i in constexprs} + # generate call + args = [None if i in constexprs else arg for i, arg in enumerate(args)] + arg_vals = [arg.handle for arg in args if arg is not None] + arg_types = [arg.type for arg in args if arg is not None] + fn_name = mangle_fn(fn.__name__, arg_types, constants) + # generate function def if necessary + if not self.module.has_function(fn_name): + prototype = language.function_type([], arg_types) + gscope = fn.__globals__ + # If the callee is not set, we use the same debug setting as the caller + file_name, begin_line = _get_fn_file_line(fn) + debug = self.debug if fn.debug is None else fn.debug + generator = CodeGenerator(self.context, prototype, gscope, attributes, constants, module=self.module, + jit_fn=fn, function_name=fn_name, function_types=self.function_ret_types, + noinline=fn.noinline, file_name=file_name, begin_line=begin_line, + options=self.builder.options, codegen_fns=self.builder.codegen_fns, debug=debug) + try: + generator.visit(fn.parse()) + except Exception as e: + # Wrap the error in the callee with the location of the call. 
+ raise CompilationError(self.jit_fn.src, self.cur_node, None) from e + + callee_ret_type = generator.ret_type + self.function_ret_types[fn_name] = callee_ret_type + else: + callee_ret_type = self.function_ret_types[fn_name] + symbol = self.module.get_function(fn_name) + call_op = self.builder.call(symbol, arg_vals) + if call_op.get_num_results() == 0 or callee_ret_type is None: + return None + elif call_op.get_num_results() == 1: + return tensor(call_op.get_result(0), callee_ret_type) + else: + # should return a tuple of tl.tensor + results = [] + for i in range(call_op.get_num_results()): + results.append(tensor(call_op.get_result(i), callee_ret_type[i])) + return tuple(results) + + def visit_Call(self, node): + fn = _unwrap_if_constexpr(self.visit(node.func)) + static_implementation = self.statically_implemented_functions.get(fn) + if static_implementation is not None: + return static_implementation(self, node) + + kws = dict(self.visit(keyword) for keyword in node.keywords) + args = [self.visit(arg) for arg in node.args] + if fn is language.core.device_assert: # TODO: this should not be so hardcoded + if not self.debug: + return + if isinstance(fn, JITFunction): + _check_fn_args(node, fn, args) + return self.call_JitFunction(fn, args, kws) + if (hasattr(fn, '__self__') and _is_triton_tensor(fn.__self__)) or language.core.is_builtin(fn): + extra_kwargs = dict(_builder=self.builder) + sig = inspect.signature(fn) + if '_generator' in sig.parameters: + extra_kwargs['_generator'] = self + try: + return fn(*args, **extra_kwargs, **kws) + except Exception as e: + # Normally when we raise a CompilationError, we raise it as + # `from None`, because the original fileline from the exception + # is not relevant (and often points into code_generator.py + # itself). But when calling a function, we raise as `from e` to + # preserve the traceback of the original error, which may e.g. + # be in core.py. 
                raise CompilationError(self.jit_fn.src, node, None) from e

        # Plain Python callable from the builtin namespace: evaluate it at
        # compile time on constexpr-unwrapped arguments.
        # NOTE(review): when `fn` matches none of the cases above, control
        # falls off the end and implicitly returns None — confirm this is the
        # intended behavior rather than a missing error path.
        if fn in self.builtin_namespace.values():
            args = map(_unwrap_if_constexpr, args)
            return fn(*args, **kws)

    def visit_Constant(self, node):
        # Literals become compile-time constants.
        return constexpr(node.value)

    def visit_BoolOp(self, node: ast.BoolOp):
        """Lower `and` / `or`; only a single, un-chained operator is supported."""
        if len(node.values) != 2:
            raise self._unsupported(
                node, "chained boolean operators (A or B or C) are not supported; use parentheses to split the chain.")
        lhs = self.visit(node.values[0])
        rhs = self.visit(node.values[1])
        method_name = self._method_name_for_bool_op.get(type(node.op))
        if method_name is None:
            raise self._unsupported(
                node, "AST boolean operator '{}' is not (currently) implemented.".format(node.op.__name__))
        return self._apply_binary_method(method_name, lhs, rhs)

    # Maps AST boolean operator types to the tensor method implementing them.
    _method_name_for_bool_op: Dict[Type[ast.boolop], str] = {ast.And: 'logical_and', ast.Or: 'logical_or'}

    if sys.version_info < (3, 8):
        # Pre-3.8 ASTs use NameConstant / Num / Str instead of Constant.

        def visit_NameConstant(self, node):
            return constexpr(node.value)

        def visit_Num(self, node):
            return constexpr(node.n)

        def visit_Str(self, node):
            return constexpr(ast.literal_eval(node))

    def visit_Attribute(self, node):
        # `.T` on a tensor is a 2-D transpose; anything else is plain getattr.
        lhs = self.visit(node.value)
        if _is_triton_tensor(lhs):
            if node.attr == "T":
                return language.semantic.permute(lhs, (1, 0), builder=self.builder)
        return getattr(lhs, node.attr)

    def visit_Expr(self, node):
        # Expression statements: visit children for their side effects only.
        ast.NodeVisitor.generic_visit(self, node)

    def visit_NoneType(self, node):
        return None

    def visit_JoinedStr(self, node):
        """Evaluate an f-string at compile time.

        Every interpolated value must reduce to a constexpr; the pieces are
        formatted and concatenated into a plain Python string.
        """
        values = list(node.values)
        for i, value in enumerate(values):
            if isinstance(value, ast.Constant):
                values[i] = str(value.value)
            elif isinstance(value, ast.FormattedValue):
                conversion_code = value.conversion
                evaluated = self.visit(value.value)
                if not _is_constexpr(evaluated):
                    raise self._unsupported(
                        node,
                        "Cannot evaluate f-string containing non-constexpr conversion values, found conversion of type "
                        + str(type(evaluated)))
                # conversion_code is -1 for "no conversion", otherwise the
                # ord() of 's'/'r'/'a' — rebuild the format spec accordingly.
                values[i] = ("{}" if conversion_code < 0 else "{!" + chr(conversion_code) + "}").format(evaluated.value)
            else:
                raise AssertionError("encountered unexpected node of type {} in a JoinedStr node".format(type(value)))
        return ''.join(values)

    def visit(self, node):
        """Dispatch wrapper around NodeVisitor.visit.

        Tracks the current AST node and the builder's source location so that
        errors can be reported against the user's @jit source, then restores
        both after the child visit completes.
        """
        if node is None:
            return
        with warnings.catch_warnings():
            # The ast library added visit_Constant and deprecated some other
            # methods but we can't move to that without breaking Python 3.6 and 3.7.
            warnings.simplefilter("ignore", DeprecationWarning)  # python 3.9
            warnings.simplefilter("ignore", PendingDeprecationWarning)  # python 3.8
            last_node = self.cur_node
            last_loc = self.builder.get_loc()
            self.cur_node = node
            if hasattr(node, 'lineno') and hasattr(node, 'col_offset'):
                self.builder.set_loc(self.file_name, self.begin_line + node.lineno, node.col_offset)
                last_loc = self.builder.get_loc()
            try:
                ret = super().visit(node)
            except CompilationError:
                # Already wrapped (possibly by a nested visit): re-raise as-is.
                raise
            except Exception as e:
                # Wrap the error in a CompilationError which contains the source
                # of the @jit function.
                raise CompilationError(self.jit_fn.src, self.cur_node, repr(e)) from None

            # Reset the location to the last one before the visit
            if last_loc:
                self.cur_node = last_node
                self.builder.set_loc(last_loc)
            return ret

    def generic_visit(self, node):
        # Any AST node without an explicit visitor is unsupported.
        raise self._unsupported(node, "unsupported AST node type: {}".format(type(node).__name__))

    def execute_static_assert(self, node: ast.Call) -> None:
        """Compile-time assert: the condition must reduce to a Python bool."""
        arg_count = len(node.args)
        if not (0 < arg_count <= 2) or len(node.keywords):
            raise TypeError("`static_assert` requires one or two positional arguments only")

        passed = _unwrap_if_constexpr(self.visit(node.args[0]))
        if not isinstance(passed, bool):
            raise NotImplementedError(
                "Assertion condition could not be determined at compile-time. Make sure that it depends only on `constexpr` values"
            )
        if not passed:
            if arg_count == 1:
                message = ""
            else:
                try:
                    message = self.visit(node.args[1])
                except Exception as e:
                    # Best-effort: if the message expression itself fails to
                    # evaluate, fall back to an empty message rather than
                    # masking the assertion failure. NOTE(review): the broad
                    # `except Exception` also silently hides compiler bugs
                    # in message evaluation; `e` is unused.
                    message = ""

            raise CompileTimeAssertionFailure(self.jit_fn.src, node, _unwrap_if_constexpr(message))
        return None

    def static_executor(python_fn):
        """Wrap a plain Python function so it runs at compile time on
        constexpr-unwrapped arguments (used for static_print, int, len)."""

        def ret(self, node: ast.Call):
            kws = {
                name: _unwrap_if_constexpr(value)
                for name, value in (self.visit(keyword) for keyword in node.keywords)
            }
            args = [_unwrap_if_constexpr(self.visit(arg)) for arg in node.args]
            return constexpr(python_fn(*args, **kws))

        return ret

    # Functions executed inside the compiler rather than lowered to IR.
    statically_implemented_functions: Dict[object, Callable[[ast.Call], Any]] = {
        language.core.static_assert: execute_static_assert,
        language.core.static_print: static_executor(print),
        int: static_executor(int),
        len: static_executor(len),
    }


def kernel_suffix(signature, specialization):
    """Build the per-specialization suffix appended to a kernel's name."""
    # suffix format:
    # <'c' if equal to 1><'d' if divisible by 16><'e' if divisible by 8>
    # NOTE(review): the format comment above mentions an 'e' marker for
    # divisible-by-8, but no code below ever appends 'e' — either the comment
    # or the implementation is stale; confirm against upstream.
    suffix = ''
    for i, _ in enumerate(signature):
        suffix += str(i)
        if i in specialization.equal_to_1:
            suffix += 'c'
        if i in specialization.divisible_by_16:
            suffix += 'd'
    return suffix


def ast_to_ttir(fn, specialization, context, options, codegen_fns):
    """Compile a @jit function's AST into a Triton-IR module.

    Builds the kernel prototype from the specialization's signature (dropping
    constexpr parameters), runs a CodeGenerator over the parsed function, and
    returns the resulting module with ownership of the MLIR context.
    """
    attrs = specialization.attrs
    # create kernel prototype
    # Constant keys may be given by name or by position; normalize to indices.
    cst_key = lambda i: fn.arg_names.index(i) if isinstance(i, str) else i
    constants = {cst_key(key): value for key, value in specialization.constants.items()}
    # visit kernel AST
    gscope = fn.__globals__.copy()
    function_name = fn.repr(specialization)
    tys = list(specialization.signature.values())
    # NOTE(review): `k in tys` tests an integer index for membership in a list
    # of type *strings*, so the `i1 -> True` branch appears unreachable and
    # every equal-to-1 arg gets constant 1 — looks like it was meant to be a
    # bounds/containment check on indices; verify against upstream.
    new_constants = {k: True if k in tys and tys[k] == "i1" else 1 for k in attrs.equal_to_1}
    new_attrs = {k: [("tt.divisibility", 16)] for k in attrs.divisible_by_16}

    all_constants = constants.copy()
    all_constants.update(new_constants)
    # Constexpr parameters do not appear in the IR-level argument list.
    arg_types = [str_to_ty(v) for k, v in specialization.signature.items() if k not in specialization.constants]
    file_name, begin_line = _get_fn_file_line(fn)

    prototype = language.function_type([], arg_types)
    generator = CodeGenerator(context, prototype, gscope=gscope, constants=all_constants, function_name=function_name,
                              jit_fn=fn, attributes=new_attrs, is_kernel=True, file_name=file_name,
                              begin_line=begin_line, options=options, codegen_fns=codegen_fns)
    generator.visit(fn.parse())

    ret = generator.module
    # module takes ownership of the context
    ret.context = context
    return ret