Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_basic.py +0 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py +527 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py +49 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_data.py +725 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py +45 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py +85 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py +136 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py +70 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py +212 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_round.py +18 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py +142 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py +61 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py +61 -0
- parrot/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py +385 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_native.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_cpu_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_native.h +21 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mul_compositeexplicitautograd_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/permute.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names_ops.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_cpu_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeimplicitautograd_dispatch.h +26 -0
- vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/METADATA +159 -0
- vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/RECORD +101 -0
- vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/REQUESTED +0 -0
- vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/WHEEL +5 -0
- vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/top_level.txt +1 -0
- vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/INSTALLER +1 -0
- vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/METADATA +360 -0
- vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/RECORD +43 -0
- vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/REQUESTED +0 -0
- vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/WHEEL +4 -0
- vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/licenses/LICENSE +21 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/__init__.py +12 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/_core.py +38 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json +58 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content +17 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core +51 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format +14 -0
- vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation +14 -0
parrot/lib/python3.10/site-packages/scipy/special/tests/test_basic.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdflib.py
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test cdflib functions versus mpmath, if available.
|
| 3 |
+
|
| 4 |
+
The following functions still need tests:
|
| 5 |
+
|
| 6 |
+
- ncfdtr
|
| 7 |
+
- ncfdtri
|
| 8 |
+
- ncfdtridfn
|
| 9 |
+
- ncfdtridfd
|
| 10 |
+
- ncfdtrinc
|
| 11 |
+
- nbdtrik
|
| 12 |
+
- nbdtrin
|
| 13 |
+
- pdtrik
|
| 14 |
+
- nctdtr
|
| 15 |
+
- nctdtrit
|
| 16 |
+
- nctdtridf
|
| 17 |
+
- nctdtrinc
|
| 18 |
+
|
| 19 |
+
"""
|
| 20 |
+
import itertools
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
from numpy.testing import assert_equal, assert_allclose
|
| 24 |
+
import pytest
|
| 25 |
+
|
| 26 |
+
import scipy.special as sp
|
| 27 |
+
from scipy.special._testutils import (
|
| 28 |
+
MissingModule, check_version, FuncData)
|
| 29 |
+
from scipy.special._mptestutils import (
|
| 30 |
+
Arg, IntArg, get_args, mpf2float, assert_mpmath_equal)
|
| 31 |
+
|
| 32 |
+
try:
|
| 33 |
+
import mpmath
|
| 34 |
+
except ImportError:
|
| 35 |
+
mpmath = MissingModule('mpmath')
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class ProbArg:
|
| 39 |
+
"""Generate a set of probabilities on [0, 1]."""
|
| 40 |
+
|
| 41 |
+
def __init__(self):
|
| 42 |
+
# Include the endpoints for compatibility with Arg et. al.
|
| 43 |
+
self.a = 0
|
| 44 |
+
self.b = 1
|
| 45 |
+
|
| 46 |
+
def values(self, n):
|
| 47 |
+
"""Return an array containing approximately n numbers."""
|
| 48 |
+
m = max(1, n//3)
|
| 49 |
+
v1 = np.logspace(-30, np.log10(0.3), m)
|
| 50 |
+
v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
|
| 51 |
+
v3 = 1 - np.logspace(np.log10(0.3), -15, m)
|
| 52 |
+
v = np.r_[v1, v2, v3]
|
| 53 |
+
return np.unique(v)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class EndpointFilter:
|
| 57 |
+
def __init__(self, a, b, rtol, atol):
|
| 58 |
+
self.a = a
|
| 59 |
+
self.b = b
|
| 60 |
+
self.rtol = rtol
|
| 61 |
+
self.atol = atol
|
| 62 |
+
|
| 63 |
+
def __call__(self, x):
|
| 64 |
+
mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol
|
| 65 |
+
mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol
|
| 66 |
+
return np.where(mask1 | mask2, False, True)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class _CDFData:
|
| 70 |
+
def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True,
|
| 71 |
+
dps=20, n=5000, rtol=None, atol=None,
|
| 72 |
+
endpt_rtol=None, endpt_atol=None):
|
| 73 |
+
self.spfunc = spfunc
|
| 74 |
+
self.mpfunc = mpfunc
|
| 75 |
+
self.index = index
|
| 76 |
+
self.argspec = argspec
|
| 77 |
+
self.spfunc_first = spfunc_first
|
| 78 |
+
self.dps = dps
|
| 79 |
+
self.n = n
|
| 80 |
+
self.rtol = rtol
|
| 81 |
+
self.atol = atol
|
| 82 |
+
|
| 83 |
+
if not isinstance(argspec, list):
|
| 84 |
+
self.endpt_rtol = None
|
| 85 |
+
self.endpt_atol = None
|
| 86 |
+
elif endpt_rtol is not None or endpt_atol is not None:
|
| 87 |
+
if isinstance(endpt_rtol, list):
|
| 88 |
+
self.endpt_rtol = endpt_rtol
|
| 89 |
+
else:
|
| 90 |
+
self.endpt_rtol = [endpt_rtol]*len(self.argspec)
|
| 91 |
+
if isinstance(endpt_atol, list):
|
| 92 |
+
self.endpt_atol = endpt_atol
|
| 93 |
+
else:
|
| 94 |
+
self.endpt_atol = [endpt_atol]*len(self.argspec)
|
| 95 |
+
else:
|
| 96 |
+
self.endpt_rtol = None
|
| 97 |
+
self.endpt_atol = None
|
| 98 |
+
|
| 99 |
+
def idmap(self, *args):
|
| 100 |
+
if self.spfunc_first:
|
| 101 |
+
res = self.spfunc(*args)
|
| 102 |
+
if np.isnan(res):
|
| 103 |
+
return np.nan
|
| 104 |
+
args = list(args)
|
| 105 |
+
args[self.index] = res
|
| 106 |
+
with mpmath.workdps(self.dps):
|
| 107 |
+
res = self.mpfunc(*tuple(args))
|
| 108 |
+
# Imaginary parts are spurious
|
| 109 |
+
res = mpf2float(res.real)
|
| 110 |
+
else:
|
| 111 |
+
with mpmath.workdps(self.dps):
|
| 112 |
+
res = self.mpfunc(*args)
|
| 113 |
+
res = mpf2float(res.real)
|
| 114 |
+
args = list(args)
|
| 115 |
+
args[self.index] = res
|
| 116 |
+
res = self.spfunc(*tuple(args))
|
| 117 |
+
return res
|
| 118 |
+
|
| 119 |
+
def get_param_filter(self):
|
| 120 |
+
if self.endpt_rtol is None and self.endpt_atol is None:
|
| 121 |
+
return None
|
| 122 |
+
|
| 123 |
+
filters = []
|
| 124 |
+
for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec):
|
| 125 |
+
if rtol is None and atol is None:
|
| 126 |
+
filters.append(None)
|
| 127 |
+
continue
|
| 128 |
+
elif rtol is None:
|
| 129 |
+
rtol = 0.0
|
| 130 |
+
elif atol is None:
|
| 131 |
+
atol = 0.0
|
| 132 |
+
|
| 133 |
+
filters.append(EndpointFilter(spec.a, spec.b, rtol, atol))
|
| 134 |
+
return filters
|
| 135 |
+
|
| 136 |
+
def check(self):
|
| 137 |
+
# Generate values for the arguments
|
| 138 |
+
args = get_args(self.argspec, self.n)
|
| 139 |
+
param_filter = self.get_param_filter()
|
| 140 |
+
param_columns = tuple(range(args.shape[1]))
|
| 141 |
+
result_columns = args.shape[1]
|
| 142 |
+
args = np.hstack((args, args[:, self.index].reshape(args.shape[0], 1)))
|
| 143 |
+
FuncData(self.idmap, args,
|
| 144 |
+
param_columns=param_columns, result_columns=result_columns,
|
| 145 |
+
rtol=self.rtol, atol=self.atol, vectorized=False,
|
| 146 |
+
param_filter=param_filter).check()
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def _assert_inverts(*a, **kw):
|
| 150 |
+
d = _CDFData(*a, **kw)
|
| 151 |
+
d.check()
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def _binomial_cdf(k, n, p):
|
| 155 |
+
k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p)
|
| 156 |
+
if k <= 0:
|
| 157 |
+
return mpmath.mpf(0)
|
| 158 |
+
elif k >= n:
|
| 159 |
+
return mpmath.mpf(1)
|
| 160 |
+
|
| 161 |
+
onemp = mpmath.fsub(1, p, exact=True)
|
| 162 |
+
return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def _f_cdf(dfn, dfd, x):
|
| 166 |
+
if x < 0:
|
| 167 |
+
return mpmath.mpf(0)
|
| 168 |
+
dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x)
|
| 169 |
+
ub = dfn*x/(dfn*x + dfd)
|
| 170 |
+
res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True)
|
| 171 |
+
return res
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def _student_t_cdf(df, t, dps=None):
|
| 175 |
+
if dps is None:
|
| 176 |
+
dps = mpmath.mp.dps
|
| 177 |
+
with mpmath.workdps(dps):
|
| 178 |
+
df, t = mpmath.mpf(df), mpmath.mpf(t)
|
| 179 |
+
fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df)
|
| 180 |
+
fac *= t*mpmath.gamma(0.5*(df + 1))
|
| 181 |
+
fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df)
|
| 182 |
+
return 0.5 + fac
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _noncentral_chi_pdf(t, df, nc):
|
| 186 |
+
res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t))
|
| 187 |
+
res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2
|
| 188 |
+
return res
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _noncentral_chi_cdf(x, df, nc, dps=None):
|
| 192 |
+
if dps is None:
|
| 193 |
+
dps = mpmath.mp.dps
|
| 194 |
+
x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc)
|
| 195 |
+
with mpmath.workdps(dps):
|
| 196 |
+
res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x])
|
| 197 |
+
return res
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _tukey_lmbda_quantile(p, lmbda):
|
| 201 |
+
# For lmbda != 0
|
| 202 |
+
return (p**lmbda - (1 - p)**lmbda)/lmbda
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@pytest.mark.slow
|
| 206 |
+
@check_version(mpmath, '0.19')
|
| 207 |
+
class TestCDFlib:
|
| 208 |
+
|
| 209 |
+
@pytest.mark.xfail(run=False)
|
| 210 |
+
def test_bdtrik(self):
|
| 211 |
+
_assert_inverts(
|
| 212 |
+
sp.bdtrik,
|
| 213 |
+
_binomial_cdf,
|
| 214 |
+
0, [ProbArg(), IntArg(1, 1000), ProbArg()],
|
| 215 |
+
rtol=1e-4)
|
| 216 |
+
|
| 217 |
+
def test_bdtrin(self):
|
| 218 |
+
_assert_inverts(
|
| 219 |
+
sp.bdtrin,
|
| 220 |
+
_binomial_cdf,
|
| 221 |
+
1, [IntArg(1, 1000), ProbArg(), ProbArg()],
|
| 222 |
+
rtol=1e-4, endpt_atol=[None, None, 1e-6])
|
| 223 |
+
|
| 224 |
+
def test_btdtria(self):
|
| 225 |
+
_assert_inverts(
|
| 226 |
+
sp.btdtria,
|
| 227 |
+
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
|
| 228 |
+
0, [ProbArg(), Arg(0, 1e2, inclusive_a=False),
|
| 229 |
+
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
|
| 230 |
+
rtol=1e-6)
|
| 231 |
+
|
| 232 |
+
def test_btdtrib(self):
|
| 233 |
+
# Use small values of a or mpmath doesn't converge
|
| 234 |
+
_assert_inverts(
|
| 235 |
+
sp.btdtrib,
|
| 236 |
+
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
|
| 237 |
+
1,
|
| 238 |
+
[Arg(0, 1e2, inclusive_a=False), ProbArg(),
|
| 239 |
+
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
|
| 240 |
+
rtol=1e-7,
|
| 241 |
+
endpt_atol=[None, 1e-18, 1e-15])
|
| 242 |
+
|
| 243 |
+
@pytest.mark.xfail(run=False)
|
| 244 |
+
def test_fdtridfd(self):
|
| 245 |
+
_assert_inverts(
|
| 246 |
+
sp.fdtridfd,
|
| 247 |
+
_f_cdf,
|
| 248 |
+
1,
|
| 249 |
+
[IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)],
|
| 250 |
+
rtol=1e-7)
|
| 251 |
+
|
| 252 |
+
def test_gdtria(self):
|
| 253 |
+
_assert_inverts(
|
| 254 |
+
sp.gdtria,
|
| 255 |
+
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
|
| 256 |
+
0,
|
| 257 |
+
[ProbArg(), Arg(0, 1e3, inclusive_a=False),
|
| 258 |
+
Arg(0, 1e4, inclusive_a=False)],
|
| 259 |
+
rtol=1e-7,
|
| 260 |
+
endpt_atol=[None, 1e-7, 1e-10])
|
| 261 |
+
|
| 262 |
+
def test_gdtrib(self):
|
| 263 |
+
# Use small values of a and x or mpmath doesn't converge
|
| 264 |
+
_assert_inverts(
|
| 265 |
+
sp.gdtrib,
|
| 266 |
+
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
|
| 267 |
+
1,
|
| 268 |
+
[Arg(0, 1e2, inclusive_a=False), ProbArg(),
|
| 269 |
+
Arg(0, 1e3, inclusive_a=False)],
|
| 270 |
+
rtol=1e-5)
|
| 271 |
+
|
| 272 |
+
def test_gdtrix(self):
|
| 273 |
+
_assert_inverts(
|
| 274 |
+
sp.gdtrix,
|
| 275 |
+
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
|
| 276 |
+
2,
|
| 277 |
+
[Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False),
|
| 278 |
+
ProbArg()],
|
| 279 |
+
rtol=1e-7,
|
| 280 |
+
endpt_atol=[None, 1e-7, 1e-10])
|
| 281 |
+
|
| 282 |
+
# Overall nrdtrimn and nrdtrisd are not performing well with infeasible/edge
|
| 283 |
+
# combinations of sigma and x, hence restricted the domains to still use the
|
| 284 |
+
# testing machinery, also see gh-20069
|
| 285 |
+
|
| 286 |
+
# nrdtrimn signature: p, sd, x
|
| 287 |
+
# nrdtrisd signature: mn, p, x
|
| 288 |
+
def test_nrdtrimn(self):
|
| 289 |
+
_assert_inverts(
|
| 290 |
+
sp.nrdtrimn,
|
| 291 |
+
lambda x, y, z: mpmath.ncdf(z, x, y),
|
| 292 |
+
0,
|
| 293 |
+
[ProbArg(), # CDF value p
|
| 294 |
+
Arg(0.1, np.inf, inclusive_a=False, inclusive_b=False), # sigma
|
| 295 |
+
Arg(-1e10, 1e10)], # x
|
| 296 |
+
rtol=1e-5)
|
| 297 |
+
|
| 298 |
+
def test_nrdtrisd(self):
|
| 299 |
+
_assert_inverts(
|
| 300 |
+
sp.nrdtrisd,
|
| 301 |
+
lambda x, y, z: mpmath.ncdf(z, x, y),
|
| 302 |
+
1,
|
| 303 |
+
[Arg(-np.inf, 10, inclusive_a=False, inclusive_b=False), # mn
|
| 304 |
+
ProbArg(), # CDF value p
|
| 305 |
+
Arg(10, 1e100)], # x
|
| 306 |
+
rtol=1e-5)
|
| 307 |
+
|
| 308 |
+
def test_stdtr(self):
|
| 309 |
+
# Ideally the left endpoint for Arg() should be 0.
|
| 310 |
+
assert_mpmath_equal(
|
| 311 |
+
sp.stdtr,
|
| 312 |
+
_student_t_cdf,
|
| 313 |
+
[IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7)
|
| 314 |
+
|
| 315 |
+
@pytest.mark.xfail(run=False)
|
| 316 |
+
def test_stdtridf(self):
|
| 317 |
+
_assert_inverts(
|
| 318 |
+
sp.stdtridf,
|
| 319 |
+
_student_t_cdf,
|
| 320 |
+
0, [ProbArg(), Arg()], rtol=1e-7)
|
| 321 |
+
|
| 322 |
+
def test_stdtrit(self):
|
| 323 |
+
_assert_inverts(
|
| 324 |
+
sp.stdtrit,
|
| 325 |
+
_student_t_cdf,
|
| 326 |
+
1, [IntArg(1, 100), ProbArg()], rtol=1e-7,
|
| 327 |
+
endpt_atol=[None, 1e-10])
|
| 328 |
+
|
| 329 |
+
def test_chdtriv(self):
|
| 330 |
+
_assert_inverts(
|
| 331 |
+
sp.chdtriv,
|
| 332 |
+
lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True),
|
| 333 |
+
0, [ProbArg(), IntArg(1, 100)], rtol=1e-4)
|
| 334 |
+
|
| 335 |
+
@pytest.mark.xfail(run=False)
|
| 336 |
+
def test_chndtridf(self):
|
| 337 |
+
# Use a larger atol since mpmath is doing numerical integration
|
| 338 |
+
_assert_inverts(
|
| 339 |
+
sp.chndtridf,
|
| 340 |
+
_noncentral_chi_cdf,
|
| 341 |
+
1, [Arg(0, 100, inclusive_a=False), ProbArg(),
|
| 342 |
+
Arg(0, 100, inclusive_a=False)],
|
| 343 |
+
n=1000, rtol=1e-4, atol=1e-15)
|
| 344 |
+
|
| 345 |
+
@pytest.mark.xfail(run=False)
|
| 346 |
+
def test_chndtrinc(self):
|
| 347 |
+
# Use a larger atol since mpmath is doing numerical integration
|
| 348 |
+
_assert_inverts(
|
| 349 |
+
sp.chndtrinc,
|
| 350 |
+
_noncentral_chi_cdf,
|
| 351 |
+
2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()],
|
| 352 |
+
n=1000, rtol=1e-4, atol=1e-15)
|
| 353 |
+
|
| 354 |
+
def test_chndtrix(self):
|
| 355 |
+
# Use a larger atol since mpmath is doing numerical integration
|
| 356 |
+
_assert_inverts(
|
| 357 |
+
sp.chndtrix,
|
| 358 |
+
_noncentral_chi_cdf,
|
| 359 |
+
0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)],
|
| 360 |
+
n=1000, rtol=1e-4, atol=1e-15,
|
| 361 |
+
endpt_atol=[1e-6, None, None])
|
| 362 |
+
|
| 363 |
+
def test_tklmbda_zero_shape(self):
|
| 364 |
+
# When lmbda = 0 the CDF has a simple closed form
|
| 365 |
+
one = mpmath.mpf(1)
|
| 366 |
+
assert_mpmath_equal(
|
| 367 |
+
lambda x: sp.tklmbda(x, 0),
|
| 368 |
+
lambda x: one/(mpmath.exp(-x) + one),
|
| 369 |
+
[Arg()], rtol=1e-7)
|
| 370 |
+
|
| 371 |
+
def test_tklmbda_neg_shape(self):
|
| 372 |
+
_assert_inverts(
|
| 373 |
+
sp.tklmbda,
|
| 374 |
+
_tukey_lmbda_quantile,
|
| 375 |
+
0, [ProbArg(), Arg(-25, 0, inclusive_b=False)],
|
| 376 |
+
spfunc_first=False, rtol=1e-5,
|
| 377 |
+
endpt_atol=[1e-9, 1e-5])
|
| 378 |
+
|
| 379 |
+
@pytest.mark.xfail(run=False)
|
| 380 |
+
def test_tklmbda_pos_shape(self):
|
| 381 |
+
_assert_inverts(
|
| 382 |
+
sp.tklmbda,
|
| 383 |
+
_tukey_lmbda_quantile,
|
| 384 |
+
0, [ProbArg(), Arg(0, 100, inclusive_a=False)],
|
| 385 |
+
spfunc_first=False, rtol=1e-5)
|
| 386 |
+
|
| 387 |
+
# The values of lmdba are chosen so that 1/lmbda is exact.
|
| 388 |
+
@pytest.mark.parametrize('lmbda', [0.5, 1.0, 8.0])
|
| 389 |
+
def test_tklmbda_lmbda1(self, lmbda):
|
| 390 |
+
bound = 1/lmbda
|
| 391 |
+
assert_equal(sp.tklmbda([-bound, bound], lmbda), [0.0, 1.0])
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
funcs = [
|
| 395 |
+
("btdtria", 3),
|
| 396 |
+
("btdtrib", 3),
|
| 397 |
+
("bdtrik", 3),
|
| 398 |
+
("bdtrin", 3),
|
| 399 |
+
("chdtriv", 2),
|
| 400 |
+
("chndtr", 3),
|
| 401 |
+
("chndtrix", 3),
|
| 402 |
+
("chndtridf", 3),
|
| 403 |
+
("chndtrinc", 3),
|
| 404 |
+
("fdtridfd", 3),
|
| 405 |
+
("ncfdtr", 4),
|
| 406 |
+
("ncfdtri", 4),
|
| 407 |
+
("ncfdtridfn", 4),
|
| 408 |
+
("ncfdtridfd", 4),
|
| 409 |
+
("ncfdtrinc", 4),
|
| 410 |
+
("gdtrix", 3),
|
| 411 |
+
("gdtrib", 3),
|
| 412 |
+
("gdtria", 3),
|
| 413 |
+
("nbdtrik", 3),
|
| 414 |
+
("nbdtrin", 3),
|
| 415 |
+
("nrdtrimn", 3),
|
| 416 |
+
("nrdtrisd", 3),
|
| 417 |
+
("pdtrik", 2),
|
| 418 |
+
("stdtr", 2),
|
| 419 |
+
("stdtrit", 2),
|
| 420 |
+
("stdtridf", 2),
|
| 421 |
+
("nctdtr", 3),
|
| 422 |
+
("nctdtrit", 3),
|
| 423 |
+
("nctdtridf", 3),
|
| 424 |
+
("nctdtrinc", 3),
|
| 425 |
+
("tklmbda", 2),
|
| 426 |
+
]
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
@pytest.mark.parametrize('func,numargs', funcs, ids=[x[0] for x in funcs])
|
| 430 |
+
def test_nonfinite(func, numargs):
|
| 431 |
+
|
| 432 |
+
rng = np.random.default_rng(1701299355559735)
|
| 433 |
+
func = getattr(sp, func)
|
| 434 |
+
args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in rng.random(numargs)]
|
| 435 |
+
|
| 436 |
+
for args in itertools.product(*args_choices):
|
| 437 |
+
res = func(*args)
|
| 438 |
+
|
| 439 |
+
if any(np.isnan(x) for x in args):
|
| 440 |
+
# Nan inputs should result to nan output
|
| 441 |
+
assert_equal(res, np.nan)
|
| 442 |
+
else:
|
| 443 |
+
# All other inputs should return something (but not
|
| 444 |
+
# raise exceptions or cause hangs)
|
| 445 |
+
pass
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
def test_chndtrix_gh2158():
|
| 449 |
+
# test that gh-2158 is resolved; previously this blew up
|
| 450 |
+
res = sp.chndtrix(0.999999, 2, np.arange(20.)+1e-6)
|
| 451 |
+
|
| 452 |
+
# Generated in R
|
| 453 |
+
# options(digits=16)
|
| 454 |
+
# ncp <- seq(0, 19) + 1e-6
|
| 455 |
+
# print(qchisq(0.999999, df = 2, ncp = ncp))
|
| 456 |
+
res_exp = [27.63103493142305, 35.25728589950540, 39.97396073236288,
|
| 457 |
+
43.88033702110538, 47.35206403482798, 50.54112500166103,
|
| 458 |
+
53.52720257322766, 56.35830042867810, 59.06600769498512,
|
| 459 |
+
61.67243118946381, 64.19376191277179, 66.64228141346548,
|
| 460 |
+
69.02756927200180, 71.35726934749408, 73.63759723904816,
|
| 461 |
+
75.87368842650227, 78.06984431185720, 80.22971052389806,
|
| 462 |
+
82.35640899964173, 84.45263768373256]
|
| 463 |
+
assert_allclose(res, res_exp)
|
| 464 |
+
|
| 465 |
+
@pytest.mark.xfail_on_32bit("32bit fails due to algorithm threshold")
|
| 466 |
+
def test_nctdtr_gh19896():
|
| 467 |
+
# test that gh-19896 is resolved.
|
| 468 |
+
# Compared to SciPy 1.11 results from Fortran code.
|
| 469 |
+
dfarr = [0.98, 9.8, 98, 980]
|
| 470 |
+
pnoncarr = [-3.8, 0.38, 3.8, 38]
|
| 471 |
+
tarr = [0.0015, 0.15, 1.5, 15]
|
| 472 |
+
resarr = [0.9999276519560749, 0.9999276519560749, 0.9999908831755221,
|
| 473 |
+
0.9999990265452424, 0.3524153312279712, 0.39749697267251416,
|
| 474 |
+
0.7168629634895805, 0.9656246449259646, 7.234804392512006e-05,
|
| 475 |
+
7.234804392512006e-05, 0.03538804607509127, 0.795482701508521,
|
| 476 |
+
0.0, 0.0, 0.0,
|
| 477 |
+
0.011927908523093889, 0.9999276519560749, 0.9999276519560749,
|
| 478 |
+
0.9999997441133123, 1.0, 0.3525155979118013,
|
| 479 |
+
0.4076312014048369, 0.8476794017035086, 0.9999999297116268,
|
| 480 |
+
7.234804392512006e-05, 7.234804392512006e-05, 0.013477443099785824,
|
| 481 |
+
0.9998501512331494, 0.0, 0.0,
|
| 482 |
+
0.0, 6.561112613212572e-07, 0.9999276519560749,
|
| 483 |
+
0.9999276519560749, 0.9999999313496014, 1.0,
|
| 484 |
+
0.3525281784865706, 0.40890253001898014, 0.8664672830017024,
|
| 485 |
+
1.0, 7.234804392512006e-05, 7.234804392512006e-05,
|
| 486 |
+
0.010990889489704836, 1.0, 0.0,
|
| 487 |
+
0.0, 0.0, 0.0,
|
| 488 |
+
0.9999276519560749, 0.9999276519560749, 0.9999999418789304,
|
| 489 |
+
1.0, 0.35252945487817355, 0.40903153246690993,
|
| 490 |
+
0.8684247068528264, 1.0, 7.234804392512006e-05,
|
| 491 |
+
7.234804392512006e-05, 0.01075068918582911, 1.0,
|
| 492 |
+
0.0, 0.0, 0.0, 0.0]
|
| 493 |
+
actarr = []
|
| 494 |
+
for df, p, t in itertools.product(dfarr, pnoncarr, tarr):
|
| 495 |
+
actarr += [sp.nctdtr(df, p, t)]
|
| 496 |
+
# The rtol is kept high on purpose to make it pass on 32bit systems
|
| 497 |
+
assert_allclose(actarr, resarr, rtol=1e-6, atol=0.0)
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
def test_nctdtrinc_gh19896():
|
| 501 |
+
# test that gh-19896 is resolved.
|
| 502 |
+
# Compared to SciPy 1.11 results from Fortran code.
|
| 503 |
+
dfarr = [0.001, 0.98, 9.8, 98, 980, 10000, 98, 9.8, 0.98, 0.001]
|
| 504 |
+
parr = [0.001, 0.1, 0.3, 0.8, 0.999, 0.001, 0.1, 0.3, 0.8, 0.999]
|
| 505 |
+
tarr = [0.0015, 0.15, 1.5, 15, 300, 0.0015, 0.15, 1.5, 15, 300]
|
| 506 |
+
desired = [3.090232306168629, 1.406141304556198, 2.014225177124157,
|
| 507 |
+
13.727067118283456, 278.9765683871208, 3.090232306168629,
|
| 508 |
+
1.4312427877936222, 2.014225177124157, 3.712743137978295,
|
| 509 |
+
-3.086951096691082]
|
| 510 |
+
actual = sp.nctdtrinc(dfarr, parr, tarr)
|
| 511 |
+
assert_allclose(actual, desired, rtol=5e-12, atol=0.0)
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
def test_stdtr_stdtrit_neg_inf():
|
| 515 |
+
# -inf was treated as +inf and values from the normal were returned
|
| 516 |
+
assert np.all(np.isnan(sp.stdtr(-np.inf, [-np.inf, -1.0, 0.0, 1.0, np.inf])))
|
| 517 |
+
assert np.all(np.isnan(sp.stdtrit(-np.inf, [0.0, 0.25, 0.5, 0.75, 1.0])))
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def test_bdtrik_nbdtrik_inf():
|
| 521 |
+
y = np.array(
|
| 522 |
+
[np.nan,-np.inf,-10.0, -1.0, 0.0, .00001, .5, 0.9999, 1.0, 10.0, np.inf])
|
| 523 |
+
y = y[:,None]
|
| 524 |
+
p = np.atleast_2d(
|
| 525 |
+
[np.nan, -np.inf, -10.0, -1.0, 0.0, .00001, .5, 1.0, np.inf])
|
| 526 |
+
assert np.all(np.isnan(sp.bdtrik(y, np.inf, p)))
|
| 527 |
+
assert np.all(np.isnan(sp.nbdtrik(y, np.inf, p)))
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_cdft_asymptotic.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# gh-14777 regression tests
|
| 2 |
+
# Test stdtr and stdtrit with infinite df and large values of df
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.testing import assert_allclose, assert_equal
|
| 6 |
+
from scipy.special import stdtr, stdtrit, ndtr, ndtri
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def test_stdtr_vs_R_large_df():
|
| 10 |
+
df = [1e10, 1e12, 1e120, np.inf]
|
| 11 |
+
t = 1.
|
| 12 |
+
res = stdtr(df, t)
|
| 13 |
+
# R Code:
|
| 14 |
+
# options(digits=20)
|
| 15 |
+
# pt(1., c(1e10, 1e12, 1e120, Inf))
|
| 16 |
+
res_R = [0.84134474605644460343,
|
| 17 |
+
0.84134474606842180044,
|
| 18 |
+
0.84134474606854281475,
|
| 19 |
+
0.84134474606854292578]
|
| 20 |
+
assert_allclose(res, res_R, rtol=2e-15)
|
| 21 |
+
# last value should also agree with ndtr
|
| 22 |
+
assert_equal(res[3], ndtr(1.))
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def test_stdtrit_vs_R_large_df():
|
| 26 |
+
df = [1e10, 1e12, 1e120, np.inf]
|
| 27 |
+
p = 0.1
|
| 28 |
+
res = stdtrit(df, p)
|
| 29 |
+
# R Code:
|
| 30 |
+
# options(digits=20)
|
| 31 |
+
# qt(0.1, c(1e10, 1e12, 1e120, Inf))
|
| 32 |
+
res_R = [-1.2815515656292593150,
|
| 33 |
+
-1.2815515655454472466,
|
| 34 |
+
-1.2815515655446008125,
|
| 35 |
+
-1.2815515655446008125]
|
| 36 |
+
assert_allclose(res, res_R, rtol=1e-14, atol=1e-15)
|
| 37 |
+
# last value should also agree with ndtri
|
| 38 |
+
assert_equal(res[3], ndtri(0.1))
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_stdtr_stdtri_invalid():
|
| 42 |
+
# a mix of large and inf df with t/p equal to nan
|
| 43 |
+
df = [1e10, 1e12, 1e120, np.inf]
|
| 44 |
+
x = np.nan
|
| 45 |
+
res1 = stdtr(df, x)
|
| 46 |
+
res2 = stdtrit(df, x)
|
| 47 |
+
res_ex = 4*[np.nan]
|
| 48 |
+
assert_equal(res1, res_ex)
|
| 49 |
+
assert_equal(res2, res_ex)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_data.py
ADDED
|
@@ -0,0 +1,725 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib.resources
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import suppress_warnings
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from scipy.special import (
|
| 8 |
+
lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite,
|
| 9 |
+
eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta,
|
| 10 |
+
jn, jv, jvp, yn, yv, yvp, iv, ivp, kn, kv, kvp,
|
| 11 |
+
gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma,
|
| 12 |
+
beta, betainc, betaincinv, poch,
|
| 13 |
+
ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc,
|
| 14 |
+
elliprc, elliprd, elliprf, elliprg, elliprj,
|
| 15 |
+
erf, erfc, erfinv, erfcinv, exp1, expi, expn,
|
| 16 |
+
bdtrik, btdtr, btdtri, btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib,
|
| 17 |
+
nbdtrik, pdtrik, owens_t,
|
| 18 |
+
mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1,
|
| 19 |
+
mathieu_modsem1, mathieu_modcem2, mathieu_modsem2,
|
| 20 |
+
ellip_harm, ellip_harm_2, spherical_jn, spherical_yn, wright_bessel
|
| 21 |
+
)
|
| 22 |
+
from scipy.integrate import IntegrationWarning
|
| 23 |
+
|
| 24 |
+
from scipy.special._testutils import FuncData
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# The npz files are generated, and hence may live in the build dir. We can only
# access them through `importlib.resources`, not an explicit path from `__file__`
_datadir = importlib.resources.files('scipy.special.tests.data')


def _load_reference(npz_name):
    # Materialize the (possibly zip-packaged) resource as a real file so
    # np.load can open it by path.
    with importlib.resources.as_file(_datadir.joinpath(npz_name)) as f:
        return np.load(f)


DATASETS_BOOST = _load_reference('boost.npz')
DATASETS_GSL = _load_reference('gsl.npz')
DATASETS_LOCAL = _load_reference('local.npz')
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def data(func, dataname, *a, **kw):
    """Build a FuncData case from the Boost reference datasets."""
    # Caller-supplied 'dataname' wins; otherwise label the case by dataset.
    kw = {'dataname': dataname, **kw}
    return FuncData(func, DATASETS_BOOST[dataname], *a, **kw)


def data_gsl(func, dataname, *a, **kw):
    """Build a FuncData case from the GSL reference datasets."""
    kw = {'dataname': dataname, **kw}
    return FuncData(func, DATASETS_GSL[dataname], *a, **kw)


def data_local(func, dataname, *a, **kw):
    """Build a FuncData case from the locally generated datasets."""
    kw = {'dataname': dataname, **kw}
    return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def ellipk_(k):
    """Complete elliptic K in Boost's convention (modulus k, not parameter m)."""
    # scipy's ellipk takes the parameter m = k**2.
    return ellipk(k**2)


def ellipkinc_(f, k):
    """Incomplete elliptic F(phi, k) with Boost's modulus convention."""
    return ellipkinc(f, k**2)


def ellipe_(k):
    """Complete elliptic E in Boost's modulus convention."""
    return ellipe(k**2)


def ellipeinc_(f, k):
    """Incomplete elliptic E(phi, k) with Boost's modulus convention."""
    return ellipeinc(f, k**2)


def zeta_(x):
    """Riemann zeta via the Hurwitz zeta with q fixed at 1."""
    return zeta(x, 1.)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def assoc_legendre_p_boost_(nu, mu, x):
    """Associated Legendre P for Boost data (integer degrees only)."""
    # the boost test data is for integer orders only
    return lpmv(mu, nu.astype(int), x)


def legendre_p_via_assoc_(nu, x):
    """Legendre P_nu(x) computed as the order-0 associated function."""
    return lpmv(0, nu, x)


def lpn_(n, x):
    """Highest-degree value from the lpn recurrence table."""
    values, _derivs = lpn(n.astype('l'), x)
    return values[-1]


def lqn_(n, x):
    """Highest-degree value from the lqn recurrence table."""
    values, _derivs = lqn(n.astype('l'), x)
    return values[-1]


def legendre_p_via_lpmn(n, x):
    """Legendre P_n(x) taken from the order-0 row of the lpmn table."""
    table, _derivs = lpmn(0, n, x)
    return table[0, -1]


def legendre_q_via_lqmn(n, x):
    """Legendre Q_n(x) taken from the order-0 row of the lqmn table."""
    table, _derivs = lqmn(0, n, x)
    return table[0, -1]
|
| 97 |
+
|
| 98 |
+
def mathieu_ce_rad(m, q, x):
    """mathieu_cem with the angular argument given in radians."""
    return mathieu_cem(m, q, x*180/np.pi)[0]


def mathieu_se_rad(m, q, x):
    """mathieu_sem with the angular argument given in radians."""
    return mathieu_sem(m, q, x*180/np.pi)[0]


# GSL follows a different normalization for the modified Mathieu functions
# than the Abramowitz & Stegun convention scipy uses; rescaling by
# sqrt(pi/2) maps scipy's values onto GSL's.
def mathieu_mc1_scaled(m, q, x):
    """Modified Mathieu Mc1, rescaled to GSL's normalization."""
    return mathieu_modcem1(m, q, x)[0] * np.sqrt(np.pi/2)


def mathieu_ms1_scaled(m, q, x):
    """Modified Mathieu Ms1, rescaled to GSL's normalization."""
    return mathieu_modsem1(m, q, x)[0] * np.sqrt(np.pi/2)


def mathieu_mc2_scaled(m, q, x):
    """Modified Mathieu Mc2, rescaled to GSL's normalization."""
    return mathieu_modcem2(m, q, x)[0] * np.sqrt(np.pi/2)


def mathieu_ms2_scaled(m, q, x):
    """Modified Mathieu Ms2, rescaled to GSL's normalization."""
    return mathieu_modsem2(m, q, x)[0] * np.sqrt(np.pi/2)
|
| 122 |
+
|
| 123 |
+
# Thin casting wrappers: the reference datasets store the degree as a float
# column, while we want to exercise both the integer ('l') and the
# floating-point ('d') code paths of the orthogonal-polynomial evaluators.

def eval_legendre_ld(n, x):
    """eval_legendre with the degree cast to C long (integer path)."""
    return eval_legendre(n.astype('l'), x)


def eval_legendre_dd(n, x):
    """eval_legendre with the degree cast to double (hyp2f1 path)."""
    return eval_legendre(n.astype('d'), x)


def eval_hermite_ld(n, x):
    """eval_hermite with the degree cast to C long."""
    return eval_hermite(n.astype('l'), x)


def eval_laguerre_ld(n, x):
    """eval_laguerre with the degree cast to C long (integer path)."""
    return eval_laguerre(n.astype('l'), x)


def eval_laguerre_dd(n, x):
    """eval_laguerre with the degree cast to double (hyp1f1 path)."""
    return eval_laguerre(n.astype('d'), x)


def eval_genlaguerre_ldd(n, a, x):
    """eval_genlaguerre with the degree cast to C long."""
    return eval_genlaguerre(n.astype('l'), a, x)


def eval_genlaguerre_ddd(n, a, x):
    """eval_genlaguerre with the degree cast to double."""
    return eval_genlaguerre(n.astype('d'), a, x)
|
| 143 |
+
|
| 144 |
+
# Wrappers over the cdflib inverse/CDF routines: `*_comp` variants feed in
# the complemented probability, and the `gdtr*` helpers pin the gamma scale
# parameter a to 1 so the datasets (which tabulate the regularized
# incomplete gamma) can be reused directly.

def bdtrik_comp(y, n, p):
    """bdtrik evaluated at the complemented probability 1-y."""
    return bdtrik(1-y, n, p)


def btdtri_comp(a, b, p):
    """btdtri evaluated at the complemented probability 1-p."""
    return btdtri(a, b, 1-p)


def btdtria_comp(p, b, x):
    """btdtria evaluated at the complemented probability 1-p."""
    return btdtria(1-p, b, x)


def btdtrib_comp(a, p, x):
    """btdtrib evaluated at the complemented probability 1-p."""
    return btdtrib(a, 1-p, x)


def gdtr_(p, x):
    """gdtr with the scale parameter fixed at 1."""
    return gdtr(1.0, p, x)


def gdtrc_(p, x):
    """gdtrc with the scale parameter fixed at 1."""
    return gdtrc(1.0, p, x)


def gdtrix_(b, p):
    """gdtrix with the scale parameter fixed at 1."""
    return gdtrix(1.0, b, p)


def gdtrix_comp(b, p):
    """gdtrix (scale 1) at the complemented probability 1-p."""
    return gdtrix(1.0, b, 1-p)


def gdtrib_(p, x):
    """gdtrib with the scale parameter fixed at 1."""
    return gdtrib(1.0, p, x)


def gdtrib_comp(p, x):
    """gdtrib (scale 1) at the complemented probability 1-p."""
    return gdtrib(1.0, 1-p, x)


def nbdtrik_comp(y, n, p):
    """nbdtrik evaluated at the complemented probability 1-y."""
    return nbdtrik(1-y, n, p)


def pdtrik_comp(p, m):
    """pdtrik evaluated at the complemented probability 1-p."""
    return pdtrik(1-p, m)
|
| 179 |
+
|
| 180 |
+
def poch_(z, m):
    """Reciprocal Pochhammer symbol (matches Boost's tgamma_delta_ratio)."""
    return 1.0 / poch(z, m)


def poch_minus(z, m):
    """Reciprocal Pochhammer symbol with negated shift."""
    return 1.0 / poch(z, -m)


def spherical_jn_(n, x):
    """spherical_jn with the order cast to C long."""
    return spherical_jn(n.astype('l'), x)


def spherical_yn_(n, x):
    """spherical_yn with the order cast to C long."""
    return spherical_yn(n.astype('l'), x)


def sph_harm_(m, n, theta, phi):
    """Spherical harmonic split into (real, imag) for real-valued datasets."""
    value = sph_harm(m, n, theta, phi)
    return (value.real, value.imag)


def cexpm1(x, y):
    """Complex expm1 of x + i*y, returned as a (real, imag) pair."""
    result = expm1(x + 1j*y)
    return result.real, result.imag


def clog1p(x, y):
    """Complex log1p of x + i*y, returned as a (real, imag) pair."""
    result = log1p(x + 1j*y)
    return result.real, result.imag
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
BOOST_TESTS = [
|
| 206 |
+
data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p',
|
| 207 |
+
(0,1,2), 3, rtol=1e-11),
|
| 208 |
+
|
| 209 |
+
data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p',
|
| 210 |
+
(0,1), 2, rtol=1e-11),
|
| 211 |
+
data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large',
|
| 212 |
+
(0,1), 2, rtol=9.6e-14),
|
| 213 |
+
data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p',
|
| 214 |
+
(0,1), 2, rtol=5e-14, vectorized=False),
|
| 215 |
+
data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large',
|
| 216 |
+
(0,1), 2, rtol=9.6e-14, vectorized=False),
|
| 217 |
+
data(lpn_, 'legendre_p_ipp-legendre_p',
|
| 218 |
+
(0,1), 2, rtol=5e-14, vectorized=False),
|
| 219 |
+
data(lpn_, 'legendre_p_large_ipp-legendre_p_large',
|
| 220 |
+
(0,1), 2, rtol=3e-13, vectorized=False),
|
| 221 |
+
data(eval_legendre_ld, 'legendre_p_ipp-legendre_p',
|
| 222 |
+
(0,1), 2, rtol=6e-14),
|
| 223 |
+
data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large',
|
| 224 |
+
(0,1), 2, rtol=2e-13),
|
| 225 |
+
data(eval_legendre_dd, 'legendre_p_ipp-legendre_p',
|
| 226 |
+
(0,1), 2, rtol=2e-14),
|
| 227 |
+
data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large',
|
| 228 |
+
(0,1), 2, rtol=2e-13),
|
| 229 |
+
|
| 230 |
+
data(lqn_, 'legendre_p_ipp-legendre_p',
|
| 231 |
+
(0,1), 3, rtol=2e-14, vectorized=False),
|
| 232 |
+
data(lqn_, 'legendre_p_large_ipp-legendre_p_large',
|
| 233 |
+
(0,1), 3, rtol=2e-12, vectorized=False),
|
| 234 |
+
data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p',
|
| 235 |
+
(0,1), 3, rtol=2e-14, vectorized=False),
|
| 236 |
+
data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large',
|
| 237 |
+
(0,1), 3, rtol=2e-12, vectorized=False),
|
| 238 |
+
|
| 239 |
+
data(beta, 'beta_exp_data_ipp-beta_exp_data',
|
| 240 |
+
(0,1), 2, rtol=1e-13),
|
| 241 |
+
data(beta, 'beta_exp_data_ipp-beta_exp_data',
|
| 242 |
+
(0,1), 2, rtol=1e-13),
|
| 243 |
+
data(beta, 'beta_med_data_ipp-beta_med_data',
|
| 244 |
+
(0,1), 2, rtol=5e-13),
|
| 245 |
+
|
| 246 |
+
data(betainc, 'ibeta_small_data_ipp-ibeta_small_data',
|
| 247 |
+
(0,1,2), 5, rtol=6e-15),
|
| 248 |
+
data(betainc, 'ibeta_data_ipp-ibeta_data',
|
| 249 |
+
(0,1,2), 5, rtol=5e-13),
|
| 250 |
+
data(betainc, 'ibeta_int_data_ipp-ibeta_int_data',
|
| 251 |
+
(0,1,2), 5, rtol=2e-14),
|
| 252 |
+
data(betainc, 'ibeta_large_data_ipp-ibeta_large_data',
|
| 253 |
+
(0,1,2), 5, rtol=4e-10),
|
| 254 |
+
|
| 255 |
+
data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data',
|
| 256 |
+
(0,1,2), 3, rtol=1e-5),
|
| 257 |
+
|
| 258 |
+
data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data',
|
| 259 |
+
(0,1,2), 5, rtol=6e-15),
|
| 260 |
+
data(btdtr, 'ibeta_data_ipp-ibeta_data',
|
| 261 |
+
(0,1,2), 5, rtol=4e-13),
|
| 262 |
+
data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data',
|
| 263 |
+
(0,1,2), 5, rtol=2e-14),
|
| 264 |
+
data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data',
|
| 265 |
+
(0,1,2), 5, rtol=4e-10),
|
| 266 |
+
|
| 267 |
+
data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data',
|
| 268 |
+
(0,1,2), 3, rtol=1e-5),
|
| 269 |
+
data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data',
|
| 270 |
+
(0,1,2), 4, rtol=8e-7),
|
| 271 |
+
|
| 272 |
+
data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data',
|
| 273 |
+
(2,0,1), 3, rtol=5e-9),
|
| 274 |
+
data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data',
|
| 275 |
+
(2,0,1), 4, rtol=5e-9),
|
| 276 |
+
|
| 277 |
+
data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data',
|
| 278 |
+
(0,2,1), 5, rtol=5e-9),
|
| 279 |
+
data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data',
|
| 280 |
+
(0,2,1), 6, rtol=5e-9),
|
| 281 |
+
|
| 282 |
+
data(binom, 'binomial_data_ipp-binomial_data',
|
| 283 |
+
(0,1), 2, rtol=1e-13),
|
| 284 |
+
data(binom, 'binomial_large_data_ipp-binomial_large_data',
|
| 285 |
+
(0,1), 2, rtol=5e-13),
|
| 286 |
+
|
| 287 |
+
data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data',
|
| 288 |
+
(2,0,1), 3, rtol=5e-9),
|
| 289 |
+
data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data',
|
| 290 |
+
(2,0,1), 4, rtol=5e-9),
|
| 291 |
+
|
| 292 |
+
data(nbdtrik, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data',
|
| 293 |
+
(2,0,1), 3, rtol=4e-9),
|
| 294 |
+
data(nbdtrik_comp,
|
| 295 |
+
'negative_binomial_quantile_ipp-negative_binomial_quantile_data',
|
| 296 |
+
(2,0,1), 4, rtol=4e-9),
|
| 297 |
+
|
| 298 |
+
data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data',
|
| 299 |
+
(1,0), 2, rtol=3e-9),
|
| 300 |
+
data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data',
|
| 301 |
+
(1,0), 3, rtol=4e-9),
|
| 302 |
+
|
| 303 |
+
data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0),
|
| 304 |
+
|
| 305 |
+
data(digamma, 'digamma_data_ipp-digamma_data', 0, 1),
|
| 306 |
+
data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1),
|
| 307 |
+
data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13),
|
| 308 |
+
data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13),
|
| 309 |
+
data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15),
|
| 310 |
+
data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15),
|
| 311 |
+
data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15),
|
| 312 |
+
data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14),
|
| 313 |
+
|
| 314 |
+
data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1),
|
| 315 |
+
data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14),
|
| 316 |
+
data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1),
|
| 317 |
+
data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14),
|
| 318 |
+
|
| 319 |
+
data(erf, 'erf_data_ipp-erf_data', 0, 1),
|
| 320 |
+
data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13),
|
| 321 |
+
data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15),
|
| 322 |
+
data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1),
|
| 323 |
+
data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1),
|
| 324 |
+
data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14),
|
| 325 |
+
data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1),
|
| 326 |
+
data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13),
|
| 327 |
+
data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2),
|
| 328 |
+
|
| 329 |
+
data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1),
|
| 330 |
+
data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1),
|
| 331 |
+
data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data', 0, 1,
|
| 332 |
+
param_filter=(lambda s: s > 0)),
|
| 333 |
+
|
| 334 |
+
data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13),
|
| 335 |
+
data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9),
|
| 336 |
+
data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13),
|
| 337 |
+
data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13),
|
| 338 |
+
data(expi, 'expinti_data_long_ipp-expinti_data_long', 0, 1),
|
| 339 |
+
|
| 340 |
+
data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2),
|
| 341 |
+
data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14),
|
| 342 |
+
|
| 343 |
+
data(gamma, 'test_gamma_data_ipp-near_0', 0, 1),
|
| 344 |
+
data(gamma, 'test_gamma_data_ipp-near_1', 0, 1),
|
| 345 |
+
data(gamma, 'test_gamma_data_ipp-near_2', 0, 1),
|
| 346 |
+
data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1),
|
| 347 |
+
data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12),
|
| 348 |
+
data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14),
|
| 349 |
+
data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9),
|
| 350 |
+
data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9),
|
| 351 |
+
data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9),
|
| 352 |
+
data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9),
|
| 353 |
+
data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9),
|
| 354 |
+
data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13),
|
| 355 |
+
data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11),
|
| 356 |
+
data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11),
|
| 357 |
+
data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10),
|
| 358 |
+
data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11),
|
| 359 |
+
data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11),
|
| 360 |
+
data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2),
|
| 361 |
+
|
| 362 |
+
data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15),
|
| 363 |
+
data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
|
| 364 |
+
data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
|
| 365 |
+
data(gammainc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12),
|
| 366 |
+
|
| 367 |
+
data(gdtr_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13),
|
| 368 |
+
data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
|
| 369 |
+
data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
|
| 370 |
+
data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9),
|
| 371 |
+
|
| 372 |
+
data(gammaincc, 'igamma_small_data_ipp-igamma_small_data',
|
| 373 |
+
(0,1), 3, rtol=1e-13),
|
| 374 |
+
data(gammaincc, 'igamma_med_data_ipp-igamma_med_data',
|
| 375 |
+
(0,1), 3, rtol=2e-13),
|
| 376 |
+
data(gammaincc, 'igamma_int_data_ipp-igamma_int_data',
|
| 377 |
+
(0,1), 3, rtol=4e-14),
|
| 378 |
+
data(gammaincc, 'igamma_big_data_ipp-igamma_big_data',
|
| 379 |
+
(0,1), 3, rtol=1e-11),
|
| 380 |
+
|
| 381 |
+
data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
|
| 382 |
+
data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
|
| 383 |
+
data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
|
| 384 |
+
data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),
|
| 385 |
+
|
| 386 |
+
data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9),
|
| 387 |
+
data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9),
|
| 388 |
+
|
| 389 |
+
data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data',
|
| 390 |
+
(0,1), 2, rtol=2e-13),
|
| 391 |
+
data(poch_, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int',
|
| 392 |
+
(0,1), 2,),
|
| 393 |
+
data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2',
|
| 394 |
+
(0,1), 2,),
|
| 395 |
+
data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data',
|
| 396 |
+
(0,1), 3, rtol=2e-13),
|
| 397 |
+
data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int',
|
| 398 |
+
(0,1), 3),
|
| 399 |
+
data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2',
|
| 400 |
+
(0,1), 3),
|
| 401 |
+
|
| 402 |
+
data(eval_hermite_ld, 'hermite_ipp-hermite',
|
| 403 |
+
(0,1), 2, rtol=2e-14),
|
| 404 |
+
|
| 405 |
+
data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2',
|
| 406 |
+
(0,1), 2, rtol=7e-12),
|
| 407 |
+
data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2',
|
| 408 |
+
(0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'),
|
| 409 |
+
data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3',
|
| 410 |
+
(0,1,2), 3, rtol=2e-13),
|
| 411 |
+
data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3',
|
| 412 |
+
(0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'),
|
| 413 |
+
|
| 414 |
+
data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1),
|
| 415 |
+
data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2),
|
| 416 |
+
|
| 417 |
+
data(iv, 'bessel_i_data_ipp-bessel_i_data',
|
| 418 |
+
(0,1), 2, rtol=1e-12),
|
| 419 |
+
data(iv, 'bessel_i_data_ipp-bessel_i_data',
|
| 420 |
+
(0,1j), 2, rtol=2e-10, atol=1e-306),
|
| 421 |
+
data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data',
|
| 422 |
+
(0,1), 2, rtol=1e-9),
|
| 423 |
+
data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data',
|
| 424 |
+
(0,1j), 2, rtol=2e-10),
|
| 425 |
+
|
| 426 |
+
data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data',
|
| 427 |
+
(0,1), 2, rtol=1.2e-13),
|
| 428 |
+
data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data',
|
| 429 |
+
(0,1j), 2, rtol=1.2e-13, atol=1e-300),
|
| 430 |
+
|
| 431 |
+
data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
|
| 432 |
+
data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
|
| 433 |
+
data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11),
|
| 434 |
+
data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11),
|
| 435 |
+
|
| 436 |
+
data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
|
| 437 |
+
data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
|
| 438 |
+
data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12),
|
| 439 |
+
data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12),
|
| 440 |
+
|
| 441 |
+
data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data',
|
| 442 |
+
(0,1), 2, rtol=1e-13),
|
| 443 |
+
data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data',
|
| 444 |
+
(0,1j), 2, rtol=1e-13),
|
| 445 |
+
data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data',
|
| 446 |
+
(0,1), 2, rtol=1e-11),
|
| 447 |
+
data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data',
|
| 448 |
+
(0,1j), 2, rtol=2e-11),
|
| 449 |
+
|
| 450 |
+
data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
|
| 451 |
+
|
| 452 |
+
data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
|
| 453 |
+
data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12),
|
| 454 |
+
data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12),
|
| 455 |
+
data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12),
|
| 456 |
+
|
| 457 |
+
data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data',
|
| 458 |
+
(0,1), 2, rtol=3e-14),
|
| 459 |
+
data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data',
|
| 460 |
+
(0,1j), 2, rtol=3e-14),
|
| 461 |
+
data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1), 2, rtol=7e-14),
|
| 462 |
+
data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1j), 2, rtol=7e-14),
|
| 463 |
+
|
| 464 |
+
data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12),
|
| 465 |
+
data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
|
| 466 |
+
|
| 467 |
+
data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
|
| 468 |
+
data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12),
|
| 469 |
+
data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10),
|
| 470 |
+
data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10),
|
| 471 |
+
|
| 472 |
+
data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data',
|
| 473 |
+
(0, 1), 2, rtol=4e-9),
|
| 474 |
+
data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data',
|
| 475 |
+
(0, 1j), 2, rtol=4e-9),
|
| 476 |
+
|
| 477 |
+
data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1,
|
| 478 |
+
param_filter=(lambda s: s > 1)),
|
| 479 |
+
data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1,
|
| 480 |
+
param_filter=(lambda s: s > 1)),
|
| 481 |
+
data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1,
|
| 482 |
+
param_filter=(lambda s: s > 1)),
|
| 483 |
+
data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1,
|
| 484 |
+
param_filter=(lambda s: s > 1)),
|
| 485 |
+
|
| 486 |
+
data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data',
|
| 487 |
+
(0,1), 2, rtol=1e-11),
|
| 488 |
+
data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data',
|
| 489 |
+
(0,1), 2, rtol=1e-14),
|
| 490 |
+
data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data',
|
| 491 |
+
(0,1), 2, rtol=1e-11),
|
| 492 |
+
|
| 493 |
+
data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data',
|
| 494 |
+
(0,1), 3, rtol=1e-12),
|
| 495 |
+
data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data',
|
| 496 |
+
(0,1), 3, rtol=1e-14),
|
| 497 |
+
data(gammainccinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data',
|
| 498 |
+
(0,1), 3, rtol=1e-14),
|
| 499 |
+
|
| 500 |
+
data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data',
|
| 501 |
+
(0,1), 2, rtol=3e-13, knownfailure='gdtrix unflow some points'),
|
| 502 |
+
data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data',
|
| 503 |
+
(0,1), 2, rtol=3e-15),
|
| 504 |
+
data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data',
|
| 505 |
+
(0,1), 2),
|
| 506 |
+
data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data',
|
| 507 |
+
(0,1), 2, knownfailure='gdtrix bad some points'),
|
| 508 |
+
data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data',
|
| 509 |
+
(0,1), 3, rtol=6e-15),
|
| 510 |
+
data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data',
|
| 511 |
+
(0,1), 3),
|
| 512 |
+
|
| 513 |
+
data(chndtr, 'nccs_ipp-nccs',
|
| 514 |
+
(2,0,1), 3, rtol=3e-5),
|
| 515 |
+
data(chndtr, 'nccs_big_ipp-nccs_big',
|
| 516 |
+
(2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate some points'),
|
| 517 |
+
|
| 518 |
+
data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic',
|
| 519 |
+
(1,0,3,2), (4,5), rtol=5e-11,
|
| 520 |
+
param_filter=(lambda p: np.ones(p.shape, '?'),
|
| 521 |
+
lambda p: np.ones(p.shape, '?'),
|
| 522 |
+
lambda p: np.logical_and(p < 2*np.pi, p >= 0),
|
| 523 |
+
lambda p: np.logical_and(p < np.pi, p >= 0))),
|
| 524 |
+
|
| 525 |
+
data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data',
|
| 526 |
+
(0,1), 2, rtol=1e-13),
|
| 527 |
+
data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data',
|
| 528 |
+
(0,1), 2, rtol=8e-15),
|
| 529 |
+
|
| 530 |
+
data(owens_t, 'owens_t_ipp-owens_t',
|
| 531 |
+
(0, 1), 2, rtol=5e-14),
|
| 532 |
+
data(owens_t, 'owens_t_large_data_ipp-owens_t_large_data',
|
| 533 |
+
(0, 1), 2, rtol=8e-12),
|
| 534 |
+
|
| 535 |
+
# -- test data exists in boost but is not used in scipy --
|
| 536 |
+
|
| 537 |
+
# ibeta_derivative_data_ipp/ibeta_derivative_data.txt
|
| 538 |
+
# ibeta_derivative_int_data_ipp/ibeta_derivative_int_data.txt
|
| 539 |
+
# ibeta_derivative_large_data_ipp/ibeta_derivative_large_data.txt
|
| 540 |
+
# ibeta_derivative_small_data_ipp/ibeta_derivative_small_data.txt
|
| 541 |
+
|
| 542 |
+
# bessel_y01_prime_data_ipp/bessel_y01_prime_data.txt
|
| 543 |
+
# bessel_yn_prime_data_ipp/bessel_yn_prime_data.txt
|
| 544 |
+
# sph_bessel_prime_data_ipp/sph_bessel_prime_data.txt
|
| 545 |
+
# sph_neumann_prime_data_ipp/sph_neumann_prime_data.txt
|
| 546 |
+
|
| 547 |
+
# ellint_d2_data_ipp/ellint_d2_data.txt
|
| 548 |
+
# ellint_d_data_ipp/ellint_d_data.txt
|
| 549 |
+
# ellint_pi2_data_ipp/ellint_pi2_data.txt
|
| 550 |
+
# ellint_pi3_data_ipp/ellint_pi3_data.txt
|
| 551 |
+
# ellint_pi3_large_data_ipp/ellint_pi3_large_data.txt
|
| 552 |
+
data(elliprc, 'ellint_rc_data_ipp-ellint_rc_data', (0, 1), 2,
|
| 553 |
+
rtol=5e-16),
|
| 554 |
+
data(elliprd, 'ellint_rd_data_ipp-ellint_rd_data', (0, 1, 2), 3,
|
| 555 |
+
rtol=5e-16),
|
| 556 |
+
data(elliprd, 'ellint_rd_0xy_ipp-ellint_rd_0xy', (0, 1, 2), 3,
|
| 557 |
+
rtol=5e-16),
|
| 558 |
+
data(elliprd, 'ellint_rd_0yy_ipp-ellint_rd_0yy', (0, 1, 2), 3,
|
| 559 |
+
rtol=5e-16),
|
| 560 |
+
data(elliprd, 'ellint_rd_xxx_ipp-ellint_rd_xxx', (0, 1, 2), 3,
|
| 561 |
+
rtol=5e-16),
|
| 562 |
+
# Some of the following rtol for elliprd may be larger than 5e-16 to
|
| 563 |
+
# work around some hard cases in the Boost test where we get slightly
|
| 564 |
+
# larger error than the ideal bound when the x (==y) input is close to
|
| 565 |
+
# zero.
|
| 566 |
+
# Also the accuracy on 32-bit builds with g++ may suffer from excess
|
| 567 |
+
# loss of precision; see GCC bugzilla 323
|
| 568 |
+
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323
|
| 569 |
+
data(elliprd, 'ellint_rd_xxz_ipp-ellint_rd_xxz', (0, 1, 2), 3,
|
| 570 |
+
rtol=6.5e-16),
|
| 571 |
+
data(elliprd, 'ellint_rd_xyy_ipp-ellint_rd_xyy', (0, 1, 2), 3,
|
| 572 |
+
rtol=6e-16),
|
| 573 |
+
data(elliprf, 'ellint_rf_data_ipp-ellint_rf_data', (0, 1, 2), 3,
|
| 574 |
+
rtol=5e-16),
|
| 575 |
+
data(elliprf, 'ellint_rf_xxx_ipp-ellint_rf_xxx', (0, 1, 2), 3,
|
| 576 |
+
rtol=5e-16),
|
| 577 |
+
data(elliprf, 'ellint_rf_xyy_ipp-ellint_rf_xyy', (0, 1, 2), 3,
|
| 578 |
+
rtol=5e-16),
|
| 579 |
+
data(elliprf, 'ellint_rf_xy0_ipp-ellint_rf_xy0', (0, 1, 2), 3,
|
| 580 |
+
rtol=5e-16),
|
| 581 |
+
data(elliprf, 'ellint_rf_0yy_ipp-ellint_rf_0yy', (0, 1, 2), 3,
|
| 582 |
+
rtol=5e-16),
|
| 583 |
+
# The accuracy of R_G is primarily limited by R_D that is used
|
| 584 |
+
# internally. It is generally worse than R_D. Notice that we increased
|
| 585 |
+
# the rtol for R_G here. The cases with duplicate arguments are
|
| 586 |
+
# slightly less likely to be unbalanced (at least two arguments are
|
| 587 |
+
# already balanced) so the error bound is slightly better. Again,
|
| 588 |
+
# precision with g++ 32-bit is even worse.
|
| 589 |
+
data(elliprg, 'ellint_rg_ipp-ellint_rg', (0, 1, 2), 3,
|
| 590 |
+
rtol=8.0e-16),
|
| 591 |
+
data(elliprg, 'ellint_rg_xxx_ipp-ellint_rg_xxx', (0, 1, 2), 3,
|
| 592 |
+
rtol=6e-16),
|
| 593 |
+
data(elliprg, 'ellint_rg_xyy_ipp-ellint_rg_xyy', (0, 1, 2), 3,
|
| 594 |
+
rtol=7.5e-16),
|
| 595 |
+
data(elliprg, 'ellint_rg_xy0_ipp-ellint_rg_xy0', (0, 1, 2), 3,
|
| 596 |
+
rtol=5e-16),
|
| 597 |
+
data(elliprg, 'ellint_rg_00x_ipp-ellint_rg_00x', (0, 1, 2), 3,
|
| 598 |
+
rtol=5e-16),
|
| 599 |
+
data(elliprj, 'ellint_rj_data_ipp-ellint_rj_data', (0, 1, 2, 3), 4,
|
| 600 |
+
rtol=5e-16, atol=1e-25,
|
| 601 |
+
param_filter=(lambda s: s <= 5e-26,)),
|
| 602 |
+
# ellint_rc_data_ipp/ellint_rc_data.txt
|
| 603 |
+
# ellint_rd_0xy_ipp/ellint_rd_0xy.txt
|
| 604 |
+
# ellint_rd_0yy_ipp/ellint_rd_0yy.txt
|
| 605 |
+
# ellint_rd_data_ipp/ellint_rd_data.txt
|
| 606 |
+
# ellint_rd_xxx_ipp/ellint_rd_xxx.txt
|
| 607 |
+
# ellint_rd_xxz_ipp/ellint_rd_xxz.txt
|
| 608 |
+
# ellint_rd_xyy_ipp/ellint_rd_xyy.txt
|
| 609 |
+
# ellint_rf_0yy_ipp/ellint_rf_0yy.txt
|
| 610 |
+
# ellint_rf_data_ipp/ellint_rf_data.txt
|
| 611 |
+
# ellint_rf_xxx_ipp/ellint_rf_xxx.txt
|
| 612 |
+
# ellint_rf_xy0_ipp/ellint_rf_xy0.txt
|
| 613 |
+
# ellint_rf_xyy_ipp/ellint_rf_xyy.txt
|
| 614 |
+
# ellint_rg_00x_ipp/ellint_rg_00x.txt
|
| 615 |
+
# ellint_rg_ipp/ellint_rg.txt
|
| 616 |
+
# ellint_rg_xxx_ipp/ellint_rg_xxx.txt
|
| 617 |
+
# ellint_rg_xy0_ipp/ellint_rg_xy0.txt
|
| 618 |
+
# ellint_rg_xyy_ipp/ellint_rg_xyy.txt
|
| 619 |
+
# ellint_rj_data_ipp/ellint_rj_data.txt
|
| 620 |
+
# ellint_rj_e2_ipp/ellint_rj_e2.txt
|
| 621 |
+
# ellint_rj_e3_ipp/ellint_rj_e3.txt
|
| 622 |
+
# ellint_rj_e4_ipp/ellint_rj_e4.txt
|
| 623 |
+
# ellint_rj_zp_ipp/ellint_rj_zp.txt
|
| 624 |
+
|
| 625 |
+
# jacobi_elliptic_ipp/jacobi_elliptic.txt
|
| 626 |
+
# jacobi_elliptic_small_ipp/jacobi_elliptic_small.txt
|
| 627 |
+
# jacobi_large_phi_ipp/jacobi_large_phi.txt
|
| 628 |
+
# jacobi_near_1_ipp/jacobi_near_1.txt
|
| 629 |
+
# jacobi_zeta_big_phi_ipp/jacobi_zeta_big_phi.txt
|
| 630 |
+
# jacobi_zeta_data_ipp/jacobi_zeta_data.txt
|
| 631 |
+
|
| 632 |
+
# heuman_lambda_data_ipp/heuman_lambda_data.txt
|
| 633 |
+
|
| 634 |
+
# hypergeometric_0F2_ipp/hypergeometric_0F2.txt
|
| 635 |
+
# hypergeometric_1F1_big_ipp/hypergeometric_1F1_big.txt
|
| 636 |
+
# hypergeometric_1F1_ipp/hypergeometric_1F1.txt
|
| 637 |
+
# hypergeometric_1F1_small_random_ipp/hypergeometric_1F1_small_random.txt
|
| 638 |
+
# hypergeometric_1F2_ipp/hypergeometric_1F2.txt
|
| 639 |
+
# hypergeometric_1f1_large_regularized_ipp/hypergeometric_1f1_large_regularized.txt # noqa: E501
|
| 640 |
+
# hypergeometric_1f1_log_large_unsolved_ipp/hypergeometric_1f1_log_large_unsolved.txt # noqa: E501
|
| 641 |
+
# hypergeometric_2F0_half_ipp/hypergeometric_2F0_half.txt
|
| 642 |
+
# hypergeometric_2F0_integer_a2_ipp/hypergeometric_2F0_integer_a2.txt
|
| 643 |
+
# hypergeometric_2F0_ipp/hypergeometric_2F0.txt
|
| 644 |
+
# hypergeometric_2F0_large_z_ipp/hypergeometric_2F0_large_z.txt
|
| 645 |
+
# hypergeometric_2F1_ipp/hypergeometric_2F1.txt
|
| 646 |
+
# hypergeometric_2F2_ipp/hypergeometric_2F2.txt
|
| 647 |
+
|
| 648 |
+
# ncbeta_big_ipp/ncbeta_big.txt
|
| 649 |
+
# nct_small_delta_ipp/nct_small_delta.txt
|
| 650 |
+
# nct_asym_ipp/nct_asym.txt
|
| 651 |
+
# ncbeta_ipp/ncbeta.txt
|
| 652 |
+
|
| 653 |
+
# powm1_data_ipp/powm1_big_data.txt
|
| 654 |
+
# powm1_sqrtp1m1_test_hpp/sqrtp1m1_data.txt
|
| 655 |
+
|
| 656 |
+
# sinc_data_ipp/sinc_data.txt
|
| 657 |
+
|
| 658 |
+
# test_gamma_data_ipp/gammap1m1_data.txt
|
| 659 |
+
# tgamma_ratio_data_ipp/tgamma_ratio_data.txt
|
| 660 |
+
|
| 661 |
+
# trig_data_ipp/trig_data.txt
|
| 662 |
+
# trig_data2_ipp/trig_data2.txt
|
| 663 |
+
]
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr)
|
| 667 |
+
def test_boost(test):
|
| 668 |
+
# Filter deprecation warnings of any deprecated functions.
|
| 669 |
+
if test.func in [btdtr, btdtri, btdtri_comp]:
|
| 670 |
+
with pytest.deprecated_call():
|
| 671 |
+
_test_factory(test)
|
| 672 |
+
else:
|
| 673 |
+
_test_factory(test)
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
GSL_TESTS = [
|
| 677 |
+
data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13),
|
| 678 |
+
data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13),
|
| 679 |
+
|
| 680 |
+
# Also the GSL output has limited accuracy...
|
| 681 |
+
data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
|
| 682 |
+
data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),
|
| 683 |
+
|
| 684 |
+
data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms',
|
| 685 |
+
(0, 1, 2), 3, rtol=1e-7, atol=1e-13),
|
| 686 |
+
data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms',
|
| 687 |
+
(0, 1, 2), 4, rtol=1e-7, atol=1e-13),
|
| 688 |
+
|
| 689 |
+
data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms',
|
| 690 |
+
(0, 1, 2), 5, rtol=1e-7, atol=1e-13),
|
| 691 |
+
data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms',
|
| 692 |
+
(0, 1, 2), 6, rtol=1e-7, atol=1e-13),
|
| 693 |
+
]
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
@pytest.mark.parametrize('test', GSL_TESTS, ids=repr)
|
| 697 |
+
def test_gsl(test):
|
| 698 |
+
_test_factory(test)
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
LOCAL_TESTS = [
|
| 702 |
+
data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2),
|
| 703 |
+
data_local(ellipkm1, 'ellipkm1', 0, 1),
|
| 704 |
+
data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2),
|
| 705 |
+
data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14),
|
| 706 |
+
data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14),
|
| 707 |
+
data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12),
|
| 708 |
+
data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11),
|
| 709 |
+
data_local(ellip_harm_2, 'ellip',(0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13),
|
| 710 |
+
data_local(ellip_harm, 'ellip',(0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13),
|
| 711 |
+
data_local(wright_bessel, 'wright_bessel', (0, 1, 2), 3, rtol=1e-11),
|
| 712 |
+
]
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr)
|
| 716 |
+
def test_local(test):
|
| 717 |
+
_test_factory(test)
|
| 718 |
+
|
| 719 |
+
|
| 720 |
+
def _test_factory(test, dtype=np.float64):
|
| 721 |
+
"""Boost test"""
|
| 722 |
+
with suppress_warnings() as sup:
|
| 723 |
+
sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected")
|
| 724 |
+
with np.errstate(all='ignore'):
|
| 725 |
+
test.check(dtype=dtype)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_digamma.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy import pi, log, sqrt
|
| 3 |
+
from numpy.testing import assert_, assert_equal
|
| 4 |
+
|
| 5 |
+
from scipy.special._testutils import FuncData
|
| 6 |
+
import scipy.special as sc
|
| 7 |
+
|
| 8 |
+
# Euler-Mascheroni constant
|
| 9 |
+
euler = 0.57721566490153286
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_consistency():
|
| 13 |
+
# Make sure the implementation of digamma for real arguments
|
| 14 |
+
# agrees with the implementation of digamma for complex arguments.
|
| 15 |
+
|
| 16 |
+
# It's all poles after -1e16
|
| 17 |
+
x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)]
|
| 18 |
+
dataset = np.vstack((x + 0j, sc.digamma(x))).T
|
| 19 |
+
FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check()
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_special_values():
|
| 23 |
+
# Test special values from Gauss's digamma theorem. See
|
| 24 |
+
#
|
| 25 |
+
# https://en.wikipedia.org/wiki/Digamma_function
|
| 26 |
+
|
| 27 |
+
dataset = [
|
| 28 |
+
(1, -euler),
|
| 29 |
+
(0.5, -2*log(2) - euler),
|
| 30 |
+
(1/3, -pi/(2*sqrt(3)) - 3*log(3)/2 - euler),
|
| 31 |
+
(1/4, -pi/2 - 3*log(2) - euler),
|
| 32 |
+
(1/6, -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler),
|
| 33 |
+
(1/8,
|
| 34 |
+
-pi/2 - 4*log(2) - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler)
|
| 35 |
+
]
|
| 36 |
+
|
| 37 |
+
dataset = np.asarray(dataset)
|
| 38 |
+
FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_nonfinite():
|
| 42 |
+
pts = [0.0, -0.0, np.inf]
|
| 43 |
+
std = [-np.inf, np.inf, np.inf]
|
| 44 |
+
assert_equal(sc.digamma(pts), std)
|
| 45 |
+
assert_(all(np.isnan(sc.digamma([-np.inf, -1]))))
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_faddeeva.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import assert_allclose
|
| 5 |
+
import scipy.special as sc
|
| 6 |
+
from scipy.special._testutils import FuncData
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestVoigtProfile:
|
| 10 |
+
|
| 11 |
+
@pytest.mark.parametrize('x, sigma, gamma', [
|
| 12 |
+
(np.nan, 1, 1),
|
| 13 |
+
(0, np.nan, 1),
|
| 14 |
+
(0, 1, np.nan),
|
| 15 |
+
(1, np.nan, 0),
|
| 16 |
+
(np.nan, 1, 0),
|
| 17 |
+
(1, 0, np.nan),
|
| 18 |
+
(np.nan, 0, 1),
|
| 19 |
+
(np.nan, 0, 0)
|
| 20 |
+
])
|
| 21 |
+
def test_nan(self, x, sigma, gamma):
|
| 22 |
+
assert np.isnan(sc.voigt_profile(x, sigma, gamma))
|
| 23 |
+
|
| 24 |
+
@pytest.mark.parametrize('x, desired', [
|
| 25 |
+
(-np.inf, 0),
|
| 26 |
+
(np.inf, 0)
|
| 27 |
+
])
|
| 28 |
+
def test_inf(self, x, desired):
|
| 29 |
+
assert sc.voigt_profile(x, 1, 1) == desired
|
| 30 |
+
|
| 31 |
+
def test_against_mathematica(self):
|
| 32 |
+
# Results obtained from Mathematica by computing
|
| 33 |
+
#
|
| 34 |
+
# PDF[VoigtDistribution[gamma, sigma], x]
|
| 35 |
+
#
|
| 36 |
+
points = np.array([
|
| 37 |
+
[-7.89, 45.06, 6.66, 0.0077921073660388806401],
|
| 38 |
+
[-0.05, 7.98, 24.13, 0.012068223646769913478],
|
| 39 |
+
[-13.98, 16.83, 42.37, 0.0062442236362132357833],
|
| 40 |
+
[-12.66, 0.21, 6.32, 0.010052516161087379402],
|
| 41 |
+
[11.34, 4.25, 21.96, 0.0113698923627278917805],
|
| 42 |
+
[-11.56, 20.40, 30.53, 0.0076332760432097464987],
|
| 43 |
+
[-9.17, 25.61, 8.32, 0.011646345779083005429],
|
| 44 |
+
[16.59, 18.05, 2.50, 0.013637768837526809181],
|
| 45 |
+
[9.11, 2.12, 39.33, 0.0076644040807277677585],
|
| 46 |
+
[-43.33, 0.30, 45.68, 0.0036680463875330150996]
|
| 47 |
+
])
|
| 48 |
+
FuncData(
|
| 49 |
+
sc.voigt_profile,
|
| 50 |
+
points,
|
| 51 |
+
(0, 1, 2),
|
| 52 |
+
3,
|
| 53 |
+
atol=0,
|
| 54 |
+
rtol=1e-15
|
| 55 |
+
).check()
|
| 56 |
+
|
| 57 |
+
def test_symmetry(self):
|
| 58 |
+
x = np.linspace(0, 10, 20)
|
| 59 |
+
assert_allclose(
|
| 60 |
+
sc.voigt_profile(x, 1, 1),
|
| 61 |
+
sc.voigt_profile(-x, 1, 1),
|
| 62 |
+
rtol=1e-15,
|
| 63 |
+
atol=0
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
@pytest.mark.parametrize('x, sigma, gamma, desired', [
|
| 67 |
+
(0, 0, 0, np.inf),
|
| 68 |
+
(1, 0, 0, 0)
|
| 69 |
+
])
|
| 70 |
+
def test_corner_cases(self, x, sigma, gamma, desired):
|
| 71 |
+
assert sc.voigt_profile(x, sigma, gamma) == desired
|
| 72 |
+
|
| 73 |
+
@pytest.mark.parametrize('sigma1, gamma1, sigma2, gamma2', [
|
| 74 |
+
(0, 1, 1e-16, 1),
|
| 75 |
+
(1, 0, 1, 1e-16),
|
| 76 |
+
(0, 0, 1e-16, 1e-16)
|
| 77 |
+
])
|
| 78 |
+
def test_continuity(self, sigma1, gamma1, sigma2, gamma2):
|
| 79 |
+
x = np.linspace(1, 10, 20)
|
| 80 |
+
assert_allclose(
|
| 81 |
+
sc.voigt_profile(x, sigma1, gamma1),
|
| 82 |
+
sc.voigt_profile(x, sigma2, gamma2),
|
| 83 |
+
rtol=1e-16,
|
| 84 |
+
atol=1e-16
|
| 85 |
+
)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_gammainc.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import assert_allclose, assert_array_equal
|
| 5 |
+
|
| 6 |
+
import scipy.special as sc
|
| 7 |
+
from scipy.special._testutils import FuncData
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
INVALID_POINTS = [
|
| 11 |
+
(1, -1),
|
| 12 |
+
(0, 0),
|
| 13 |
+
(-1, 1),
|
| 14 |
+
(np.nan, 1),
|
| 15 |
+
(1, np.nan)
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TestGammainc:
|
| 20 |
+
|
| 21 |
+
@pytest.mark.parametrize('a, x', INVALID_POINTS)
|
| 22 |
+
def test_domain(self, a, x):
|
| 23 |
+
assert np.isnan(sc.gammainc(a, x))
|
| 24 |
+
|
| 25 |
+
def test_a_eq_0_x_gt_0(self):
|
| 26 |
+
assert sc.gammainc(0, 1) == 1
|
| 27 |
+
|
| 28 |
+
@pytest.mark.parametrize('a, x, desired', [
|
| 29 |
+
(np.inf, 1, 0),
|
| 30 |
+
(np.inf, 0, 0),
|
| 31 |
+
(np.inf, np.inf, np.nan),
|
| 32 |
+
(1, np.inf, 1)
|
| 33 |
+
])
|
| 34 |
+
def test_infinite_arguments(self, a, x, desired):
|
| 35 |
+
result = sc.gammainc(a, x)
|
| 36 |
+
if np.isnan(desired):
|
| 37 |
+
assert np.isnan(result)
|
| 38 |
+
else:
|
| 39 |
+
assert result == desired
|
| 40 |
+
|
| 41 |
+
def test_infinite_limits(self):
|
| 42 |
+
# Test that large arguments converge to the hard-coded limits
|
| 43 |
+
# at infinity.
|
| 44 |
+
assert_allclose(
|
| 45 |
+
sc.gammainc(1000, 100),
|
| 46 |
+
sc.gammainc(np.inf, 100),
|
| 47 |
+
atol=1e-200, # Use `atol` since the function converges to 0.
|
| 48 |
+
rtol=0
|
| 49 |
+
)
|
| 50 |
+
assert sc.gammainc(100, 1000) == sc.gammainc(100, np.inf)
|
| 51 |
+
|
| 52 |
+
def test_x_zero(self):
|
| 53 |
+
a = np.arange(1, 10)
|
| 54 |
+
assert_array_equal(sc.gammainc(a, 0), 0)
|
| 55 |
+
|
| 56 |
+
def test_limit_check(self):
|
| 57 |
+
result = sc.gammainc(1e-10, 1)
|
| 58 |
+
limit = sc.gammainc(0, 1)
|
| 59 |
+
assert np.isclose(result, limit)
|
| 60 |
+
|
| 61 |
+
def gammainc_line(self, x):
|
| 62 |
+
# The line a = x where a simpler asymptotic expansion (analog
|
| 63 |
+
# of DLMF 8.12.15) is available.
|
| 64 |
+
c = np.array([-1/3, -1/540, 25/6048, 101/155520,
|
| 65 |
+
-3184811/3695155200, -2745493/8151736420])
|
| 66 |
+
res = 0
|
| 67 |
+
xfac = 1
|
| 68 |
+
for ck in c:
|
| 69 |
+
res -= ck*xfac
|
| 70 |
+
xfac /= x
|
| 71 |
+
res /= np.sqrt(2*np.pi*x)
|
| 72 |
+
res += 0.5
|
| 73 |
+
return res
|
| 74 |
+
|
| 75 |
+
def test_line(self):
|
| 76 |
+
x = np.logspace(np.log10(25), 300, 500)
|
| 77 |
+
a = x
|
| 78 |
+
dataset = np.vstack((a, x, self.gammainc_line(x))).T
|
| 79 |
+
FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
|
| 80 |
+
|
| 81 |
+
def test_roundtrip(self):
|
| 82 |
+
a = np.logspace(-5, 10, 100)
|
| 83 |
+
x = np.logspace(-5, 10, 100)
|
| 84 |
+
|
| 85 |
+
y = sc.gammaincinv(a, sc.gammainc(a, x))
|
| 86 |
+
assert_allclose(x, y, rtol=1e-10)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class TestGammaincc:
|
| 90 |
+
|
| 91 |
+
@pytest.mark.parametrize('a, x', INVALID_POINTS)
|
| 92 |
+
def test_domain(self, a, x):
|
| 93 |
+
assert np.isnan(sc.gammaincc(a, x))
|
| 94 |
+
|
| 95 |
+
def test_a_eq_0_x_gt_0(self):
|
| 96 |
+
assert sc.gammaincc(0, 1) == 0
|
| 97 |
+
|
| 98 |
+
@pytest.mark.parametrize('a, x, desired', [
|
| 99 |
+
(np.inf, 1, 1),
|
| 100 |
+
(np.inf, 0, 1),
|
| 101 |
+
(np.inf, np.inf, np.nan),
|
| 102 |
+
(1, np.inf, 0)
|
| 103 |
+
])
|
| 104 |
+
def test_infinite_arguments(self, a, x, desired):
|
| 105 |
+
result = sc.gammaincc(a, x)
|
| 106 |
+
if np.isnan(desired):
|
| 107 |
+
assert np.isnan(result)
|
| 108 |
+
else:
|
| 109 |
+
assert result == desired
|
| 110 |
+
|
| 111 |
+
def test_infinite_limits(self):
|
| 112 |
+
# Test that large arguments converge to the hard-coded limits
|
| 113 |
+
# at infinity.
|
| 114 |
+
assert sc.gammaincc(1000, 100) == sc.gammaincc(np.inf, 100)
|
| 115 |
+
assert_allclose(
|
| 116 |
+
sc.gammaincc(100, 1000),
|
| 117 |
+
sc.gammaincc(100, np.inf),
|
| 118 |
+
atol=1e-200, # Use `atol` since the function converges to 0.
|
| 119 |
+
rtol=0
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
def test_limit_check(self):
|
| 123 |
+
result = sc.gammaincc(1e-10,1)
|
| 124 |
+
limit = sc.gammaincc(0,1)
|
| 125 |
+
assert np.isclose(result, limit)
|
| 126 |
+
|
| 127 |
+
def test_x_zero(self):
|
| 128 |
+
a = np.arange(1, 10)
|
| 129 |
+
assert_array_equal(sc.gammaincc(a, 0), 1)
|
| 130 |
+
|
| 131 |
+
def test_roundtrip(self):
|
| 132 |
+
a = np.logspace(-5, 10, 100)
|
| 133 |
+
x = np.logspace(-5, 10, 100)
|
| 134 |
+
|
| 135 |
+
y = sc.gammainccinv(a, sc.gammaincc(a, x))
|
| 136 |
+
assert_allclose(x, y, rtol=1e-14)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_loggamma.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_allclose, assert_
|
| 3 |
+
|
| 4 |
+
from scipy.special._testutils import FuncData
|
| 5 |
+
from scipy.special import gamma, gammaln, loggamma
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def test_identities1():
|
| 9 |
+
# test the identity exp(loggamma(z)) = gamma(z)
|
| 10 |
+
x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
|
| 11 |
+
y = x.copy()
|
| 12 |
+
x, y = np.meshgrid(x, y)
|
| 13 |
+
z = (x + 1J*y).flatten()
|
| 14 |
+
dataset = np.vstack((z, gamma(z))).T
|
| 15 |
+
|
| 16 |
+
def f(z):
|
| 17 |
+
return np.exp(loggamma(z))
|
| 18 |
+
|
| 19 |
+
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_identities2():
|
| 23 |
+
# test the identity loggamma(z + 1) = log(z) + loggamma(z)
|
| 24 |
+
x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
|
| 25 |
+
y = x.copy()
|
| 26 |
+
x, y = np.meshgrid(x, y)
|
| 27 |
+
z = (x + 1J*y).flatten()
|
| 28 |
+
dataset = np.vstack((z, np.log(z) + loggamma(z))).T
|
| 29 |
+
|
| 30 |
+
def f(z):
|
| 31 |
+
return loggamma(z + 1)
|
| 32 |
+
|
| 33 |
+
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def test_complex_dispatch_realpart():
|
| 37 |
+
# Test that the real parts of loggamma and gammaln agree on the
|
| 38 |
+
# real axis.
|
| 39 |
+
x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5
|
| 40 |
+
|
| 41 |
+
dataset = np.vstack((x, gammaln(x))).T
|
| 42 |
+
|
| 43 |
+
def f(z):
|
| 44 |
+
z = np.array(z, dtype='complex128')
|
| 45 |
+
return loggamma(z).real
|
| 46 |
+
|
| 47 |
+
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def test_real_dispatch():
|
| 51 |
+
x = np.logspace(-10, 10) + 0.5
|
| 52 |
+
dataset = np.vstack((x, gammaln(x))).T
|
| 53 |
+
|
| 54 |
+
FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
|
| 55 |
+
assert_(loggamma(0) == np.inf)
|
| 56 |
+
assert_(np.isnan(loggamma(-1)))
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def test_gh_6536():
|
| 60 |
+
z = loggamma(complex(-3.4, +0.0))
|
| 61 |
+
zbar = loggamma(complex(-3.4, -0.0))
|
| 62 |
+
assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def test_branch_cut():
|
| 66 |
+
# Make sure negative zero is treated correctly
|
| 67 |
+
x = -np.logspace(300, -30, 100)
|
| 68 |
+
z = np.asarray([complex(x0, 0.0) for x0 in x])
|
| 69 |
+
zbar = np.asarray([complex(x0, -0.0) for x0 in x])
|
| 70 |
+
assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_logsumexp.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
|
| 3 |
+
assert_array_almost_equal, assert_)
|
| 4 |
+
|
| 5 |
+
from scipy.special import logsumexp, softmax
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def test_logsumexp():
|
| 9 |
+
# Test with zero-size array
|
| 10 |
+
a = []
|
| 11 |
+
desired = -np.inf
|
| 12 |
+
assert_equal(logsumexp(a), desired)
|
| 13 |
+
|
| 14 |
+
# Test whether logsumexp() function correctly handles large inputs.
|
| 15 |
+
a = np.arange(200)
|
| 16 |
+
desired = np.log(np.sum(np.exp(a)))
|
| 17 |
+
assert_almost_equal(logsumexp(a), desired)
|
| 18 |
+
|
| 19 |
+
# Now test with large numbers
|
| 20 |
+
b = [1000, 1000]
|
| 21 |
+
desired = 1000.0 + np.log(2.0)
|
| 22 |
+
assert_almost_equal(logsumexp(b), desired)
|
| 23 |
+
|
| 24 |
+
n = 1000
|
| 25 |
+
b = np.full(n, 10000, dtype='float64')
|
| 26 |
+
desired = 10000.0 + np.log(n)
|
| 27 |
+
assert_almost_equal(logsumexp(b), desired)
|
| 28 |
+
|
| 29 |
+
x = np.array([1e-40] * 1000000)
|
| 30 |
+
logx = np.log(x)
|
| 31 |
+
|
| 32 |
+
X = np.vstack([x, x])
|
| 33 |
+
logX = np.vstack([logx, logx])
|
| 34 |
+
assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum())
|
| 35 |
+
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
|
| 36 |
+
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
|
| 37 |
+
|
| 38 |
+
# Handling special values properly
|
| 39 |
+
assert_equal(logsumexp(np.inf), np.inf)
|
| 40 |
+
assert_equal(logsumexp(-np.inf), -np.inf)
|
| 41 |
+
assert_equal(logsumexp(np.nan), np.nan)
|
| 42 |
+
assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf)
|
| 43 |
+
|
| 44 |
+
# Handling an array with different magnitudes on the axes
|
| 45 |
+
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
|
| 46 |
+
[-1e10, -np.inf]], axis=-1),
|
| 47 |
+
[1e10, -1e10])
|
| 48 |
+
|
| 49 |
+
# Test keeping dimensions
|
| 50 |
+
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
|
| 51 |
+
[-1e10, -np.inf]],
|
| 52 |
+
axis=-1,
|
| 53 |
+
keepdims=True),
|
| 54 |
+
[[1e10], [-1e10]])
|
| 55 |
+
|
| 56 |
+
# Test multiple axes
|
| 57 |
+
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
|
| 58 |
+
[-1e10, -np.inf]],
|
| 59 |
+
axis=(-1,-2)),
|
| 60 |
+
1e10)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def test_logsumexp_b():
|
| 64 |
+
a = np.arange(200)
|
| 65 |
+
b = np.arange(200, 0, -1)
|
| 66 |
+
desired = np.log(np.sum(b*np.exp(a)))
|
| 67 |
+
assert_almost_equal(logsumexp(a, b=b), desired)
|
| 68 |
+
|
| 69 |
+
a = [1000, 1000]
|
| 70 |
+
b = [1.2, 1.2]
|
| 71 |
+
desired = 1000 + np.log(2 * 1.2)
|
| 72 |
+
assert_almost_equal(logsumexp(a, b=b), desired)
|
| 73 |
+
|
| 74 |
+
x = np.array([1e-40] * 100000)
|
| 75 |
+
b = np.linspace(1, 1000, 100000)
|
| 76 |
+
logx = np.log(x)
|
| 77 |
+
|
| 78 |
+
X = np.vstack((x, x))
|
| 79 |
+
logX = np.vstack((logx, logx))
|
| 80 |
+
B = np.vstack((b, b))
|
| 81 |
+
assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
|
| 82 |
+
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
|
| 83 |
+
(B * X).sum(axis=0))
|
| 84 |
+
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
|
| 85 |
+
(B * X).sum(axis=1))
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def test_logsumexp_sign():
|
| 89 |
+
a = [1,1,1]
|
| 90 |
+
b = [1,-1,-1]
|
| 91 |
+
|
| 92 |
+
r, s = logsumexp(a, b=b, return_sign=True)
|
| 93 |
+
assert_almost_equal(r,1)
|
| 94 |
+
assert_equal(s,-1)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def test_logsumexp_sign_zero():
|
| 98 |
+
a = [1,1]
|
| 99 |
+
b = [1,-1]
|
| 100 |
+
|
| 101 |
+
r, s = logsumexp(a, b=b, return_sign=True)
|
| 102 |
+
assert_(not np.isfinite(r))
|
| 103 |
+
assert_(not np.isnan(r))
|
| 104 |
+
assert_(r < 0)
|
| 105 |
+
assert_equal(s,0)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def test_logsumexp_sign_shape():
|
| 109 |
+
a = np.ones((1,2,3,4))
|
| 110 |
+
b = np.ones_like(a)
|
| 111 |
+
|
| 112 |
+
r, s = logsumexp(a, axis=2, b=b, return_sign=True)
|
| 113 |
+
|
| 114 |
+
assert_equal(r.shape, s.shape)
|
| 115 |
+
assert_equal(r.shape, (1,2,4))
|
| 116 |
+
|
| 117 |
+
r, s = logsumexp(a, axis=(1,3), b=b, return_sign=True)
|
| 118 |
+
|
| 119 |
+
assert_equal(r.shape, s.shape)
|
| 120 |
+
assert_equal(r.shape, (1,3))
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def test_logsumexp_complex_sign():
|
| 124 |
+
a = np.array([1 + 1j, 2 - 1j, -2 + 3j])
|
| 125 |
+
|
| 126 |
+
r, s = logsumexp(a, return_sign=True)
|
| 127 |
+
|
| 128 |
+
expected_sumexp = np.exp(a).sum()
|
| 129 |
+
# This is the numpy>=2.0 convention for np.sign
|
| 130 |
+
expected_sign = expected_sumexp / abs(expected_sumexp)
|
| 131 |
+
|
| 132 |
+
assert_allclose(s, expected_sign)
|
| 133 |
+
assert_allclose(s * np.exp(r), expected_sumexp)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def test_logsumexp_shape():
|
| 137 |
+
a = np.ones((1, 2, 3, 4))
|
| 138 |
+
b = np.ones_like(a)
|
| 139 |
+
|
| 140 |
+
r = logsumexp(a, axis=2, b=b)
|
| 141 |
+
assert_equal(r.shape, (1, 2, 4))
|
| 142 |
+
|
| 143 |
+
r = logsumexp(a, axis=(1, 3), b=b)
|
| 144 |
+
assert_equal(r.shape, (1, 3))
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def test_logsumexp_b_zero():
|
| 148 |
+
a = [1,10000]
|
| 149 |
+
b = [1,0]
|
| 150 |
+
|
| 151 |
+
assert_almost_equal(logsumexp(a, b=b), 1)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def test_logsumexp_b_shape():
|
| 155 |
+
a = np.zeros((4,1,2,1))
|
| 156 |
+
b = np.ones((3,1,5))
|
| 157 |
+
|
| 158 |
+
logsumexp(a, b=b)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def test_softmax_fixtures():
|
| 162 |
+
assert_allclose(softmax([1000, 0, 0, 0]), np.array([1, 0, 0, 0]),
|
| 163 |
+
rtol=1e-13)
|
| 164 |
+
assert_allclose(softmax([1, 1]), np.array([.5, .5]), rtol=1e-13)
|
| 165 |
+
assert_allclose(softmax([0, 1]), np.array([1, np.e])/(1 + np.e),
|
| 166 |
+
rtol=1e-13)
|
| 167 |
+
|
| 168 |
+
# Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
|
| 169 |
+
# converted to float.
|
| 170 |
+
x = np.arange(4)
|
| 171 |
+
expected = np.array([0.03205860328008499,
|
| 172 |
+
0.08714431874203256,
|
| 173 |
+
0.23688281808991013,
|
| 174 |
+
0.6439142598879722])
|
| 175 |
+
|
| 176 |
+
assert_allclose(softmax(x), expected, rtol=1e-13)
|
| 177 |
+
|
| 178 |
+
# Translation property. If all the values are changed by the same amount,
|
| 179 |
+
# the softmax result does not change.
|
| 180 |
+
assert_allclose(softmax(x + 100), expected, rtol=1e-13)
|
| 181 |
+
|
| 182 |
+
# When axis=None, softmax operates on the entire array, and preserves
|
| 183 |
+
# the shape.
|
| 184 |
+
assert_allclose(softmax(x.reshape(2, 2)), expected.reshape(2, 2),
|
| 185 |
+
rtol=1e-13)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def test_softmax_multi_axes():
|
| 189 |
+
assert_allclose(softmax([[1000, 0], [1000, 0]], axis=0),
|
| 190 |
+
np.array([[.5, .5], [.5, .5]]), rtol=1e-13)
|
| 191 |
+
assert_allclose(softmax([[1000, 0], [1000, 0]], axis=1),
|
| 192 |
+
np.array([[1, 0], [1, 0]]), rtol=1e-13)
|
| 193 |
+
|
| 194 |
+
# Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
|
| 195 |
+
# converted to float.
|
| 196 |
+
x = np.array([[-25, 0, 25, 50],
|
| 197 |
+
[1, 325, 749, 750]])
|
| 198 |
+
expected = np.array([[2.678636961770877e-33,
|
| 199 |
+
1.9287498479371314e-22,
|
| 200 |
+
1.3887943864771144e-11,
|
| 201 |
+
0.999999999986112],
|
| 202 |
+
[0.0,
|
| 203 |
+
1.9444526359919372e-185,
|
| 204 |
+
0.2689414213699951,
|
| 205 |
+
0.7310585786300048]])
|
| 206 |
+
assert_allclose(softmax(x, axis=1), expected, rtol=1e-13)
|
| 207 |
+
assert_allclose(softmax(x.T, axis=0), expected.T, rtol=1e-13)
|
| 208 |
+
|
| 209 |
+
# 3-d input, with a tuple for the axis.
|
| 210 |
+
x3d = x.reshape(2, 2, 2)
|
| 211 |
+
assert_allclose(softmax(x3d, axis=(1, 2)), expected.reshape(2, 2, 2),
|
| 212 |
+
rtol=1e-13)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_round.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from scipy.special import _test_internal
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@pytest.mark.fail_slow(5)
|
| 8 |
+
@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()")
|
| 9 |
+
def test_add_round_up():
|
| 10 |
+
np.random.seed(1234)
|
| 11 |
+
_test_internal.test_add_round(10**5, 'up')
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.mark.fail_slow(5)
|
| 15 |
+
@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()")
|
| 16 |
+
def test_add_round_down():
|
| 17 |
+
np.random.seed(1234)
|
| 18 |
+
_test_internal.test_add_round(10**5, 'down')
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_sf_error.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.testing import assert_, assert_equal, IS_PYPY
|
| 6 |
+
import pytest
|
| 7 |
+
from pytest import raises as assert_raises
|
| 8 |
+
|
| 9 |
+
import scipy.special as sc
|
| 10 |
+
from scipy.special._ufuncs import _sf_error_test_function
|
| 11 |
+
|
| 12 |
+
# Map from sf_error category name to its integer code.  'ok' is deliberately
# omitted: it is not an error condition and cannot be configured.
_sf_error_code_map = {
    name: code
    for code, name in enumerate(
        ['singular', 'underflow', 'overflow', 'slow', 'loss',
         'no_result', 'domain', 'arg', 'other'],
        start=1,
    )
}
|
| 24 |
+
|
| 25 |
+
# The three actions an sf_error category can be set to.
_sf_error_actions = 'ignore warn raise'.split()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _check_action(fun, args, action):
    """Call *fun* on *args* and verify the configured *action* is honored.

    'raise' must produce a SpecialFunctionError, 'warn' a
    SpecialFunctionWarning, and 'ignore' must complete with no warning at all.
    """
    # TODO: special expert should correct the coercion at the true location?
    coerced = np.asarray(args, dtype=np.dtype("long"))
    if action == 'raise':
        with assert_raises(sc.SpecialFunctionError):
            fun(*coerced)
    elif action == 'warn':
        with pytest.warns(sc.SpecialFunctionWarning):
            fun(*coerced)
    else:
        # action == 'ignore': escalate any warning to an error so that an
        # unexpected SpecialFunctionWarning fails the test.
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            fun(*coerced)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def test_geterr():
    """Every entry reported by geterr() must use known categories/actions."""
    for category, action in sc.geterr().items():
        assert_(category in _sf_error_code_map)
        assert_(action in _sf_error_actions)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def test_seterr():
    """seterr must return the previous state and apply only the given change."""
    saved = sc.geterr()
    try:
        for category, error_code in _sf_error_code_map.items():
            for action in _sf_error_actions:
                before = sc.geterr()
                returned = sc.seterr(**{category: action})
                # seterr hands back the state that was in effect beforehand.
                assert_(before == returned)
                after = sc.geterr()
                assert_(after[category] == action)
                # All other categories must be untouched.
                before.pop(category)
                after.pop(category)
                assert_(before == after)
                _check_action(_sf_error_test_function, (error_code,), action)
    finally:
        # Restore whatever state the test started with.
        sc.seterr(**saved)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_sf_error_special_refcount():
    """Regression test for gh-16233.

    Raising a SpecialFunctionError must not leak a reference to the
    scipy.special module.
    """
    before = sys.getrefcount(sc)
    with sc.errstate(all='raise'):
        with pytest.raises(sc.SpecialFunctionError, match='domain error'):
            sc.ndtri(2.0)
    assert sys.getrefcount(sc) == before
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def test_errstate_pyx_basic():
    """errstate must trigger for Cython-level sf_error and then restore state."""
    saved = sc.geterr()
    with sc.errstate(singular='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.loggamma(0)
    assert_equal(sc.geterr(), saved)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def test_errstate_c_basic():
    """errstate must trigger for C-level sf_error and then restore state."""
    saved = sc.geterr()
    with sc.errstate(domain='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.spence(-1)
    assert_equal(sc.geterr(), saved)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def test_errstate_cpp_basic():
    """errstate must trigger for C++-level sf_error and then restore state."""
    saved = sc.geterr()
    with sc.errstate(underflow='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.wrightomega(-1000)
    assert_equal(sc.geterr(), saved)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_errstate_cpp_scipy_special():
    """errstate must trigger for scipy::special C++ code and restore state."""
    saved = sc.geterr()
    with sc.errstate(singular='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.lambertw(0, 1)
    assert_equal(sc.geterr(), saved)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def test_errstate_cpp_alt_ufunc_machinery():
    """errstate must work through the alternative ufunc machinery too."""
    saved = sc.geterr()
    with sc.errstate(singular='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.gammaln(0)
    assert_equal(sc.geterr(), saved)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def test_errstate():
    """Every (category, action) pair must round-trip through errstate."""
    for category, error_code in _sf_error_code_map.items():
        for action in _sf_error_actions:
            saved = sc.geterr()
            with sc.errstate(**{category: action}):
                _check_action(_sf_error_test_function, (error_code,), action)
            # Leaving the context must restore the prior configuration.
            assert_equal(sc.geterr(), saved)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def test_errstate_all_but_one():
    """A specific category override must win over the 'all' setting."""
    saved = sc.geterr()
    with sc.errstate(all='raise', singular='ignore'):
        # singular was explicitly set to 'ignore': must not raise.
        sc.gammaln(0)
        # Every other category still raises.
        with assert_raises(sc.SpecialFunctionError):
            sc.spence(-1.0)
    assert_equal(sc.geterr(), saved)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_spfun_stats.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import (assert_array_equal,
|
| 3 |
+
assert_array_almost_equal_nulp, assert_almost_equal)
|
| 4 |
+
from pytest import raises as assert_raises
|
| 5 |
+
|
| 6 |
+
from scipy.special import gammaln, multigammaln
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestMultiGammaLn:
    """Checks of multigammaln against known identities and argument rules."""

    def test1(self):
        # Identity: Gamma_1(a) == Gamma(a).
        np.random.seed(1234)
        a = np.abs(np.random.randn())
        assert_array_equal(multigammaln(a, 1), gammaln(a))

    def test2(self):
        # Identity: Gamma_2(a) == sqrt(pi) * Gamma(a) * Gamma(a - 0.5).
        a = np.array([2.5, 10.0])
        expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
        assert_almost_equal(multigammaln(a, 2), expected)

    def test_bararg(self):
        # The dimension d must be integral; a non-integer is rejected.
        assert_raises(ValueError, multigammaln, 0.5, 1.2)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _check_multigammaln_array_result(a, d):
    """Check multigammaln's array output shape and per-element values.

    The returned array must have the same shape as *a*, and each element must
    match the value obtained by calling multigammaln with the corresponding
    scalar.
    """
    result = multigammaln(a, d)
    assert_array_equal(a.shape, result.shape)
    for scalar, value in zip(a.ravel(), result.ravel()):
        assert_array_almost_equal_nulp(value, multigammaln(scalar, d))
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def test_multigammaln_array_arg():
    """multigammaln must handle array arguments of several shapes.

    Includes a regression test for ticket #1849 (a single-element array) and
    a 0-d scalar case.
    """
    np.random.seed(1234)
    cases = [
        # (a, d)
        (np.abs(np.random.randn(3, 2)) + 5, 5),
        (np.abs(np.random.randn(1, 2)) + 5, 5),
        (np.arange(10.0, 18.0).reshape(2, 2, 2), 3),
        (np.array([2.0]), 3),   # ticket #1849: length-1 array
        (np.float64(2.0), 3),
    ]
    for a, d in cases:
        _check_multigammaln_array_result(a, d)
|
| 61 |
+
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_sph_harm.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_allclose
|
| 3 |
+
import scipy.special as sc
|
| 4 |
+
from scipy.special._basic import _sph_harm_all
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_first_harmonics():
    """Compare sph_harm with explicit forms of the first four harmonics.

    The closed forms use `theta` as the azimuthal angle, `phi` as the polar
    angle, and include the Condon-Shortley phase.  Notation is Ymn.
    """
    def Y00(theta, phi):
        return 0.5*np.sqrt(1/np.pi)

    def Yn11(theta, phi):
        return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi)

    def Y01(theta, phi):
        return 0.5*np.sqrt(3/np.pi)*np.cos(phi)

    def Y11(theta, phi):
        return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi)

    # (closed form, order m, degree n) — avoids shadowing the loop variables.
    cases = [(Y00, 0, 0), (Yn11, -1, 1), (Y01, 0, 1), (Y11, 1, 1)]

    azimuth = np.linspace(0, 2*np.pi)
    polar = np.linspace(0, np.pi)
    azimuth, polar = np.meshgrid(azimuth, polar)

    for harm, m, n in cases:
        assert_allclose(sc.sph_harm(m, n, azimuth, polar),
                        harm(azimuth, polar),
                        rtol=1e-15, atol=1e-15,
                        err_msg=f"Y^{m}_{n} incorrect")
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_all_harmonics():
    """_sph_harm_all must agree with sph_harm, and be zero where |m| > n."""
    n_max = 50

    azimuth = np.linspace(0, 2 * np.pi)
    polar = np.linspace(0, np.pi)

    actual = _sph_harm_all(2 * n_max, n_max, azimuth, polar)

    sampled = [0, 1, 2, 5, 10, 20, 50]
    for n in sampled:
        for m in sampled:
            # Positive order m.
            expected = sc.sph_harm(m, n, azimuth, polar) if m <= n else 0
            np.testing.assert_allclose(actual[m, n], expected, rtol=1e-05)

            # Negative order -m (for m == 0 this repeats the check above).
            expected = sc.sph_harm(-m, n, azimuth, polar) if m <= n else 0
            np.testing.assert_allclose(actual[-m, n], expected, rtol=1e-05)
|
parrot/lib/python3.10/site-packages/scipy/special/tests/test_spherical_bessel.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Tests of spherical Bessel functions.
|
| 3 |
+
#
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.testing import (assert_almost_equal, assert_allclose,
|
| 6 |
+
assert_array_almost_equal, suppress_warnings)
|
| 7 |
+
import pytest
|
| 8 |
+
from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi
|
| 9 |
+
|
| 10 |
+
from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn
|
| 11 |
+
from scipy.integrate import quad
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestSphericalJn:
    """Checks of spherical_jn against DLMF identities and reference values."""

    def test_spherical_jn_exact(self):
        # https://dlmf.nist.gov/10.49.E3 — closed form for n = 2.  The exact
        # expression is numerically stable only for small n or z >> n.
        z = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        expected = (-1/z + 3/z**3)*sin(z) - 3/z**2*cos(z)
        assert_allclose(spherical_jn(2, z), expected)

    def test_spherical_jn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        orders = np.array([1, 2, 3, 7, 12])
        z = 1.1 + 1.5j
        lhs = spherical_jn(orders - 1, z) + spherical_jn(orders + 1, z)
        assert_allclose(lhs, (2*orders + 1)/z*spherical_jn(orders, z))

    def test_spherical_jn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E1
        orders = np.array([1, 2, 3, 7, 12])
        z = 0.12
        lhs = spherical_jn(orders - 1, z) + spherical_jn(orders + 1, z)
        assert_allclose(lhs, (2*orders + 1)/z*spherical_jn(orders, z))

    def test_spherical_jn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        z = np.array([-inf, inf])
        assert_allclose(spherical_jn(6, z), np.array([0, 0]))

    def test_spherical_jn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        z = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_jn(7, z), np.array([0, 0, inf*(1+1j)]))

    def test_spherical_jn_large_arg_1(self):
        # https://github.com/scipy/scipy/issues/2165
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747)

    def test_spherical_jn_large_arg_2(self):
        # https://github.com/scipy/scipy/issues/1641
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05)

    def test_spherical_jn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1 — j0 = sin(x)/x -> 1 at zero while
        # every higher order vanishes.
        orders = np.array([0, 1, 2, 5, 10, 100])
        assert_allclose(spherical_jn(orders, 0), np.array([1, 0, 0, 0, 0, 0]))
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class TestSphericalYn:
    """Checks of spherical_yn against DLMF identities."""

    def test_spherical_yn_exact(self):
        # https://dlmf.nist.gov/10.49.E5 — closed form for n = 2.  The exact
        # expression is numerically stable only for small n or z >> n.
        z = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        expected = (1/z - 3/z**3)*cos(z) - 3/z**2*sin(z)
        assert_allclose(spherical_yn(2, z), expected)

    def test_spherical_yn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E1
        orders = np.array([1, 2, 3, 7, 12])
        z = 0.12
        lhs = spherical_yn(orders - 1, z) + spherical_yn(orders + 1, z)
        assert_allclose(lhs, (2*orders + 1)/z*spherical_yn(orders, z))

    def test_spherical_yn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        orders = np.array([1, 2, 3, 7, 12])
        z = 1.1 + 1.5j
        lhs = spherical_yn(orders - 1, z) + spherical_yn(orders + 1, z)
        assert_allclose(lhs, (2*orders + 1)/z*spherical_yn(orders, z))

    def test_spherical_yn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        z = np.array([-inf, inf])
        assert_allclose(spherical_yn(6, z), np.array([0, 0]))

    def test_spherical_yn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        z = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_yn(7, z), np.array([0, 0, inf*(1+1j)]))

    def test_spherical_yn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E2
        orders = np.array([0, 1, 2, 5, 10, 100])
        assert_allclose(spherical_yn(orders, 0), np.full(orders.shape, -inf))

    def test_spherical_yn_at_zero_complex(self):
        # Consistently with numpy:
        # >>> -np.cos(0)/0
        # -inf
        # >>> -np.cos(0+0j)/(0+0j)
        # (-inf + nan*j)
        orders = np.array([0, 1, 2, 5, 10, 100])
        assert_allclose(spherical_yn(orders, 0 + 0j), np.full(orders.shape, nan))
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class TestSphericalJnYnCrossProduct:
    """Cross-product identities of jn and yn — https://dlmf.nist.gov/10.50.E3."""

    def test_spherical_jn_yn_cross_product_1(self):
        orders = np.array([1, 5, 8])
        z = np.array([0.1, 1, 10])
        lhs = (spherical_jn(orders + 1, z) * spherical_yn(orders, z)
               - spherical_jn(orders, z) * spherical_yn(orders + 1, z))
        assert_allclose(lhs, 1/z**2)

    def test_spherical_jn_yn_cross_product_2(self):
        orders = np.array([1, 5, 8])
        z = np.array([0.1, 1, 10])
        lhs = (spherical_jn(orders + 2, z) * spherical_yn(orders, z)
               - spherical_jn(orders, z) * spherical_yn(orders + 2, z))
        assert_allclose(lhs, (2*orders + 3)/z**3)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class TestSphericalIn:
    """Checks of spherical_in against DLMF identities."""

    def test_spherical_in_exact(self):
        # https://dlmf.nist.gov/10.49.E9 — closed form for n = 2.
        z = np.array([0.12, 1.23, 12.34, 123.45])
        expected = (1/z + 3/z**3)*sinh(z) - 3/z**2*cosh(z)
        assert_allclose(spherical_in(2, z), expected)

    def test_spherical_in_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E4
        orders = np.array([1, 2, 3, 7, 12])
        z = 0.12
        lhs = spherical_in(orders - 1, z) - spherical_in(orders + 1, z)
        assert_allclose(lhs, (2*orders + 1)/z*spherical_in(orders, z))

    def test_spherical_in_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        orders = np.array([1, 2, 3, 7, 12])
        z = 1.1 + 1.5j
        lhs = spherical_in(orders - 1, z) - spherical_in(orders + 1, z)
        assert_allclose(lhs, (2*orders + 1)/z*spherical_in(orders, z))

    def test_spherical_in_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        z = np.array([-inf, inf])
        assert_allclose(spherical_in(5, z), np.array([-inf, inf]))

    def test_spherical_in_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E5
        # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but
        # this appears impossible to achieve because C99 regards any complex
        # value with at least one infinite part as a complex infinity, so
        # 1j*inf cannot be distinguished from (1+1j)*inf.  Therefore, nan is
        # the correct return value.
        z = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_in(7, z), np.array([-inf, inf, nan]))

    def test_spherical_in_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1 — i0 = sinh(x)/x -> 1 at zero while
        # every higher order vanishes.
        orders = np.array([0, 1, 2, 5, 10, 100])
        assert_allclose(spherical_in(orders, 0), np.array([1, 0, 0, 0, 0, 0]))
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class TestSphericalKn:
    """Checks of spherical_kn against DLMF identities."""

    def test_spherical_kn_exact(self):
        # https://dlmf.nist.gov/10.49.E13 — closed form for n = 2.
        z = np.array([0.12, 1.23, 12.34, 123.45])
        expected = pi/2*exp(-z)*(1/z + 3/z**2 + 3/z**3)
        assert_allclose(spherical_kn(2, z), expected)

    def test_spherical_kn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E4
        orders = np.array([1, 2, 3, 7, 12])
        z = 0.12
        lhs = ((-1)**(orders - 1)*spherical_kn(orders - 1, z)
               - (-1)**(orders + 1)*spherical_kn(orders + 1, z))
        rhs = (-1)**orders*(2*orders + 1)/z*spherical_kn(orders, z)
        assert_allclose(lhs, rhs)

    def test_spherical_kn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E4
        orders = np.array([1, 2, 3, 7, 12])
        z = 1.1 + 1.5j
        lhs = ((-1)**(orders - 1)*spherical_kn(orders - 1, z)
               - (-1)**(orders + 1)*spherical_kn(orders + 1, z))
        rhs = (-1)**orders*(2*orders + 1)/z*spherical_kn(orders, z)
        assert_allclose(lhs, rhs)

    def test_spherical_kn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E6
        z = np.array([-inf, inf])
        assert_allclose(spherical_kn(5, z), np.array([-inf, 0]))

    def test_spherical_kn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E6
        # The behavior at complex infinity depends on the sign of the real
        # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's
        # z*inf.  This distinction cannot be captured, so we return nan.
        z = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_kn(7, z), np.array([-inf, 0, nan]))

    def test_spherical_kn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E2
        orders = np.array([0, 1, 2, 5, 10, 100])
        assert_allclose(spherical_kn(orders, 0), np.full(orders.shape, inf))

    def test_spherical_kn_at_zero_complex(self):
        # https://dlmf.nist.gov/10.52.E2
        orders = np.array([0, 1, 2, 5, 10, 100])
        assert_allclose(spherical_kn(orders, 0 + 0j), np.full(orders.shape, nan))
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class SphericalDerivativesTestCase:
    """Mixin verifying derivatives via the fundamental theorem of calculus.

    Subclasses supply f(n, z) and its claimed derivative df(n, z); the check
    integrates df over [a, b] and compares against f(b) - f(a), using quad's
    reported error estimate as the tolerance.
    """

    def fundamental_theorem(self, n, a, b):
        integral, tolerance = quad(lambda z: self.df(n, z), a, b)
        assert_allclose(integral, self.f(n, b) - self.f(n, a), atol=tolerance)

    @pytest.mark.slow
    def test_fundamental_theorem_0(self):
        self.fundamental_theorem(0, 3.0, 15.0)

    @pytest.mark.slow
    def test_fundamental_theorem_7(self):
        self.fundamental_theorem(7, 0.5, 1.2)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class TestSphericalJnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_jn."""

    def f(self, n, z):
        return spherical_jn(n, z)

    def df(self, n, z):
        return spherical_jn(n, z, derivative=True)

    def test_spherical_jn_d_zero(self):
        # j1'(0) = 1/3; every other jn'(0) vanishes.
        orders = np.array([0, 1, 2, 3, 7, 15])
        assert_allclose(spherical_jn(orders, 0, derivative=True),
                        np.array([0, 1/3, 0, 0, 0, 0]))
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
class TestSphericalYnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_yn."""

    def f(self, n, z):
        return spherical_yn(n, z)

    def df(self, n, z):
        return spherical_yn(n, z, derivative=True)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class TestSphericalInDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_in."""

    def f(self, n, z):
        return spherical_in(n, z)

    def df(self, n, z):
        return spherical_in(n, z, derivative=True)

    def test_spherical_in_d_zero(self):
        orders = np.array([0, 1, 2, 3, 7, 15])
        # Also exercise the non-derivative path at z = 0, as the original
        # test did.
        spherical_in(orders, 0, derivative=False)
        assert_allclose(spherical_in(orders, 0, derivative=True),
                        np.array([0, 1/3, 0, 0, 0, 0]))
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class TestSphericalKnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_kn."""

    def f(self, n, z):
        return spherical_kn(n, z)

    def df(self, n, z):
        return spherical_kn(n, z, derivative=True)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
class TestSphericalOld:
    """Ports of test_basic.TestSpherical, rewritten to use spherical_*
    instead of sph_* but otherwise unchanged in substance."""

    def test_sph_in(self):
        # Reproduces test_basic.TestSpherical.test_sph_in.
        z = 0.2
        vals = np.empty((2, 2))
        vals[0] = [spherical_in(0, z), spherical_in(1, z)]
        vals[1] = [spherical_in(0, z, derivative=True),
                   spherical_in(1, z, derivative=True)]

        # Derivative identities: i0' = i1 and i1' = i0 - (2/z) i1.
        d0 = vals[0][1]
        d1 = vals[0][0] - 2.0/0.2 * vals[0][1]
        assert_array_almost_equal(vals[0], np.array([1.0066800127054699381,
                                                     0.066933714568029540839]), 12)
        assert_array_almost_equal(vals[1], [d0, d1], 12)

    def test_sph_in_kn_order0(self):
        z = 1.
        i0 = np.empty((2,))
        i0[0] = spherical_in(0, z)
        i0[1] = spherical_in(0, z, derivative=True)
        i0_expected = np.array([np.sinh(z)/z,
                                np.cosh(z)/z - np.sinh(z)/z**2])
        assert_array_almost_equal(r_[i0], i0_expected)

        k0 = np.empty((2,))
        k0[0] = spherical_kn(0, z)
        k0[1] = spherical_kn(0, z, derivative=True)
        k0_expected = np.array([0.5*pi*exp(-z)/z,
                                -0.5*pi*exp(-z)*(1/z + 1/z**2)])
        assert_array_almost_equal(r_[k0], k0_expected)

    def test_sph_jn(self):
        z = 0.2
        vals = np.empty((2, 3))
        vals[0] = [spherical_jn(k, z) for k in range(3)]
        vals[1] = [spherical_jn(k, z, derivative=True) for k in range(3)]

        # Derivative identities for jn.
        d0 = -vals[0][1]
        d1 = vals[0][0] - 2.0/0.2*vals[0][1]
        d2 = vals[0][1] - 3.0/0.2*vals[0][2]
        assert_array_almost_equal(vals[0], [0.99334665397530607731,
                                            0.066400380670322230863,
                                            0.0026590560795273856680], 12)
        assert_array_almost_equal(vals[1], [d0, d1, d2], 12)

    def test_sph_kn(self):
        z = 0.2
        vals = np.empty((2, 3))
        vals[0] = [spherical_kn(k, z) for k in range(3)]
        vals[1] = [spherical_kn(k, z, derivative=True) for k in range(3)]

        # Derivative identities for kn.
        d0 = -vals[0][1]
        d1 = -vals[0][0] - 2.0/0.2*vals[0][1]
        d2 = -vals[0][1] - 3.0/0.2*vals[0][2]
        assert_array_almost_equal(vals[0], [6.4302962978445670140,
                                            38.581777787067402086,
                                            585.15696310385559829], 12)
        assert_array_almost_equal(vals[1], [d0, d1, d2], 9)

    def test_sph_yn(self):
        y2 = spherical_yn(2, 0.2)
        y0 = spherical_yn(0, 0.2)
        assert_almost_equal(y2, -377.52483, 5)  # previous values in the system
        assert_almost_equal(y0, -4.9003329, 5)
        expected_dy1 = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3
        dy1 = spherical_yn(1, 0.2, derivative=True)
        # compare correct derivative val. (correct =-system val).
        assert_almost_equal(dy1, expected_dy1, 4)
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h
// NOTE(review): generated header — regenerate via torchgen rather than
// editing by hand.

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace meta {

// Meta-dispatch declarations for _linalg_slogdet; the tuple elements are
// (sign, logabsdet, LU, pivots), per the parameter names of the out variants.
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A);
// "out" variant: output tensors come first.
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A);
// "outf" variant: functional argument order with output tensors last.
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots);

} // namespace meta
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// @generated by torchgen/gen.py from NativeFunction.h
// NOTE(review): generated header — regenerate via torchgen rather than
// editing by hand.

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>


namespace at {
namespace native {
// Native declarations for the _test_warn_in_autograd operator.
TORCH_API at::Tensor _test_warn_in_autograd(const at::Tensor & self);
// "out" variant writing the result into a caller-provided tensor.
TORCH_API at::Tensor & _test_warn_in_autograd_out(const at::Tensor & self, at::Tensor & out);
} // namespace native
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace meta
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Scalar & other);
|
| 20 |
+
TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Scalar & other);
|
| 21 |
+
TORCH_API at::Tensor __and__(const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API as_strided_copy {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<c10::SymInt>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_copy")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API as_strided_copy_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<c10::SymInt>, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_copy")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API avg_pool2d_out {
|
| 18 |
+
using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>, at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)")
|
| 24 |
+
static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
|
| 25 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API avg_pool2d {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other);
|
| 24 |
+
|
| 25 |
+
} // namespace cpu
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor broadcast_to_symint(const at::Tensor & self, c10::SymIntArrayRef size);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0);
|
| 21 |
+
TORCH_API at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace cpu
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mul_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor mul(const at::Tensor & self, const at::Scalar & other);
|
| 21 |
+
TORCH_API at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
|
| 22 |
+
TORCH_API at::Tensor & mul_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & mul_(at::Tensor & self, const at::Scalar & other);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautograd
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/permute.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/permute_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
|
| 26 |
+
inline at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) {
|
| 27 |
+
return at::_ops::permute::call(self, dims);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor & quantize_per_tensor_dynamic_out(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out);
|
| 20 |
+
TORCH_API at::Tensor quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/refine_names_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API refine_names {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, at::DimnameList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::refine_names")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, at::DimnameList names);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0);
|
| 21 |
+
TORCH_API at::Tensor & smooth_l1_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0);
|
| 22 |
+
TORCH_API at::Tensor & smooth_l1_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor special_spherical_bessel_j0(const at::Tensor & x);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, bool unbiased);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false);
|
| 23 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeimplicitautograd
|
| 26 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/METADATA
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: compressed-tensors
|
| 3 |
+
Version: 0.9.1
|
| 4 |
+
Summary: Library for utilization of compressed safetensors of neural network models
|
| 5 |
+
Home-page: https://github.com/neuralmagic/compressed-tensors
|
| 6 |
+
Author: Neuralmagic, Inc.
|
| 7 |
+
Author-email: support@neuralmagic.com
|
| 8 |
+
License: Apache 2.0
|
| 9 |
+
Description-Content-Type: text/markdown
|
| 10 |
+
License-File: LICENSE
|
| 11 |
+
Requires-Dist: torch>=1.7.0
|
| 12 |
+
Requires-Dist: transformers
|
| 13 |
+
Requires-Dist: pydantic>=2.0
|
| 14 |
+
Provides-Extra: accelerate
|
| 15 |
+
Requires-Dist: accelerate; extra == "accelerate"
|
| 16 |
+
Provides-Extra: dev
|
| 17 |
+
Requires-Dist: black==22.12.0; extra == "dev"
|
| 18 |
+
Requires-Dist: isort==5.8.0; extra == "dev"
|
| 19 |
+
Requires-Dist: wheel>=0.36.2; extra == "dev"
|
| 20 |
+
Requires-Dist: flake8>=3.8.3; extra == "dev"
|
| 21 |
+
Requires-Dist: pytest>=6.0.0; extra == "dev"
|
| 22 |
+
Requires-Dist: nbconvert>=7.16.3; extra == "dev"
|
| 23 |
+
|
| 24 |
+
# compressed-tensors
|
| 25 |
+
|
| 26 |
+
The `compressed-tensors` library extends the [safetensors](https://github.com/huggingface/safetensors) format, providing a versatile and efficient way to store and manage compressed tensor data. This library supports various quantization and sparsity schemes, making it a unified format for handling different model optimizations like GPTQ, AWQ, SmoothQuant, INT8, FP8, SparseGPT, and more.
|
| 27 |
+
|
| 28 |
+
## Why `compressed-tensors`?
|
| 29 |
+
|
| 30 |
+
As model compression becomes increasingly important for efficient deployment of LLMs, the landscape of quantization and compression techniques has become increasingly fragmented.
|
| 31 |
+
Each method often comes with its own storage format and loading procedures, making it challenging to work with multiple techniques or switch between them.
|
| 32 |
+
`compressed-tensors` addresses this by providing a single, extensible format that can represent a wide variety of compression schemes.
|
| 33 |
+
|
| 34 |
+
* **Unified Checkpoint Format**: Supports various compression schemes in a single, consistent format.
|
| 35 |
+
* **Wide Compatibility**: Works with popular quantization methods like GPTQ, SmoothQuant, and FP8. See [llm-compressor](https://github.com/vllm-project/llm-compressor)
|
| 36 |
+
* **Flexible Quantization Support**:
|
| 37 |
+
* Weight-only quantization (e.g., W4A16, W8A16, WnA16)
|
| 38 |
+
* Activation quantization (e.g., W8A8)
|
| 39 |
+
* KV cache quantization
|
| 40 |
+
* Non-uniform schemes (different layers can be quantized in different ways!)
|
| 41 |
+
* **Sparsity Support**: Handles both unstructured and semi-structured (e.g., 2:4) sparsity patterns.
|
| 42 |
+
* **Open-Source Integration**: Designed to work seamlessly with Hugging Face models and PyTorch.
|
| 43 |
+
|
| 44 |
+
This allows developers and researchers to easily experiment with composing different quantization methods, simplify model deployment pipelines, and reduce the overhead of supporting multiple compression formats in inference engines.
|
| 45 |
+
|
| 46 |
+
## Installation
|
| 47 |
+
|
| 48 |
+
### From [PyPI](https://pypi.org/project/compressed-tensors)
|
| 49 |
+
|
| 50 |
+
Stable release:
|
| 51 |
+
```bash
|
| 52 |
+
pip install compressed-tensors
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
Nightly release:
|
| 56 |
+
```bash
|
| 57 |
+
pip install compressed-tensors-nightly
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
### From Source
|
| 61 |
+
|
| 62 |
+
```bash
|
| 63 |
+
git clone https://github.com/neuralmagic/compressed-tensors
|
| 64 |
+
cd compressed-tensors
|
| 65 |
+
pip install -e .
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
## Getting started
|
| 69 |
+
|
| 70 |
+
### Saving/Loading Compressed Tensors (Bitmask Compression)
|
| 71 |
+
|
| 72 |
+
The function `save_compressed` uses the `compression_format` argument to apply compression to tensors.
|
| 73 |
+
The function `load_compressed` reverses the process: converts the compressed weights on disk to decompressed weights in device memory.
|
| 74 |
+
|
| 75 |
+
```python
|
| 76 |
+
from compressed_tensors import save_compressed, load_compressed, BitmaskConfig
|
| 77 |
+
from torch import Tensor
|
| 78 |
+
from typing import Dict
|
| 79 |
+
|
| 80 |
+
# the example BitmaskConfig method efficiently compresses
|
| 81 |
+
# tensors with large number of zero entries
|
| 82 |
+
compression_config = BitmaskConfig()
|
| 83 |
+
|
| 84 |
+
tensors: Dict[str, Tensor] = {"tensor_1": Tensor(
|
| 85 |
+
[[0.0, 0.0, 0.0],
|
| 86 |
+
[1.0, 1.0, 1.0]]
|
| 87 |
+
)}
|
| 88 |
+
# compress tensors using BitmaskConfig compression format (save them efficiently on disk)
|
| 89 |
+
save_compressed(tensors, "model.safetensors", compression_format=compression_config.format)
|
| 90 |
+
|
| 91 |
+
# decompress tensors (load_compressed returns a generator for memory efficiency)
|
| 92 |
+
decompressed_tensors = {}
|
| 93 |
+
for tensor_name, tensor in load_compressed("model.safetensors", compression_config = compression_config):
|
| 94 |
+
decompressed_tensors[tensor_name] = tensor
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
## Saving/Loading Compressed Models (Bitmask Compression)
|
| 98 |
+
|
| 99 |
+
We can apply bitmask compression to a whole model. For more detailed example see `example` directory.
|
| 100 |
+
```python
|
| 101 |
+
from compressed_tensors import save_compressed_model, load_compressed, BitmaskConfig
|
| 102 |
+
from transformers import AutoModelForCausalLM
|
| 103 |
+
|
| 104 |
+
model_name = "neuralmagic/llama2.c-stories110M-pruned50"
|
| 105 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
|
| 106 |
+
|
| 107 |
+
original_state_dict = model.state_dict()
|
| 108 |
+
|
| 109 |
+
compression_config = BitmaskConfig()
|
| 110 |
+
|
| 111 |
+
# save compressed model weights
|
| 112 |
+
save_compressed_model(model, "compressed_model.safetensors", compression_format=compression_config.format)
|
| 113 |
+
|
| 114 |
+
# load compressed model weights (`dict` turns generator into a dictionary)
|
| 115 |
+
state_dict = dict(load_compressed("compressed_model.safetensors", compression_config))
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
For more in-depth tutorial on bitmask compression, refer to the [notebook](https://github.com/neuralmagic/compressed-tensors/blob/d707c5b84bc3fef164aebdcd97cb6eaa571982f8/examples/bitmask_compression.ipynb).
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
## Saving a Compressed Model with PTQ
|
| 122 |
+
|
| 123 |
+
We can use compressed-tensors to run basic post training quantization (PTQ) and save the quantized model compressed on disk
|
| 124 |
+
|
| 125 |
+
```python
|
| 126 |
+
model_name = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
|
| 127 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda:0", torch_dtype="auto")
|
| 128 |
+
|
| 129 |
+
config = QuantizationConfig.parse_file("./examples/bit_packing/int4_config.json")
|
| 130 |
+
config.quantization_status = QuantizationStatus.CALIBRATION
|
| 131 |
+
apply_quantization_config(model, config)
|
| 132 |
+
|
| 133 |
+
dataset = load_dataset("ptb_text_only")["train"]
|
| 134 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 135 |
+
|
| 136 |
+
def tokenize_function(examples):
|
| 137 |
+
return tokenizer(examples["sentence"], padding=False, truncation=True, max_length=1024)
|
| 138 |
+
|
| 139 |
+
tokenized_dataset = dataset.map(tokenize_function, batched=True)
|
| 140 |
+
data_loader = DataLoader(tokenized_dataset, batch_size=1, collate_fn=DefaultDataCollator())
|
| 141 |
+
|
| 142 |
+
with torch.no_grad():
|
| 143 |
+
for idx, sample in tqdm(enumerate(data_loader), desc="Running calibration"):
|
| 144 |
+
sample = {key: value.to(device) for key,value in sample.items()}
|
| 145 |
+
_ = model(**sample)
|
| 146 |
+
|
| 147 |
+
if idx >= 512:
|
| 148 |
+
break
|
| 149 |
+
|
| 150 |
+
model.apply(freeze_module_quantization)
|
| 151 |
+
model.apply(compress_quantized_weights)
|
| 152 |
+
|
| 153 |
+
output_dir = "./ex_llama1.1b_w4a16_packed_quantize"
|
| 154 |
+
compressor = ModelCompressor(quantization_config=config)
|
| 155 |
+
compressed_state_dict = compressor.compress(model)
|
| 156 |
+
model.save_pretrained(output_dir, state_dict=compressed_state_dict)
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
For more in-depth tutorial on quantization compression, refer to the [notebook](./examples/quantize_and_pack_int4.ipynb).
|
vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/RECORD
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
compressed_tensors-0.9.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
compressed_tensors-0.9.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
| 3 |
+
compressed_tensors-0.9.1.dist-info/METADATA,sha256=LTzdui2DBwsv09xaTEsW2X66rd775Jf2lY9v9hs_WJg,6782
|
| 4 |
+
compressed_tensors-0.9.1.dist-info/RECORD,,
|
| 5 |
+
compressed_tensors-0.9.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
compressed_tensors-0.9.1.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
|
| 7 |
+
compressed_tensors-0.9.1.dist-info/top_level.txt,sha256=w2i-GyPs2s1UwVxvutSvN_lM22SXC2hQFBmoMcPnV7Y,19
|
| 8 |
+
compressed_tensors/__init__.py,sha256=UtKmifNeBCSE2TZSAfduVNNzHY-3V7bLjZ7n7RuXLOE,812
|
| 9 |
+
compressed_tensors/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
compressed_tensors/__pycache__/base.cpython-310.pyc,,
|
| 11 |
+
compressed_tensors/__pycache__/version.cpython-310.pyc,,
|
| 12 |
+
compressed_tensors/base.py,sha256=73HYH7HY7O2roC89yG_piPFnZwrBfn_i7HmKl90SKc0,875
|
| 13 |
+
compressed_tensors/compressors/__init__.py,sha256=smSygTSfcfuujRrAXDc6uZm4L_ccV1tWZewqVnOb4lM,825
|
| 14 |
+
compressed_tensors/compressors/__pycache__/__init__.cpython-310.pyc,,
|
| 15 |
+
compressed_tensors/compressors/__pycache__/base.cpython-310.pyc,,
|
| 16 |
+
compressed_tensors/compressors/__pycache__/helpers.cpython-310.pyc,,
|
| 17 |
+
compressed_tensors/compressors/base.py,sha256=D9TNwQcjanDiAHODPbg8JUqc66e3j50rctY7A708NEs,6743
|
| 18 |
+
compressed_tensors/compressors/helpers.py,sha256=OK6qxX9j3bHwF9JfIYSGMgBJe2PWjlTA3byXKCJaTIQ,5431
|
| 19 |
+
compressed_tensors/compressors/model_compressors/__init__.py,sha256=5RGGPFu4YqEt_aOdFSQYFYFDjcZFJN0CsMqRtDZz3Js,666
|
| 20 |
+
compressed_tensors/compressors/model_compressors/__pycache__/__init__.cpython-310.pyc,,
|
| 21 |
+
compressed_tensors/compressors/model_compressors/__pycache__/model_compressor.cpython-310.pyc,,
|
| 22 |
+
compressed_tensors/compressors/model_compressors/model_compressor.py,sha256=3WyzAW2Rm_uLprxwO2QH6FR76W6Mk4r2yedayaSZHhw,18396
|
| 23 |
+
compressed_tensors/compressors/quantized_compressors/__init__.py,sha256=09UJq68Pht6Bf-4iP9xYl3tetKsncNPHD8IAGbePsr4,714
|
| 24 |
+
compressed_tensors/compressors/quantized_compressors/__pycache__/__init__.cpython-310.pyc,,
|
| 25 |
+
compressed_tensors/compressors/quantized_compressors/__pycache__/base.cpython-310.pyc,,
|
| 26 |
+
compressed_tensors/compressors/quantized_compressors/__pycache__/naive_quantized.cpython-310.pyc,,
|
| 27 |
+
compressed_tensors/compressors/quantized_compressors/__pycache__/pack_quantized.cpython-310.pyc,,
|
| 28 |
+
compressed_tensors/compressors/quantized_compressors/base.py,sha256=LVqSSqSjGi8LB-X13zC_0AFHc8BobGQVC0zjInDhOWE,7217
|
| 29 |
+
compressed_tensors/compressors/quantized_compressors/naive_quantized.py,sha256=fahmPJFz49rVS7q705uQwZ0kUtdP46GuXR7nPr6uIqI,4943
|
| 30 |
+
compressed_tensors/compressors/quantized_compressors/pack_quantized.py,sha256=OO5dceCfNVuY8A23kBg6z2wk-zGUVqR_MyLvObvT7pk,7741
|
| 31 |
+
compressed_tensors/compressors/sparse_compressors/__init__.py,sha256=Atuz-OdEgn8OCUhx7Ovd6gXdyImAI186uCR-uR0t_Nk,737
|
| 32 |
+
compressed_tensors/compressors/sparse_compressors/__pycache__/__init__.cpython-310.pyc,,
|
| 33 |
+
compressed_tensors/compressors/sparse_compressors/__pycache__/base.cpython-310.pyc,,
|
| 34 |
+
compressed_tensors/compressors/sparse_compressors/__pycache__/dense.cpython-310.pyc,,
|
| 35 |
+
compressed_tensors/compressors/sparse_compressors/__pycache__/sparse_24_bitmask.cpython-310.pyc,,
|
| 36 |
+
compressed_tensors/compressors/sparse_compressors/__pycache__/sparse_bitmask.cpython-310.pyc,,
|
| 37 |
+
compressed_tensors/compressors/sparse_compressors/base.py,sha256=9e841MQWr0j8m33ejDw_jP5_BIpQ5099x9_pvuZ-Nr0,5944
|
| 38 |
+
compressed_tensors/compressors/sparse_compressors/dense.py,sha256=lSKNWRx6H7aUqaJj1j4qbXk8Gkm1UohbnvW1Rvq6Ra4,1284
|
| 39 |
+
compressed_tensors/compressors/sparse_compressors/sparse_24_bitmask.py,sha256=_g139pe4iAFn5jvGIEk4v-qMoyx9ID6E88vriPSNYV4,8604
|
| 40 |
+
compressed_tensors/compressors/sparse_compressors/sparse_bitmask.py,sha256=7zSr9bqkpuH1ivQpxtYBNxXIoElal7Jo1nSKpZN_IFk,5633
|
| 41 |
+
compressed_tensors/compressors/sparse_quantized_compressors/__init__.py,sha256=4f_cwcKXB1nVVMoiKgTFAc8jAPjPLElo-Df_EDm1_xw,675
|
| 42 |
+
compressed_tensors/compressors/sparse_quantized_compressors/__pycache__/__init__.cpython-310.pyc,,
|
| 43 |
+
compressed_tensors/compressors/sparse_quantized_compressors/__pycache__/marlin_24.cpython-310.pyc,,
|
| 44 |
+
compressed_tensors/compressors/sparse_quantized_compressors/marlin_24.py,sha256=BMIQWTLlnUvxy14iEJegtiP75WHJeOVojey9mKOK1hE,9427
|
| 45 |
+
compressed_tensors/config/__init__.py,sha256=8sOoZ6xvYSC79mBvEtO8l6xk4PC80d29AnnJiGMrY2M,737
|
| 46 |
+
compressed_tensors/config/__pycache__/__init__.cpython-310.pyc,,
|
| 47 |
+
compressed_tensors/config/__pycache__/base.cpython-310.pyc,,
|
| 48 |
+
compressed_tensors/config/__pycache__/dense.cpython-310.pyc,,
|
| 49 |
+
compressed_tensors/config/__pycache__/sparse_24_bitmask.cpython-310.pyc,,
|
| 50 |
+
compressed_tensors/config/__pycache__/sparse_bitmask.cpython-310.pyc,,
|
| 51 |
+
compressed_tensors/config/base.py,sha256=R3iUmFf1MslEjin5LgwQbmfJHIsS7Uw0UIxfn780uqY,3479
|
| 52 |
+
compressed_tensors/config/dense.py,sha256=NgSxnFCnckU9-iunxEaqiFwqgdO7YYxlWKR74jNbjks,1317
|
| 53 |
+
compressed_tensors/config/sparse_24_bitmask.py,sha256=Lhj39zT2V1hxftprvxvneyhv45ShlXOKd75DBbDTyTE,1401
|
| 54 |
+
compressed_tensors/config/sparse_bitmask.py,sha256=pZUboRNZTu6NajGOQEFExoPknak5ynVAUeiiYpS1Gt8,1308
|
| 55 |
+
compressed_tensors/linear/__init__.py,sha256=fH6rjBYAxuwrTzBTlTjTgCYNyh6TCvCqajCz4Im4YrA,617
|
| 56 |
+
compressed_tensors/linear/__pycache__/__init__.cpython-310.pyc,,
|
| 57 |
+
compressed_tensors/linear/__pycache__/compressed_linear.cpython-310.pyc,,
|
| 58 |
+
compressed_tensors/linear/compressed_linear.py,sha256=MJa-UfoKhIkdUWRD1shrXXri2cOwR5GK0a4t4bNYosM,3268
|
| 59 |
+
compressed_tensors/quantization/__init__.py,sha256=83J5bPB7PavN2TfCoW7_vEDhfYpm4TDrqYO9vdSQ5bk,760
|
| 60 |
+
compressed_tensors/quantization/__pycache__/__init__.cpython-310.pyc,,
|
| 61 |
+
compressed_tensors/quantization/__pycache__/quant_args.cpython-310.pyc,,
|
| 62 |
+
compressed_tensors/quantization/__pycache__/quant_config.cpython-310.pyc,,
|
| 63 |
+
compressed_tensors/quantization/__pycache__/quant_scheme.cpython-310.pyc,,
|
| 64 |
+
compressed_tensors/quantization/lifecycle/__init__.py,sha256=_uItzFWusyV74Zco_pHLOTdE9a83cL-R-ZdyQrBkIyw,772
|
| 65 |
+
compressed_tensors/quantization/lifecycle/__pycache__/__init__.cpython-310.pyc,,
|
| 66 |
+
compressed_tensors/quantization/lifecycle/__pycache__/apply.cpython-310.pyc,,
|
| 67 |
+
compressed_tensors/quantization/lifecycle/__pycache__/compressed.cpython-310.pyc,,
|
| 68 |
+
compressed_tensors/quantization/lifecycle/__pycache__/forward.cpython-310.pyc,,
|
| 69 |
+
compressed_tensors/quantization/lifecycle/__pycache__/helpers.cpython-310.pyc,,
|
| 70 |
+
compressed_tensors/quantization/lifecycle/__pycache__/initialize.cpython-310.pyc,,
|
| 71 |
+
compressed_tensors/quantization/lifecycle/apply.py,sha256=XS4M6N1opKBybhkuQsS338QVb_CKMhUM5TUKrqoNQ0k,16517
|
| 72 |
+
compressed_tensors/quantization/lifecycle/compressed.py,sha256=Fj9n66IN0EWsOAkBHg3O0GlOQpxstqjCcs0ttzMXrJ0,2296
|
| 73 |
+
compressed_tensors/quantization/lifecycle/forward.py,sha256=DOWouUqfaLA4Qhg-ojVVBdhhSAlgZqFC26vZARxE0ko,12961
|
| 74 |
+
compressed_tensors/quantization/lifecycle/helpers.py,sha256=C0mhy2vJ0fCjVeN4kFNhw8Eq1wkteBGHiZ36RVLThRY,944
|
| 75 |
+
compressed_tensors/quantization/lifecycle/initialize.py,sha256=hymYtayTSumm8KCYAYPY267aWmlsJpt8oQFiRblk8qE,7452
|
| 76 |
+
compressed_tensors/quantization/quant_args.py,sha256=jwC__lSmuiJ2qSJYYZGgWgQNbZu6YhhS0e-qugrTNXE,9058
|
| 77 |
+
compressed_tensors/quantization/quant_config.py,sha256=vx06wBo91p4LCb3Vzd-2eCTUeIf_Sz2ZXRP263eQyjQ,10385
|
| 78 |
+
compressed_tensors/quantization/quant_scheme.py,sha256=eQ0JrRZ80GX69fpwW87VzPzzhajhk4mUaJScjk82OY4,6010
|
| 79 |
+
compressed_tensors/quantization/utils/__init__.py,sha256=VdtEmP0bvuND_IGQnyqUPc5lnFp-1_yD7StKSX4x80w,656
|
| 80 |
+
compressed_tensors/quantization/utils/__pycache__/__init__.cpython-310.pyc,,
|
| 81 |
+
compressed_tensors/quantization/utils/__pycache__/helpers.cpython-310.pyc,,
|
| 82 |
+
compressed_tensors/quantization/utils/helpers.py,sha256=DBP-sGRpGAY01K0LFE7qqonNj4hkTYL_mXrMs2LtAD8,14100
|
| 83 |
+
compressed_tensors/registry/__init__.py,sha256=FwLSNYqfIrb5JD_6OK_MT4_svvKTN_nEhpgQlQvGbjI,658
|
| 84 |
+
compressed_tensors/registry/__pycache__/__init__.cpython-310.pyc,,
|
| 85 |
+
compressed_tensors/registry/__pycache__/registry.cpython-310.pyc,,
|
| 86 |
+
compressed_tensors/registry/registry.py,sha256=vRcjVB1ITfSbfYUaGndBBmqhip_5vsS62weorVg0iXo,11896
|
| 87 |
+
compressed_tensors/utils/__init__.py,sha256=gS4gSU2pwcAbsKj-6YMaqhm25udFy6ISYaWBf-myRSM,808
|
| 88 |
+
compressed_tensors/utils/__pycache__/__init__.cpython-310.pyc,,
|
| 89 |
+
compressed_tensors/utils/__pycache__/helpers.cpython-310.pyc,,
|
| 90 |
+
compressed_tensors/utils/__pycache__/offload.cpython-310.pyc,,
|
| 91 |
+
compressed_tensors/utils/__pycache__/permutations_24.cpython-310.pyc,,
|
| 92 |
+
compressed_tensors/utils/__pycache__/permute.cpython-310.pyc,,
|
| 93 |
+
compressed_tensors/utils/__pycache__/safetensors_load.cpython-310.pyc,,
|
| 94 |
+
compressed_tensors/utils/__pycache__/semi_structured_conversions.cpython-310.pyc,,
|
| 95 |
+
compressed_tensors/utils/helpers.py,sha256=xQHZXwIAAybC8mMTiAtWSOeggZMT1JOC6_wcDvlo2yk,10320
|
| 96 |
+
compressed_tensors/utils/offload.py,sha256=cMmzd9IdlNbs29CReHj1PPSLUM6OWaT5YumlLT5eP3w,13845
|
| 97 |
+
compressed_tensors/utils/permutations_24.py,sha256=kx6fsfDHebx94zsSzhXGyCyuC9sVyah6BUUir_StT28,2530
|
| 98 |
+
compressed_tensors/utils/permute.py,sha256=V6tJLKo3Syccj-viv4F7ZKZgJeCB-hl-dK8RKI_kBwI,2355
|
| 99 |
+
compressed_tensors/utils/safetensors_load.py,sha256=fBuoHVPoBt1mkvqFJ60zQIASX_4nhl0-6QfFS27NY8I,11430
|
| 100 |
+
compressed_tensors/utils/semi_structured_conversions.py,sha256=XKNffPum54kPASgqKzgKvyeqWPAkair2XEQXjkp7ho8,13489
|
| 101 |
+
compressed_tensors/version.py,sha256=cPIrNBysxeJxrC4lzqGpVUu_oM56xF851VDnvn1gsew,1585
|
vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/REQUESTED
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.45.1)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
vllm/lib/python3.10/site-packages/compressed_tensors-0.9.1.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
compressed_tensors
|
vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.3
|
| 2 |
+
Name: einops
|
| 3 |
+
Version: 0.8.0
|
| 4 |
+
Summary: A new flavour of deep learning operations
|
| 5 |
+
Project-URL: Homepage, https://github.com/arogozhnikov/einops
|
| 6 |
+
Author: Alex Rogozhnikov
|
| 7 |
+
License: MIT
|
| 8 |
+
License-File: LICENSE
|
| 9 |
+
Keywords: deep learning,einops,machine learning,neural networks,scientific computations,tensor manipulation
|
| 10 |
+
Classifier: Intended Audience :: Science/Research
|
| 11 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 12 |
+
Classifier: Programming Language :: Python :: 3
|
| 13 |
+
Requires-Python: >=3.8
|
| 14 |
+
Description-Content-Type: text/markdown
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
<!--
|
| 18 |
+
<a href='http://arogozhnikov.github.io/images/einops/einops_video.mp4' >
|
| 19 |
+
<div align="center">
|
| 20 |
+
<img src="http://arogozhnikov.github.io/images/einops/einops_video.gif" alt="einops package examples" />
|
| 21 |
+
<br>
|
| 22 |
+
<small><a href='http://arogozhnikov.github.io/images/einops/einops_video.mp4'>This video in high quality (mp4)</a></small>
|
| 23 |
+
<br><br>
|
| 24 |
+
</div>
|
| 25 |
+
</a>
|
| 26 |
+
-->
|
| 27 |
+
|
| 28 |
+
<!-- this link magically rendered as video, unfortunately not in docs -->
|
| 29 |
+
|
| 30 |
+
https://user-images.githubusercontent.com/6318811/177030658-66f0eb5d-e136-44d8-99c9-86ae298ead5b.mp4
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# einops
|
| 36 |
+
[](https://github.com/arogozhnikov/einops/actions/workflows/run_tests.yml)
|
| 37 |
+
[](https://badge.fury.io/py/einops)
|
| 38 |
+
[](https://einops.rocks/)
|
| 39 |
+

|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
Flexible and powerful tensor operations for readable and reliable code. <br />
|
| 43 |
+
Supports numpy, pytorch, tensorflow, jax, and [others](#supported-frameworks).
|
| 44 |
+
|
| 45 |
+
## Recent updates:
|
| 46 |
+
|
| 47 |
+
- 0.7.0: no-hassle `torch.compile`, support of [array api standard](https://data-apis.org/array-api/latest/API_specification/index.html) and more
|
| 48 |
+
- 10'000🎉: github reports that more than 10k project use einops
|
| 49 |
+
- einops 0.6.1: paddle backend added
|
| 50 |
+
- einops 0.6 introduces [packing and unpacking](https://github.com/arogozhnikov/einops/blob/master/docs/4-pack-and-unpack.ipynb)
|
| 51 |
+
- einops 0.5: einsum is now a part of einops
|
| 52 |
+
- [Einops paper](https://openreview.net/pdf?id=oapKSVM2bcj) is accepted for oral presentation at ICLR 2022 (yes, it worth reading).
|
| 53 |
+
Talk recordings are [available](https://iclr.cc/virtual/2022/oral/6603)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
<details markdown="1">
|
| 57 |
+
<summary>Previous updates</summary>
|
| 58 |
+
- flax and oneflow backend added
|
| 59 |
+
- torch.jit.script is supported for pytorch layers
|
| 60 |
+
- powerful EinMix added to einops. [Einmix tutorial notebook](https://github.com/arogozhnikov/einops/blob/master/docs/3-einmix-layer.ipynb)
|
| 61 |
+
</details>
|
| 62 |
+
|
| 63 |
+
<!--<div align="center">
|
| 64 |
+
<img src="http://arogozhnikov.github.io/images/einops/einops_logo_350x350.png"
|
| 65 |
+
alt="einops package logo" width="250" height="250" />
|
| 66 |
+
<br><br>
|
| 67 |
+
</div> -->
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
## Tweets
|
| 71 |
+
|
| 72 |
+
> In case you need convincing arguments for setting aside time to learn about einsum and einops...
|
| 73 |
+
[Tim Rocktäschel](https://twitter.com/_rockt/status/1230818967205425152)
|
| 74 |
+
|
| 75 |
+
> Writing better code with PyTorch and einops 👌
|
| 76 |
+
[Andrej Karpathy](https://twitter.com/karpathy/status/1290826075916779520)
|
| 77 |
+
|
| 78 |
+
> Slowly but surely, einops is seeping in to every nook and cranny of my code. If you find yourself shuffling around bazillion dimensional tensors, this might change your life
|
| 79 |
+
[Nasim Rahaman](https://twitter.com/nasim_rahaman/status/1216022614755463169)
|
| 80 |
+
|
| 81 |
+
[More testimonials](https://einops.rocks/pages/testimonials/)
|
| 82 |
+
|
| 83 |
+
<!--
|
| 84 |
+
## Recordings of talk at ICLR 2022
|
| 85 |
+
|
| 86 |
+
<a href='https://iclr.cc/virtual/2022/oral/6603'>
|
| 87 |
+
<img width="922" alt="Screen Shot 2022-07-03 at 1 00 15 AM" src="https://user-images.githubusercontent.com/6318811/177030789-89d349bf-ef75-4af5-a71f-609896d1c8d9.png">
|
| 88 |
+
</a>
|
| 89 |
+
|
| 90 |
+
Watch [a 15-minute talk](https://iclr.cc/virtual/2022/oral/6603) focused on main problems of standard tensor manipulation methods, and how einops improves this process.
|
| 91 |
+
-->
|
| 92 |
+
|
| 93 |
+
## Contents
|
| 94 |
+
|
| 95 |
+
- [Installation](#Installation)
|
| 96 |
+
- [Documentation](https://einops.rocks/)
|
| 97 |
+
- [Tutorial](#Tutorials)
|
| 98 |
+
- [API micro-reference](#API)
|
| 99 |
+
- [Why using einops](#Why-using-einops-notation)
|
| 100 |
+
- [Supported frameworks](#Supported-frameworks)
|
| 101 |
+
- [Citing](#Citing)
|
| 102 |
+
- [Repository](https://github.com/arogozhnikov/einops) and [discussions](https://github.com/arogozhnikov/einops/discussions)
|
| 103 |
+
|
| 104 |
+
## Installation <a name="Installation"></a>
|
| 105 |
+
|
| 106 |
+
Plain and simple:
|
| 107 |
+
```bash
|
| 108 |
+
pip install einops
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
<!--
|
| 112 |
+
`einops` has no mandatory dependencies (code examples also require jupyter, pillow + backends).
|
| 113 |
+
To obtain the latest github version
|
| 114 |
+
|
| 115 |
+
```bash
|
| 116 |
+
pip install https://github.com/arogozhnikov/einops/archive/master.zip
|
| 117 |
+
```
|
| 118 |
+
-->
|
| 119 |
+
|
| 120 |
+
## Tutorials <a name="Tutorials"></a>
|
| 121 |
+
|
| 122 |
+
Tutorials are the most convenient way to see `einops` in action
|
| 123 |
+
|
| 124 |
+
- part 1: [einops fundamentals](https://github.com/arogozhnikov/einops/blob/master/docs/1-einops-basics.ipynb)
|
| 125 |
+
- part 2: [einops for deep learning](https://github.com/arogozhnikov/einops/blob/master/docs/2-einops-for-deep-learning.ipynb)
|
| 126 |
+
- part 3: [packing and unpacking](https://github.com/arogozhnikov/einops/blob/master/docs/4-pack-and-unpack.ipynb)
|
| 127 |
+
- part 4: [improve pytorch code with einops](http://einops.rocks/pytorch-examples.html)
|
| 128 |
+
|
| 129 |
+
Kapil Sachdeva recorded a small [intro to einops](https://www.youtube.com/watch?v=xGy75Pjsqzo).
|
| 130 |
+
|
| 131 |
+
## API <a name="API"></a>
|
| 132 |
+
|
| 133 |
+
`einops` has a minimalistic yet powerful API.
|
| 134 |
+
|
| 135 |
+
Three core operations provided ([einops tutorial](https://github.com/arogozhnikov/einops/blob/master/docs/)
|
| 136 |
+
shows those cover stacking, reshape, transposition, squeeze/unsqueeze, repeat, tile, concatenate, view and numerous reductions)
|
| 137 |
+
|
| 138 |
+
```python
|
| 139 |
+
from einops import rearrange, reduce, repeat
|
| 140 |
+
# rearrange elements according to the pattern
|
| 141 |
+
output_tensor = rearrange(input_tensor, 't b c -> b c t')
|
| 142 |
+
# combine rearrangement and reduction
|
| 143 |
+
output_tensor = reduce(input_tensor, 'b c (h h2) (w w2) -> b h w c', 'mean', h2=2, w2=2)
|
| 144 |
+
# copy along a new axis
|
| 145 |
+
output_tensor = repeat(input_tensor, 'h w -> h w c', c=3)
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
Later additions to the family are `pack` and `unpack` functions (better than stack/split/concatenate):
|
| 149 |
+
|
| 150 |
+
```python
|
| 151 |
+
from einops import pack, unpack
|
| 152 |
+
# pack and unpack allow reversibly 'packing' multiple tensors into one.
|
| 153 |
+
# Packed tensors may be of different dimensionality:
|
| 154 |
+
packed, ps = pack([class_token_bc, image_tokens_bhwc, text_tokens_btc], 'b * c')
|
| 155 |
+
class_emb_bc, image_emb_bhwc, text_emb_btc = unpack(transformer(packed), ps, 'b * c')
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
Finally, einops provides einsum with a support of multi-lettered names:
|
| 159 |
+
|
| 160 |
+
```python
|
| 161 |
+
from einops import einsum, pack, unpack
|
| 162 |
+
# einsum is like ... einsum, generic and flexible dot-product
|
| 163 |
+
# but 1) axes can be multi-lettered 2) pattern goes last 3) works with multiple frameworks
|
| 164 |
+
C = einsum(A, B, 'b t1 head c, b t2 head c -> b head t1 t2')
|
| 165 |
+
```
|
| 166 |
+
|
| 167 |
+
### EinMix
|
| 168 |
+
|
| 169 |
+
`EinMix` is a generic linear layer, perfect for MLP Mixers and similar architectures.
|
| 170 |
+
|
| 171 |
+
### Layers
|
| 172 |
+
|
| 173 |
+
Einops provides layers (`einops` keeps a separate version for each framework) that reflect corresponding functions
|
| 174 |
+
|
| 175 |
+
```python
|
| 176 |
+
from einops.layers.torch import Rearrange, Reduce
|
| 177 |
+
from einops.layers.tensorflow import Rearrange, Reduce
|
| 178 |
+
from einops.layers.flax import Rearrange, Reduce
|
| 179 |
+
from einops.layers.paddle import Rearrange, Reduce
|
| 180 |
+
from einops.layers.chainer import Rearrange, Reduce
|
| 181 |
+
```
|
| 182 |
+
|
| 183 |
+
<details markdown="1">
|
| 184 |
+
<summary>Example of using layers within a pytorch model</summary>
|
| 185 |
+
Example given for pytorch, but code in other frameworks is almost identical
|
| 186 |
+
|
| 187 |
+
```python
|
| 188 |
+
from torch.nn import Sequential, Conv2d, MaxPool2d, Linear, ReLU
|
| 189 |
+
from einops.layers.torch import Rearrange
|
| 190 |
+
|
| 191 |
+
model = Sequential(
|
| 192 |
+
...,
|
| 193 |
+
Conv2d(6, 16, kernel_size=5),
|
| 194 |
+
MaxPool2d(kernel_size=2),
|
| 195 |
+
# flattening without need to write forward
|
| 196 |
+
Rearrange('b c h w -> b (c h w)'),
|
| 197 |
+
Linear(16*5*5, 120),
|
| 198 |
+
ReLU(),
|
| 199 |
+
Linear(120, 10),
|
| 200 |
+
)
|
| 201 |
+
```
|
| 202 |
+
|
| 203 |
+
No more flatten needed!
|
| 204 |
+
|
| 205 |
+
Additionally, torch users will benefit from layers as those are script-able and compile-able.
|
| 206 |
+
</details>
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
## Naming <a name="Naming"></a>
|
| 212 |
+
|
| 213 |
+
`einops` stands for Einstein-Inspired Notation for operations
|
| 214 |
+
(though "Einstein operations" is more attractive and easier to remember).
|
| 215 |
+
|
| 216 |
+
Notation was loosely inspired by Einstein summation (in particular by `numpy.einsum` operation).
|
| 217 |
+
|
| 218 |
+
## Why use `einops` notation?! <a name="Why-using-einops-notation"></a>
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
### Semantic information (being verbose in expectations)
|
| 222 |
+
|
| 223 |
+
```python
|
| 224 |
+
y = x.view(x.shape[0], -1)
|
| 225 |
+
y = rearrange(x, 'b c h w -> b (c h w)')
|
| 226 |
+
```
|
| 227 |
+
While these two lines are doing the same job in *some* context,
|
| 228 |
+
the second one provides information about the input and output.
|
| 229 |
+
In other words, `einops` focuses on interface: *what is the input and output*, not *how* the output is computed.
|
| 230 |
+
|
| 231 |
+
The next operation looks similar:
|
| 232 |
+
|
| 233 |
+
```python
|
| 234 |
+
y = rearrange(x, 'time c h w -> time (c h w)')
|
| 235 |
+
```
|
| 236 |
+
but it gives the reader a hint:
|
| 237 |
+
this is not an independent batch of images we are processing,
|
| 238 |
+
but rather a sequence (video).
|
| 239 |
+
|
| 240 |
+
Semantic information makes the code easier to read and maintain.
|
| 241 |
+
|
| 242 |
+
### Convenient checks
|
| 243 |
+
|
| 244 |
+
Reconsider the same example:
|
| 245 |
+
|
| 246 |
+
```python
|
| 247 |
+
y = x.view(x.shape[0], -1) # x: (batch, 256, 19, 19)
|
| 248 |
+
y = rearrange(x, 'b c h w -> b (c h w)')
|
| 249 |
+
```
|
| 250 |
+
The second line checks that the input has four dimensions,
|
| 251 |
+
but you can also specify particular dimensions.
|
| 252 |
+
That's opposed to just writing comments about shapes since comments don't prevent mistakes, not tested, and without code review tend to be outdated
|
| 253 |
+
```python
|
| 254 |
+
y = x.view(x.shape[0], -1) # x: (batch, 256, 19, 19)
|
| 255 |
+
y = rearrange(x, 'b c h w -> b (c h w)', c=256, h=19, w=19)
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
### Result is strictly determined
|
| 259 |
+
|
| 260 |
+
Below we have at least two ways to define the depth-to-space operation
|
| 261 |
+
```python
|
| 262 |
+
# depth-to-space
|
| 263 |
+
rearrange(x, 'b c (h h2) (w w2) -> b (c h2 w2) h w', h2=2, w2=2)
|
| 264 |
+
rearrange(x, 'b c (h h2) (w w2) -> b (h2 w2 c) h w', h2=2, w2=2)
|
| 265 |
+
```
|
| 266 |
+
There are at least four more ways to do it. Which one is used by the framework?
|
| 267 |
+
|
| 268 |
+
These details are ignored, since *usually* it makes no difference,
|
| 269 |
+
but it can make a big difference (e.g. if you use grouped convolutions in the next stage),
|
| 270 |
+
and you'd like to specify this in your code.
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
### Uniformity
|
| 274 |
+
|
| 275 |
+
```python
|
| 276 |
+
reduce(x, 'b c (x dx) -> b c x', 'max', dx=2)
|
| 277 |
+
reduce(x, 'b c (x dx) (y dy) -> b c x y', 'max', dx=2, dy=3)
|
| 278 |
+
reduce(x, 'b c (x dx) (y dy) (z dz) -> b c x y z', 'max', dx=2, dy=3, dz=4)
|
| 279 |
+
```
|
| 280 |
+
These examples demonstrated that we don't use separate operations for 1d/2d/3d pooling,
|
| 281 |
+
those are all defined in a uniform way.
|
| 282 |
+
|
| 283 |
+
Space-to-depth and depth-to space are defined in many frameworks but how about width-to-height? Here you go:
|
| 284 |
+
|
| 285 |
+
```python
|
| 286 |
+
rearrange(x, 'b c h (w w2) -> b c (h w2) w', w2=2)
|
| 287 |
+
```
|
| 288 |
+
|
| 289 |
+
### Framework independent behavior
|
| 290 |
+
|
| 291 |
+
Even simple functions are defined differently by different frameworks
|
| 292 |
+
|
| 293 |
+
```python
|
| 294 |
+
y = x.flatten() # or flatten(x)
|
| 295 |
+
```
|
| 296 |
+
|
| 297 |
+
Suppose `x`'s shape was `(3, 4, 5)`, then `y` has shape ...
|
| 298 |
+
|
| 299 |
+
- numpy, pytorch, cupy, chainer: `(60,)`
|
| 300 |
+
- keras, tensorflow.layers, gluon: `(3, 20)`
|
| 301 |
+
|
| 302 |
+
`einops` works the same way in all frameworks.
|
| 303 |
+
|
| 304 |
+
### Independence of framework terminology
|
| 305 |
+
|
| 306 |
+
Example: `tile` vs `repeat` causes lots of confusion. To copy image along width:
|
| 307 |
+
```python
|
| 308 |
+
np.tile(image, (1, 2)) # in numpy
|
| 309 |
+
image.repeat(1, 2) # pytorch's repeat ~ numpy's tile
|
| 310 |
+
```
|
| 311 |
+
|
| 312 |
+
With einops you don't need to decipher which axis was repeated:
|
| 313 |
+
```python
|
| 314 |
+
repeat(image, 'h w -> h (tile w)', tile=2) # in numpy
|
| 315 |
+
repeat(image, 'h w -> h (tile w)', tile=2) # in pytorch
|
| 316 |
+
repeat(image, 'h w -> h (tile w)', tile=2) # in tf
|
| 317 |
+
repeat(image, 'h w -> h (tile w)', tile=2) # in jax
|
| 318 |
+
repeat(image, 'h w -> h (tile w)', tile=2) # in cupy
|
| 319 |
+
... (etc.)
|
| 320 |
+
```
|
| 321 |
+
|
| 322 |
+
[Testimonials](https://einops.rocks/pages/testimonials/) provide users' perspective on the same question.
|
| 323 |
+
|
| 324 |
+
## Supported frameworks <a name="Supported-frameworks"></a>
|
| 325 |
+
|
| 326 |
+
Einops works with ...
|
| 327 |
+
|
| 328 |
+
- [numpy](http://www.numpy.org/)
|
| 329 |
+
- [pytorch](https://pytorch.org/)
|
| 330 |
+
- [tensorflow](https://www.tensorflow.org/)
|
| 331 |
+
- [jax](https://github.com/google/jax)
|
| 332 |
+
- [cupy](https://cupy.chainer.org/)
|
| 333 |
+
- [chainer](https://chainer.org/)
|
| 334 |
+
- [tf.keras](https://www.tensorflow.org/guide/keras)
|
| 335 |
+
- [flax](https://github.com/google/flax) (experimental)
|
| 336 |
+
- [paddle](https://github.com/PaddlePaddle/Paddle) (experimental)
|
| 337 |
+
- [oneflow](https://github.com/Oneflow-Inc/oneflow) (community)
|
| 338 |
+
- [tinygrad](https://github.com/tinygrad/tinygrad) (community)
|
| 339 |
+
|
| 340 |
+
Additionally, starting from einops 0.7.0 einops can be used with any framework that supports [Python array API standard](https://data-apis.org/array-api/latest/API_specification/index.html)
|
| 341 |
+
|
| 342 |
+
## Citing einops <a name="Citing"></a>
|
| 343 |
+
|
| 344 |
+
Please use the following bibtex record
|
| 345 |
+
|
| 346 |
+
```text
|
| 347 |
+
@inproceedings{
|
| 348 |
+
rogozhnikov2022einops,
|
| 349 |
+
title={Einops: Clear and Reliable Tensor Manipulations with Einstein-like Notation},
|
| 350 |
+
author={Alex Rogozhnikov},
|
| 351 |
+
booktitle={International Conference on Learning Representations},
|
| 352 |
+
year={2022},
|
| 353 |
+
url={https://openreview.net/forum?id=oapKSVM2bcj}
|
| 354 |
+
}
|
| 355 |
+
```
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
## Supported python versions
|
| 359 |
+
|
| 360 |
+
`einops` works with python 3.8 or later.
|
vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
einops-0.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
einops-0.8.0.dist-info/METADATA,sha256=5hTpaWnwYNe3QvhbXYTpA_LUJ2lSlyspSc0gRGni7sY,12926
|
| 3 |
+
einops-0.8.0.dist-info/RECORD,,
|
| 4 |
+
einops-0.8.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 5 |
+
einops-0.8.0.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87
|
| 6 |
+
einops-0.8.0.dist-info/licenses/LICENSE,sha256=MNmENkKW9R_67K1LAe4SfpUlDFBokY1LZvyWIGcj5DQ,1073
|
| 7 |
+
einops/__init__.py,sha256=UdixJ9CShlEOQfw0xcU6zYtrAn6Durgh6jCQWdcaQK4,422
|
| 8 |
+
einops/__pycache__/__init__.cpython-310.pyc,,
|
| 9 |
+
einops/__pycache__/_backends.cpython-310.pyc,,
|
| 10 |
+
einops/__pycache__/_torch_specific.cpython-310.pyc,,
|
| 11 |
+
einops/__pycache__/array_api.cpython-310.pyc,,
|
| 12 |
+
einops/__pycache__/einops.cpython-310.pyc,,
|
| 13 |
+
einops/__pycache__/packing.cpython-310.pyc,,
|
| 14 |
+
einops/__pycache__/parsing.cpython-310.pyc,,
|
| 15 |
+
einops/_backends.py,sha256=VHPPrL1mf0PDTvyFPZvmZeTqGJoWflqv7b-eoJUHudo,21081
|
| 16 |
+
einops/_torch_specific.py,sha256=yMaQeqAZhBLWR1Q-Jv6uRINJfzROhLb-rzKKevpefUU,4138
|
| 17 |
+
einops/array_api.py,sha256=FcKZSo7l8jC5HL8qudutz1K5x9cFpwACMDcjfbvEKmQ,5251
|
| 18 |
+
einops/einops.py,sha256=AYZe5yMlH-EXO0MWFv27ajyPdVTFpYloaSCRM9jw5sA,37252
|
| 19 |
+
einops/experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 20 |
+
einops/experimental/__pycache__/__init__.cpython-310.pyc,,
|
| 21 |
+
einops/experimental/__pycache__/indexing.cpython-310.pyc,,
|
| 22 |
+
einops/experimental/indexing.py,sha256=4NtRNmSOrpUURvwhrbbGNK3NeTxHI4EW8R6ct3JZyLw,14868
|
| 23 |
+
einops/layers/__init__.py,sha256=vBtnAt2afs4QlqpeFU4dlZNxBuC9IXl3fmilk-2OzHM,3747
|
| 24 |
+
einops/layers/__pycache__/__init__.cpython-310.pyc,,
|
| 25 |
+
einops/layers/__pycache__/_einmix.cpython-310.pyc,,
|
| 26 |
+
einops/layers/__pycache__/chainer.cpython-310.pyc,,
|
| 27 |
+
einops/layers/__pycache__/flax.cpython-310.pyc,,
|
| 28 |
+
einops/layers/__pycache__/keras.cpython-310.pyc,,
|
| 29 |
+
einops/layers/__pycache__/oneflow.cpython-310.pyc,,
|
| 30 |
+
einops/layers/__pycache__/paddle.cpython-310.pyc,,
|
| 31 |
+
einops/layers/__pycache__/tensorflow.cpython-310.pyc,,
|
| 32 |
+
einops/layers/__pycache__/torch.cpython-310.pyc,,
|
| 33 |
+
einops/layers/_einmix.py,sha256=0cl3r4Xp44S2HO-tx0MHa4cMFD2KJXpG5O-4gJM5AtU,8464
|
| 34 |
+
einops/layers/chainer.py,sha256=hUB-XSjN5CP8zALZtalL3n2lQkq7vymftRI8okEMO2Q,1861
|
| 35 |
+
einops/layers/flax.py,sha256=zFy83gSLRm31cLuKFRvZ82_HsefnXPbRvkKZh1KkC1I,2536
|
| 36 |
+
einops/layers/keras.py,sha256=-7So0w94phvf9HdW0xi2mSeBg02qVPvAyfp_1XR02NM,212
|
| 37 |
+
einops/layers/oneflow.py,sha256=YEPzz4xc7BDRQfb8ulD3teqQJdbO6qQg7Z4KIPVTLz8,1864
|
| 38 |
+
einops/layers/paddle.py,sha256=8cRZQ8BT9vYEczh7pNProuTM_3XjLty2ht2sdvXNFiI,1907
|
| 39 |
+
einops/layers/tensorflow.py,sha256=T9uhSVwbXREahc31ARAHoN5K-7zsuS8NRNPdY6Zk1Bc,3324
|
| 40 |
+
einops/layers/torch.py,sha256=504G99kEgy7dk1UPBbj9hzJmZkAHwVhMDFN_8J-p3C8,2399
|
| 41 |
+
einops/packing.py,sha256=Ln2lAMko9hobi_qd-4dPtQY0Ks5hRK7x-5FthL2gunk,7654
|
| 42 |
+
einops/parsing.py,sha256=xbqcvwReLiROEucoegZ20WQiEHlLg0uxo_vYoezKB_4,6746
|
| 43 |
+
einops/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/REQUESTED
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: hatchling 1.24.2
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
vllm/lib/python3.10/site-packages/einops-0.8.0.dist-info/licenses/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2018 Alex Rogozhnikov
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The JSON Schema meta-schemas and vocabularies, exposed as a Registry.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from referencing.jsonschema import EMPTY_REGISTRY as _EMPTY_REGISTRY
|
| 6 |
+
|
| 7 |
+
from jsonschema_specifications._core import _schemas
|
| 8 |
+
|
| 9 |
+
#: A `referencing.jsonschema.SchemaRegistry` containing all of the official
|
| 10 |
+
#: meta-schemas and vocabularies.
|
| 11 |
+
REGISTRY = (_schemas() @ _EMPTY_REGISTRY).crawl()
|
| 12 |
+
__all__ = ["REGISTRY"]
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (460 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc
ADDED
|
Binary file (901 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/_core.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Load all the JSON Schema specification's official schemas.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from importlib.resources import files
|
| 9 |
+
except ImportError:
|
| 10 |
+
from importlib_resources import ( # type: ignore[import-not-found, no-redef]
|
| 11 |
+
files,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
from referencing import Resource
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _schemas():
|
| 18 |
+
"""
|
| 19 |
+
All schemas we ship.
|
| 20 |
+
"""
|
| 21 |
+
# importlib.resources.abc.Traversal doesn't have nice ways to do this that
|
| 22 |
+
# I'm aware of...
|
| 23 |
+
#
|
| 24 |
+
# It can't recurse arbitrarily, e.g. no ``.glob()``.
|
| 25 |
+
#
|
| 26 |
+
# So this takes some liberties given the real layout of what we ship
|
| 27 |
+
# (only 2 levels of nesting, no directories within the second level).
|
| 28 |
+
|
| 29 |
+
for version in files(__package__).joinpath("schemas").iterdir():
|
| 30 |
+
if version.name.startswith("."):
|
| 31 |
+
continue
|
| 32 |
+
for child in version.iterdir():
|
| 33 |
+
children = [child] if child.is_file() else child.iterdir()
|
| 34 |
+
for path in children:
|
| 35 |
+
if path.name.startswith("."):
|
| 36 |
+
continue
|
| 37 |
+
contents = json.loads(path.read_text(encoding="utf-8"))
|
| 38 |
+
yield Resource.from_contents(contents)
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/schema",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/core": true,
|
| 6 |
+
"https://json-schema.org/draft/2020-12/vocab/applicator": true,
|
| 7 |
+
"https://json-schema.org/draft/2020-12/vocab/unevaluated": true,
|
| 8 |
+
"https://json-schema.org/draft/2020-12/vocab/validation": true,
|
| 9 |
+
"https://json-schema.org/draft/2020-12/vocab/meta-data": true,
|
| 10 |
+
"https://json-schema.org/draft/2020-12/vocab/format-annotation": true,
|
| 11 |
+
"https://json-schema.org/draft/2020-12/vocab/content": true
|
| 12 |
+
},
|
| 13 |
+
"$dynamicAnchor": "meta",
|
| 14 |
+
|
| 15 |
+
"title": "Core and Validation specifications meta-schema",
|
| 16 |
+
"allOf": [
|
| 17 |
+
{"$ref": "meta/core"},
|
| 18 |
+
{"$ref": "meta/applicator"},
|
| 19 |
+
{"$ref": "meta/unevaluated"},
|
| 20 |
+
{"$ref": "meta/validation"},
|
| 21 |
+
{"$ref": "meta/meta-data"},
|
| 22 |
+
{"$ref": "meta/format-annotation"},
|
| 23 |
+
{"$ref": "meta/content"}
|
| 24 |
+
],
|
| 25 |
+
"type": ["object", "boolean"],
|
| 26 |
+
"$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.",
|
| 27 |
+
"properties": {
|
| 28 |
+
"definitions": {
|
| 29 |
+
"$comment": "\"definitions\" has been replaced by \"$defs\".",
|
| 30 |
+
"type": "object",
|
| 31 |
+
"additionalProperties": { "$dynamicRef": "#meta" },
|
| 32 |
+
"deprecated": true,
|
| 33 |
+
"default": {}
|
| 34 |
+
},
|
| 35 |
+
"dependencies": {
|
| 36 |
+
"$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.",
|
| 37 |
+
"type": "object",
|
| 38 |
+
"additionalProperties": {
|
| 39 |
+
"anyOf": [
|
| 40 |
+
{ "$dynamicRef": "#meta" },
|
| 41 |
+
{ "$ref": "meta/validation#/$defs/stringArray" }
|
| 42 |
+
]
|
| 43 |
+
},
|
| 44 |
+
"deprecated": true,
|
| 45 |
+
"default": {}
|
| 46 |
+
},
|
| 47 |
+
"$recursiveAnchor": {
|
| 48 |
+
"$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".",
|
| 49 |
+
"$ref": "meta/core#/$defs/anchorString",
|
| 50 |
+
"deprecated": true
|
| 51 |
+
},
|
| 52 |
+
"$recursiveRef": {
|
| 53 |
+
"$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".",
|
| 54 |
+
"$ref": "meta/core#/$defs/uriReferenceString",
|
| 55 |
+
"deprecated": true
|
| 56 |
+
}
|
| 57 |
+
}
|
| 58 |
+
}
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/content",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/content": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Content vocabulary meta-schema",
|
| 10 |
+
|
| 11 |
+
"type": ["object", "boolean"],
|
| 12 |
+
"properties": {
|
| 13 |
+
"contentEncoding": { "type": "string" },
|
| 14 |
+
"contentMediaType": { "type": "string" },
|
| 15 |
+
"contentSchema": { "$dynamicRef": "#meta" }
|
| 16 |
+
}
|
| 17 |
+
}
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/core",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/core": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Core vocabulary meta-schema",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"$id": {
|
| 13 |
+
"$ref": "#/$defs/uriReferenceString",
|
| 14 |
+
"$comment": "Non-empty fragments not allowed.",
|
| 15 |
+
"pattern": "^[^#]*#?$"
|
| 16 |
+
},
|
| 17 |
+
"$schema": { "$ref": "#/$defs/uriString" },
|
| 18 |
+
"$ref": { "$ref": "#/$defs/uriReferenceString" },
|
| 19 |
+
"$anchor": { "$ref": "#/$defs/anchorString" },
|
| 20 |
+
"$dynamicRef": { "$ref": "#/$defs/uriReferenceString" },
|
| 21 |
+
"$dynamicAnchor": { "$ref": "#/$defs/anchorString" },
|
| 22 |
+
"$vocabulary": {
|
| 23 |
+
"type": "object",
|
| 24 |
+
"propertyNames": { "$ref": "#/$defs/uriString" },
|
| 25 |
+
"additionalProperties": {
|
| 26 |
+
"type": "boolean"
|
| 27 |
+
}
|
| 28 |
+
},
|
| 29 |
+
"$comment": {
|
| 30 |
+
"type": "string"
|
| 31 |
+
},
|
| 32 |
+
"$defs": {
|
| 33 |
+
"type": "object",
|
| 34 |
+
"additionalProperties": { "$dynamicRef": "#meta" }
|
| 35 |
+
}
|
| 36 |
+
},
|
| 37 |
+
"$defs": {
|
| 38 |
+
"anchorString": {
|
| 39 |
+
"type": "string",
|
| 40 |
+
"pattern": "^[A-Za-z_][-A-Za-z0-9._]*$"
|
| 41 |
+
},
|
| 42 |
+
"uriString": {
|
| 43 |
+
"type": "string",
|
| 44 |
+
"format": "uri"
|
| 45 |
+
},
|
| 46 |
+
"uriReferenceString": {
|
| 47 |
+
"type": "string",
|
| 48 |
+
"format": "uri-reference"
|
| 49 |
+
}
|
| 50 |
+
}
|
| 51 |
+
}
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2019-09/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2019-09/meta/format",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2019-09/vocab/format": true
|
| 6 |
+
},
|
| 7 |
+
"$recursiveAnchor": true,
|
| 8 |
+
|
| 9 |
+
"title": "Format vocabulary meta-schema",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"format": { "type": "string" }
|
| 13 |
+
}
|
| 14 |
+
}
|
vllm/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/format-annotation",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/format-annotation": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Format vocabulary meta-schema for annotation results",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"format": { "type": "string" }
|
| 13 |
+
}
|
| 14 |
+
}
|