repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
scipy
|
scipy-main/scipy/stats/tests/test_contingency.py
|
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_approx_equal,
assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.special import xlogy
from scipy.stats.contingency import (margins, expected_freq,
chi2_contingency, association)
def test_margins():
    """Check margins() on 1-D, 2-D, and 3-D inputs."""
    # 1-D input: a single marginal equal to the input itself.
    one_d = np.array([1])
    result = margins(one_d)
    assert_equal(len(result), 1)
    assert_array_equal(result[0], np.array([1]))

    # 2-D input with a single cell: both marginals are that cell.
    single = np.array([[1]])
    row_m, col_m = margins(single)
    assert_array_equal(row_m, np.array([[1]]))
    assert_array_equal(col_m, np.array([[1]]))

    # 2-D input: row sums and column sums keep their axes.
    table = np.arange(12).reshape(2, 6)
    row_m, col_m = margins(table)
    assert_array_equal(row_m, np.array([[15], [51]]))
    assert_array_equal(col_m, np.array([[6, 8, 10, 12, 14, 16]]))

    # 3-D input: one marginal per axis.
    cube = np.arange(24).reshape(2, 3, 4)
    m_ax0, m_ax1, m_ax2 = margins(cube)
    assert_array_equal(m_ax0, np.array([[[66]], [[210]]]))
    assert_array_equal(m_ax1, np.array([[[60], [92], [124]]]))
    assert_array_equal(m_ax2, np.array([[[60, 66, 72, 78]]]))
def test_expected_freq():
    """Check expected_freq() on trivial, uniform, and 2x3 tables."""
    # Trivial 1-D case.
    assert_array_equal(expected_freq([1]), np.array([1.0]))

    # A 3-D table whose marginals imply a uniform expected table.
    uniform_obs = np.array([[[2, 0], [0, 2]],
                            [[0, 2], [2, 0]],
                            [[1, 1], [1, 1]]])
    assert_array_equal(expected_freq(uniform_obs), np.ones_like(uniform_obs))

    # A 2x3 table checked against hand-computed expected frequencies.
    obs = np.array([[10, 10, 20], [20, 20, 20]])
    correct = np.array([[12., 12., 16.], [18., 18., 24.]])
    assert_array_almost_equal(expected_freq(obs), correct)
def test_chi2_contingency_trivial():
    """chi2_contingency on tables that carry no association signal."""
    # Proportional rows: statistic is exactly zero, p-value exactly one.
    table = np.array([[1, 2], [1, 2]])
    stat, pval, dof, exp = chi2_contingency(table, correction=False)
    assert_equal(stat, 0.0)
    assert_equal(pval, 1.0)
    assert_equal(dof, 1)
    assert_array_equal(table, exp)

    # 1-D input: zero degrees of freedom, expected equals observed.
    vec = np.array([1, 2, 3])
    stat, pval, dof, exp = chi2_contingency(vec, correction=False)
    assert_equal(stat, 0.0)
    assert_equal(pval, 1.0)
    assert_equal(dof, 0)
    assert_array_equal(vec, exp)
def test_chi2_contingency_R():
    """Compare chi2_contingency against results computed independently in R.

    Reference R session for the 3-way table::

        data <- c(12, 34, 23, 4, 47, 11,
                  35, 31, 11, 34, 10, 18,
                  12, 32, 9, 18, 13, 19,
                  12, 12, 14, 9, 33, 25)
        r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
        c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3")))
        t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2")))
        summary(xtabs(data~r+c+t))
        # -> Chisq = 102.17, df = 17, p-value = 3.514e-14

    Reference R session for the 4-way table::

        data <- c(12, 17, 11, 16, 11, 12, 15, 16,
                  23, 15, 30, 22, 14, 17, 15, 16)
        r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2")))
        c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2")))
        d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2")))
        t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2")))
        summary(xtabs(data~r+c+d+t))
        # -> Chisq = 8.758, df = 11, p-value = 0.6442
    """
    obs_3way = np.array(
        [[[12, 34, 23],
          [35, 31, 11],
          [12, 32, 9],
          [12, 12, 14]],
         [[4, 47, 11],
          [34, 10, 18],
          [18, 13, 19],
          [9, 33, 25]]])
    obs_4way = np.array(
        [[[[12, 17],
           [11, 16]],
          [[11, 12],
           [15, 16]]],
         [[[23, 15],
           [30, 22]],
          [[14, 17],
           [15, 16]]]])
    # (observed, R chi2, chi2 sig. digits, R p-value, p sig. digits, R dof)
    cases = [(obs_3way, 102.17, 5, 3.514e-14, 4, 17),
             (obs_4way, 8.758, 4, 0.6442, 4, 11)]
    for obs, chi2_r, chi2_sig, p_r, p_sig, dof_r in cases:
        chi2, p, dof, expected = chi2_contingency(obs)
        assert_approx_equal(chi2, chi2_r, significant=chi2_sig)
        assert_approx_equal(p, p_r, significant=p_sig)
        assert_equal(dof, dof_r)
def test_chi2_contingency_g():
    """Check the log-likelihood (G-test) statistic of chi2_contingency."""
    table = np.array([[15, 60], [15, 90]])

    # Without Yates' correction the G statistic is 2*sum(obs*log(obs/exp)).
    g, p, dof, e = chi2_contingency(table, lambda_='log-likelihood',
                                    correction=False)
    assert_allclose(g, 2*xlogy(table, table/e).sum())

    # With the correction, each count moves half a unit toward expectation.
    g, p, dof, e = chi2_contingency(table, lambda_='log-likelihood',
                                    correction=True)
    shifted = table + np.array([[-0.5, 0.5], [0.5, -0.5]])
    assert_allclose(g, 2*xlogy(shifted, shifted/e).sum())

    # 2x3 table: no continuity correction is applied (dof != 1).
    table = np.array([[10, 12, 10], [12, 10, 10]])
    g, p, dof, e = chi2_contingency(table, lambda_='log-likelihood')
    assert_allclose(g, 2*xlogy(table, table/e).sum())
def test_chi2_contingency_bad_args():
    """Invalid observed-frequency arrays must raise ValueError."""
    bad_tables = [
        np.array([[-1, 10], [1, 2]]),  # negative observed frequency
        np.array([[0, 1], [0, 1]]),    # zeros -> zero expected frequencies
        np.empty((0, 8)),              # degenerate: `observed` has size 0
    ]
    for bad in bad_tables:
        assert_raises(ValueError, chi2_contingency, bad)
def test_chi2_contingency_yates_gh13875():
    """Regression test for gh-13875.

    The magnitude of Yates' continuity correction must not exceed the
    difference between the expected and observed values of the statistic.
    """
    table = np.array([[1573, 3], [4, 0]])
    pvalue = chi2_contingency(table)[1]
    assert_allclose(pvalue, 1, rtol=1e-12)
@pytest.mark.parametrize("correction", [False, True])
def test_result(correction):
    """Result-object attributes must match the tuple unpacking order."""
    table = np.array([[1, 2], [1, 2]])
    res = chi2_contingency(table, correction=correction)
    # (statistic, pvalue, dof, expected_freq) is the tuple form of `res`.
    assert_equal((res.statistic, res.pvalue, res.dof, res.expected_freq), res)
def test_bad_association_args():
    """Invalid inputs to association() must raise ValueError."""
    # Unknown test-statistic name.
    assert_raises(ValueError, association, [[1, 2], [3, 4]], "X")
    # The observed array must be 2-D.
    assert_raises(ValueError, association, [[[1, 2]], [[3, 4]]], "cramer")
    # Errors from chi2_contingency propagate (negative frequency here).
    assert_raises(ValueError, association, [[-1, 10], [1, 2]], 'cramer')
    # Non-numeric array items are rejected.
    assert_raises(ValueError, association,
                  np.array([[1, 2], ["dd", 4]], dtype=object), 'cramer')
@pytest.mark.parametrize('stat, expected',
                         [('cramer', 0.09222412010290792),
                          ('tschuprow', 0.0775509319944633),
                          ('pearson', 0.12932925727138758)])
def test_assoc(stat, expected):
    """Spot-check each association() statistic on a fixed 3x5 table."""
    table = np.array([[12, 13, 14, 15, 16],
                      [17, 16, 18, 19, 11],
                      [9, 15, 14, 12, 11]])
    assert_allclose(association(observed=table, method=stat), expected)
| 7,706
| 30.847107
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_continuous_basic.py
|
import sys
import numpy as np
import numpy.testing as npt
import pytest
from pytest import raises as assert_raises
from scipy.integrate import IntegrationWarning
import itertools
from scipy import stats
from .common_tests import (check_normalization, check_moment,
check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_entropy_vect_scale,
check_edge_support, check_named_args,
check_random_state_property,
check_meth_dtype, check_ppf_dtype,
check_cmplx_deriv,
check_pickling, check_rvs_broadcast,
check_freezing, check_munp_expect,)
from scipy.stats._distr_params import distcont
from scipy.stats._distn_infrastructure import rv_continuous_frozen
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distributions so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
# Note that you need to add new distributions you want tested
# to _distr_params
DECIMAL = 5  # specify the precision of the tests  # increased from 0 to 5

_IS_32BIT = (sys.maxsize < 2**32)

# --- Skip/xfail bookkeeping used by the parametrized tests below. ---

# For skipping test_cont_basic
distslow = ['recipinvgauss', 'vonmises', 'kappa4', 'vonmises_line',
            'gausshyper', 'norminvgauss', 'geninvgauss', 'genhyperbolic',
            'truncnorm', 'truncweibull_min']

# distxslow are sorted by speed (very slow to slow)
distxslow = ['studentized_range', 'kstwo', 'ksone', 'wrapcauchy', 'genexpon']

# For skipping test_moments, which is already marked slow
distxslow_test_moments = ['studentized_range', 'vonmises', 'vonmises_line',
                          'ksone', 'kstwo', 'recipinvgauss', 'genexpon']

# skip check_fit_args (test is slow)
skip_fit_test_mle = ['exponpow', 'exponweib', 'gausshyper', 'genexpon',
                     'halfgennorm', 'gompertz', 'johnsonsb', 'johnsonsu',
                     'kappa4', 'ksone', 'kstwo', 'kstwobign', 'mielke', 'ncf',
                     'nct', 'powerlognorm', 'powernorm', 'recipinvgauss',
                     'trapezoid', 'vonmises', 'vonmises_line', 'levy_stable',
                     'rv_histogram_instance', 'studentized_range']

# these were really slow in `test_fit`.py.
# note that this list is used to skip both fit_test and fit_fix tests
slow_fit_test_mm = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
                    'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
                    'kappa4', 'kstwobign', 'recipinvgauss',
                    'trapezoid', 'truncexpon', 'vonmises', 'vonmises_line',
                    'studentized_range']
# pearson3 fails due to something weird
# the first list fails due to non-finite distribution moments encountered
# most of the rest fail due to integration warnings
# pearson3 is overriden as not implemented due to gh-11746
fail_fit_test_mm = (['alpha', 'betaprime', 'bradford', 'burr', 'burr12',
                     'cauchy', 'crystalball', 'f', 'fisk', 'foldcauchy',
                     'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
                     'kappa3', 'levy', 'levy_l', 'loglaplace', 'lomax',
                     'mielke', 'nakagami', 'ncf', 'skewcauchy', 't',
                     'tukeylambda', 'invweibull', 'rel_breitwigner']
                    + ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
                       'nct', 'pareto', 'powernorm', 'powerlognorm']
                    + ['pearson3'])

skip_fit_test = {"MLE": skip_fit_test_mle,
                 "MM": slow_fit_test_mm + fail_fit_test_mm}

# skip check_fit_args_fix (test is slow)
skip_fit_fix_test_mle = ['burr', 'exponpow', 'exponweib', 'gausshyper',
                         'genexpon', 'halfgennorm', 'gompertz', 'johnsonsb',
                         'johnsonsu', 'kappa4', 'ksone', 'kstwo', 'kstwobign',
                         'levy_stable', 'mielke', 'ncf', 'ncx2',
                         'powerlognorm', 'powernorm', 'rdist', 'recipinvgauss',
                         'trapezoid', 'truncpareto', 'vonmises', 'vonmises_line',
                         'studentized_range']
# the first list fails due to non-finite distribution moments encountered
# most of the rest fail due to integration warnings
# pearson3 is overriden as not implemented due to gh-11746
fail_fit_fix_test_mm = (['alpha', 'betaprime', 'burr', 'burr12', 'cauchy',
                         'crystalball', 'f', 'fisk', 'foldcauchy',
                         'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
                         'kappa3', 'levy', 'levy_l', 'loglaplace', 'lomax',
                         'mielke', 'nakagami', 'ncf', 'nct', 'skewcauchy', 't',
                         'truncpareto', 'invweibull']
                        + ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
                           'pareto', 'powernorm', 'powerlognorm']
                        + ['pearson3'])

skip_fit_fix_test = {"MLE": skip_fit_fix_test_mle,
                     "MM": slow_fit_test_mm + fail_fit_fix_test_mm}

# These distributions fail the complex derivative test below.
# Here 'fail' mean produce wrong results and/or raise exceptions, depending
# on the implementation details of corresponding special functions.
# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
fails_cmplx = {'argus', 'beta', 'betaprime', 'chi', 'chi2', 'cosine',
               'dgamma', 'dweibull', 'erlang', 'f', 'foldcauchy', 'gamma',
               'gausshyper', 'gengamma', 'genhyperbolic',
               'geninvgauss', 'gennorm', 'genpareto',
               'halfcauchy', 'halfgennorm', 'invgamma',
               'ksone', 'kstwo', 'kstwobign', 'levy_l', 'loggamma',
               'logistic', 'loguniform', 'maxwell', 'nakagami',
               'ncf', 'nct', 'ncx2', 'norminvgauss', 'pearson3',
               'powerlaw', 'rdist', 'reciprocal', 'rice',
               'skewnorm', 't', 'truncweibull_min',
               'tukeylambda', 'vonmises', 'vonmises_line',
               'rv_histogram_instance', 'truncnorm', 'studentized_range',
               'johnsonsb', 'halflogistic', 'rel_breitwigner'}

# rv_histogram instances, with uniform and non-uniform bins;
# stored as (dist, arg) tuples for cases_test_cont_basic
# and cases_test_moments.
histogram_test_instances = []
case1 = {'a': [1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6,
               6, 6, 6, 7, 7, 7, 8, 8, 9], 'bins': 8}  # equal width bins
case2 = {'a': [1, 1], 'bins': [0, 1, 10]}  # unequal width bins
for case, density in itertools.product([case1, case2], [True, False]):
    _hist = np.histogram(**case, density=density)
    _rv_hist = stats.rv_histogram(_hist, density=density)
    histogram_test_instances.append((_rv_hist, tuple()))
def cases_test_cont_basic():
    """Yield (distname, arg) cases, marking slow distributions as such."""
    for name, shapes in distcont[:] + histogram_test_instances:
        if name == 'levy_stable':
            # levy_stable implements only rvs(); it is tested separately.
            continue
        if name in distslow:
            yield pytest.param(name, shapes, marks=pytest.mark.slow)
        elif name in distxslow:
            yield pytest.param(name, shapes, marks=pytest.mark.xslow)
        else:
            yield name, shapes
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
@pytest.mark.parametrize('sn, n_fit_samples', [(500, 200)])
def test_cont_basic(distname, arg, sn, n_fit_samples):
    """Run the battery of generic sanity checks on one distribution.

    `distname` is a scipy.stats distribution name (or an rv_histogram
    instance), `arg` its shape parameters, `sn` the sample size drawn for
    the sample-based checks, and `n_fit_samples` the subsample size used
    for the fit checks.
    """
    # this test skips slow distributions
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # Entry is an rv_histogram instance rather than a name string.
        distfn = distname
        distname = 'rv_histogram_instance'
    rng = np.random.RandomState(765456)
    rvs = distfn.rvs(size=sn, *arg, random_state=rng)
    m, v = distfn.stats(*arg)

    if distname not in {'laplace_asymmetric'}:
        check_sample_meanvar_(m, v, rvs)
    check_cdf_ppf(distfn, arg, distname)
    check_sf_isf(distfn, arg, distname)
    check_cdf_sf(distfn, arg, distname)
    check_ppf_isf(distfn, arg, distname)
    check_pdf(distfn, arg, distname)
    check_pdf_logpdf(distfn, arg, distname)
    check_pdf_logpdf_at_endpoints(distfn, arg, distname)
    check_cdf_logcdf(distfn, arg, distname)
    check_sf_logsf(distfn, arg, distname)
    check_ppf_broadcast(distfn, arg, distname)
    alpha = 0.01
    if distname == 'rv_histogram_instance':
        check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
    elif distname != 'geninvgauss':
        # skip kstest for geninvgauss since cdf is too slow; see test for
        # rv generation in TestGenInvGauss in test_distributions.py
        check_distribution_rvs(distname, arg, alpha, rvs)

    locscale_defaults = (0, 1)
    meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
             distfn.logsf]
    # make sure arguments are within support
    spec_x = {'weibull_max': -0.5, 'levy_l': -0.5,
              'pareto': 1.5, 'truncpareto': 3.2, 'tukeylambda': 0.3,
              'rv_histogram_instance': 5.0}
    x = spec_x.get(distname, 0.5)
    # Overridden shape values keep x inside the support for these two.
    if distname == 'invweibull':
        arg = (1,)
    elif distname == 'ksone':
        arg = (3,)

    check_named_args(distfn, x, arg, locscale_defaults, meths)
    check_random_state_property(distfn, arg)

    if distname in ['rel_breitwigner'] and _IS_32BIT:
        # gh18414
        pytest.skip("fails on Linux 32-bit")
    else:
        check_pickling(distfn, arg)
    check_freezing(distfn, arg)

    # Entropy
    if distname not in ['kstwobign', 'kstwo', 'ncf']:
        check_entropy(distfn, arg, distname)

    if distfn.numargs == 0:
        check_vecentropy(distfn, arg)

    if (distfn.__class__._entropy != stats.rv_continuous._entropy
            and distname != 'vonmises'):
        check_private_entropy(distfn, arg, stats.rv_continuous)

    with npt.suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        sup.filter(IntegrationWarning, "Extremely bad integrand")
        sup.filter(RuntimeWarning, "invalid value")
        check_entropy_vect_scale(distfn, arg)

    check_retrieving_support(distfn, arg)
    check_edge_support(distfn, arg)

    check_meth_dtype(distfn, arg, meths)
    check_ppf_dtype(distfn, arg)

    if distname not in fails_cmplx:
        check_cmplx_deriv(distfn, arg)

    if distname != 'truncnorm':
        check_ppf_private(distfn, arg, distname)

    for method in ["MLE", "MM"]:
        if distname not in skip_fit_test[method]:
            check_fit_args(distfn, arg, rvs[:n_fit_samples], method)

        if distname not in skip_fit_fix_test[method]:
            check_fit_args_fix(distfn, arg, rvs[:n_fit_samples], method)
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
def test_rvs_scalar(distname, arg):
    """rvs() must return a scalar when given scalar arguments (gh-12428)."""
    try:
        dist = getattr(stats, distname)
    except TypeError:
        # Entry is an rv_histogram instance, not a name.
        dist = distname
        distname = 'rv_histogram_instance'
    assert np.isscalar(dist.rvs(*arg))
    # size=() and size=None must also produce scalars.
    for size in ((), None):
        assert np.isscalar(dist.rvs(*arg, size=size))
def test_levy_stable_random_state_property():
    """Apply the random_state check to levy_stable on its own.

    levy_stable only implements rvs(), so it is skipped in the main loop
    in test_cont_basic(); just check_random_state_property runs here.
    """
    check_random_state_property(stats.levy_stable, (0.5, 0.1))
def cases_test_moments():
    """Yield test_moments cases with xslow/xfail marks applied."""
    fail_normalization = set()
    fail_higher = {'ncf'}
    fail_moment = {'johnsonsu'}  # generic `munp` is inaccurate for johnsonsu

    for name, shapes in distcont[:] + histogram_test_instances:
        if name == 'levy_stable':
            continue
        if name in distxslow_test_moments:
            yield pytest.param(name, shapes, True, True, True, True,
                               marks=pytest.mark.xslow(reason="too slow"))
            continue

        norm_ok = name not in fail_normalization
        higher_ok = name not in fail_higher
        moment_ok = name not in fail_moment
        # `marks` is a hook for per-distribution marks and is currently
        # unused; e.g. a timeout could be added for 'skewnorm' with:
        #     if name == 'skewnorm':
        #         marks.append(pytest.mark.timeout(300))
        marks = list()
        yield pytest.param(name, shapes, norm_ok, higher_ok, moment_ok,
                           False, marks=marks)

        if not (norm_ok and higher_ok and moment_ok):
            # Run the distributions that have issues twice, once skipping
            # the not_ok parts, once fully but marked as a known failure.
            yield pytest.param(name, shapes, True, True, True, True,
                               marks=[pytest.mark.xfail] + marks)
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,moment_ok,'
                         'is_xfailing',
                         cases_test_moments())
def test_moments(distname, arg, normalization_ok, higher_ok, moment_ok,
                 is_xfailing):
    """Check normalization and the first four moments of a distribution.

    The boolean flags come from cases_test_moments() and select which
    checks are expected to pass; `is_xfailing` cases run with integration
    warnings suppressed entirely.
    """
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # Entry is an rv_histogram instance rather than a name string.
        distfn = distname
        distname = 'rv_histogram_instance'

    with npt.suppress_warnings() as sup:
        sup.filter(IntegrationWarning,
                   "The integral is probably divergent, or slowly convergent.")
        sup.filter(IntegrationWarning,
                   "The maximum number of subdivisions.")
        sup.filter(IntegrationWarning,
                   "The algorithm does not converge.")

        if is_xfailing:
            sup.filter(IntegrationWarning)

        m, v, s, k = distfn.stats(*arg, moments='mvsk')

        with np.errstate(all="ignore"):
            if normalization_ok:
                check_normalization(distfn, arg, distname)

            if higher_ok:
                check_mean_expect(distfn, arg, m, distname)
                check_skew_expect(distfn, arg, m, v, s, distname)
                check_var_expect(distfn, arg, m, v, distname)
                check_kurt_expect(distfn, arg, m, v, k, distname)
                check_munp_expect(distfn, arg, distname)

            check_loc_scale(distfn, arg, m, v, distname)

            if moment_ok:
                check_moment(distfn, arg, m, v, distname)
@pytest.mark.parametrize('dist,shape_args', distcont)
def test_rvs_broadcast(dist, shape_args):
    """Check that rvs() broadcasts loc, scale, and shape parameters."""
    if dist in ['gausshyper', 'studentized_range']:
        pytest.skip("too slow")
    if dist in ['rel_breitwigner'] and _IS_32BIT:
        # gh18414
        pytest.skip("fails on Linux 32-bit")

    # If shape_only is True, the distribution's _rvs method consumes more
    # than one random number per variate, so rvs() with broadcasting or a
    # nontrivial size will not necessarily match a numpy.vectorize'd
    # rvs(); only the shapes of the results can be compared, not the
    # values.  Membership in this list is an implementation detail of the
    # distribution, not a requirement, and may need updating whenever a
    # distribution's rvs() implementation changes.
    shape_only = dist in ['argus', 'betaprime', 'dgamma', 'dweibull',
                          'exponnorm', 'genhyperbolic', 'geninvgauss',
                          'levy_stable', 'nct', 'norminvgauss', 'rice',
                          'skewnorm', 'semicircular', 'gennorm', 'loggamma']

    distfunc = getattr(stats, dist)
    loc = np.zeros(2)
    scale = np.ones((3, 1))
    expected_shape = [3, 2]
    broadcast_args = []
    # Give the k-th shape parameter a distinct leading dimension (k + 4)
    # followed by enough singleton axes to broadcast against the rest.
    for k in range(distfunc.numargs):
        param_shape = (k + 4,) + (1,)*(k + 2)
        broadcast_args.append(shape_args[k]*np.ones(param_shape))
        expected_shape.insert(0, k + 4)
    broadcast_args.extend([loc, scale])
    # expected_shape is the broadcast of loc, scale, and all shape params.
    check_rvs_broadcast(distfunc, dist, broadcast_args, expected_shape,
                        shape_only, 'd')
# Expected values of the SF, CDF, PDF were computed using
# mpmath with mpmath.mp.dps = 50 and output at 20:
#
#   def ks(x, n):
#       x = mpmath.mpf(x)
#       logp = -mpmath.power(6.0*n*x+1.0, 2)/18.0/n
#       sf, cdf = mpmath.exp(logp), -mpmath.expm1(logp)
#       pdf = (6.0*n*x+1.0) * 2 * sf/3
#       print(mpmath.nstr(sf, 20), mpmath.nstr(cdf, 20), mpmath.nstr(pdf, 20))
#
# Tests use 1/n < x < 1-1/n and n > 1e6 to use the asymptotic computation.
# Larger x has a smaller sf.
@pytest.mark.parametrize('x,n,sf,cdf,pdf,rtol',
                         [(2.0e-5, 1000000000,
                           0.44932297307934442379, 0.55067702692065557621,
                           35946.137394996276407, 5e-15),
                          (2.0e-9, 1000000000,
                           0.99999999061111115519, 9.3888888448132728224e-9,
                           8.6666665852962971765, 5e-14),
                          (5.0e-4, 1000000000,
                           7.1222019433090374624e-218, 1.0,
                           1.4244408634752704094e-211, 5e-14)])
def test_gh17775_regression(x, n, sf, cdf, pdf, rtol):
    """Regression test for gh-17775 (ksone asymptotics for large n).

    The KS one-sided asymptotic sf is ~ exp(-(6nx+1)^2 / 18n).  Given a
    large 32-bit integer n, 6n overflowed in the C implementation in
    scipy 1.9.3 and earlier, e.g.
        ksone.sf(2.0e-5, 1000000000) == 0.9374359693473666
    """
    dist = stats.ksone
    computed = np.array([dist.sf(x, n), dist.cdf(x, n), dist.pdf(x, n)])
    npt.assert_allclose(computed, np.array([sf, cdf, pdf]), rtol=rtol)
    # The sf and cdf are complementary and must sum exactly to 1.0.
    npt.assert_equal(computed[0] + computed[1], 1.0)
    # Inverting a potentially very small sf warrants a looser tolerance.
    npt.assert_allclose([dist.isf(sf, n)], [x], rtol=1e-8)
def test_rvs_gh2069_regression():
    """Regression tests for gh-2069 (broadcasting of loc/scale in rvs).

    In scipy 0.17 and earlier, e.g.
        >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
    returned five copies of a single draw:
        array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
    """
    rng = np.random.RandomState(123)
    err = "All the values are equal, but they shouldn't be!"
    cases = [dict(loc=np.zeros(5), scale=1),
             dict(loc=0, scale=np.ones(5)),
             dict(loc=np.zeros(5), scale=np.ones(5)),
             dict(loc=np.array([[0], [0]]), scale=np.ones(5))]
    for kwargs in cases:
        vals = stats.norm.rvs(random_state=rng, **kwargs)
        # Adjacent equal draws would indicate broadcasting replicated one
        # variate instead of drawing independently.
        npt.assert_(np.all(np.diff(vals.ravel()) != 0), err)

    # Incompatible argument shapes must raise ValueError.
    assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
                  [[1, 1], [1, 1]], 1)
    assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
    assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
                  [[1], [2]], (4,))
def test_nomodify_gh9900_regression():
    """cdf() must not mutate the shared stats.truncnorm instance (gh-9900).

    Previously, calls to stats.truncnorm._cdf() used whatever state had
    been stored inside the global stats.truncnorm instance by an earlier
    stats.truncnorm.cdf() call, which could break multi-threaded code.
    cdf() calls are no longer permitted to modify the global instance.
    """
    tn = stats.truncnorm
    # Reference values for the right- and left-half truncated normals.
    right_half = 0.6826894921370859
    left_half = 0.31731050786291415
    # Right-half truncated normal: cdf and _cdf return the same result.
    npt.assert_almost_equal(tn.cdf(1, 0, np.inf), right_half)
    npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]), right_half)
    # Now use the left-half truncated normal.
    npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), left_half)
    npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]), left_half)
    # The right-half results must be unchanged by the left-half calls
    # (NOT 1.6826894921370859).
    npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]), right_half)
    npt.assert_almost_equal(tn.cdf(1, 0, np.inf), right_half)
    # ... and the left-half results likewise (NOT -0.6826894921370859).
    npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]), left_half)
    npt.assert_almost_equal(tn.cdf(1, -np.inf, 0), 1)  # not 1.6826894921370859
    npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), left_half)
def test_broadcast_gh9990_regression():
    """Regression test for gh-9990 (broadcast inside reciprocal._cdf).

    The x-value 7 lies within the support of only 4 of the 6 supplied
    distributions.  Prior to gh-9990, one array passed to
    stats.reciprocal._cdf had 4 elements while an array previously stored
    by stats.reciprocal_argcheck() had 6, leading to a broadcast error.
    """
    a = np.array([1, 2, 3, 4, 5, 6])
    b = np.array([8, 16, 1, 32, 1, 48])
    # Check scalar x inside/outside some supports, then array-valued x.
    for x in (7, 1, a, b):
        expected = [stats.reciprocal.cdf(_x, _a, _b)
                    for _x, _a, _b in zip(np.broadcast_to(x, a.shape), a, b)]
        npt.assert_array_almost_equal(stats.reciprocal.cdf(x, a, b), expected)
def test_broadcast_gh7933_regression():
    """truncnorm.logpdf must broadcast x, a, and loc without raising."""
    loc = np.array([6.0, 5.0, 4.0])
    stats.truncnorm.logpdf(
        np.array([3.0, 2.0, 1.0]),
        a=(1.5 - loc) / 3.0,
        b=np.inf,
        loc=loc,
        scale=3.0,
    )
def test_gh2002_regression():
    """Broadcast where only some x-values fit some shapes' support.

    Each x is paired with a shape parameter `a`; only some pairs place x
    inside the support, and the vectorized pdf must match element-wise
    scalar evaluation.
    """
    x = np.r_[-2:2:101j]
    a = np.r_[-np.ones(50), np.ones(51)]
    expected = [stats.truncnorm.pdf(xi, ai, np.inf) for xi, ai in zip(x, a)]
    result = stats.truncnorm.pdf(x, a, np.inf)
    npt.assert_array_almost_equal(result, expected)
def test_gh1320_regression():
    """The first example from gh-1320 must not raise."""
    c = 2.62
    stats.genextreme.ppf(0.5, np.array([[c], [c + 0.5]]))
    # The other examples in gh-1320 appear to have stopped working
    # some time ago:
    # ans = stats.genextreme.moment(2, np.array([c, c + 0.5]))
    # expected = np.array([25.50105963, 115.11191437])
    # stats.genextreme.moment(5, np.array([[c], [c + 0.5]]))
    # stats.genextreme.moment(5, np.array([c, c + 0.5]))
def test_method_of_moments():
    """Method-of-moments fit of the uniform distribution.

    Example from
    https://en.wikipedia.org/wiki/Method_of_moments_(statistics)
    """
    np.random.seed(1234)
    data = [0, 0, 0, 0, 1]
    lower = 1/5 - 2*np.sqrt(3)/5
    upper = 1/5 + 2*np.sqrt(3)/5
    # uniform.fit is overridden, so go through the base class to force MM.
    loc, scale = super(type(stats.uniform), stats.uniform).fit(data,
                                                               method="MM")
    npt.assert_almost_equal(loc, lower, decimal=4)
    npt.assert_almost_equal(loc + scale, upper, decimal=4)
def check_sample_meanvar_(popmean, popvar, sample):
    """Check the sample against each population moment that is finite."""
    for value, check in ((popmean, check_sample_mean),
                         (popvar, check_sample_var)):
        # Non-finite population moments cannot be compared to a sample.
        if np.isfinite(value):
            check(sample, value)
def check_sample_mean(sample, popmean):
    """One-sample t-test: the sample mean must be consistent with popmean."""
    pvalue = stats.ttest_1samp(sample, popmean).pvalue
    # A tiny p-value would flag an unlikely difference between the sample
    # mean and the population mean.
    assert pvalue > 0.01
def check_sample_var(sample, popvar):
    """Check popvar lies in a bootstrap CI of the sample variance.

    This used to be a chi-squared test for the variance, but it produced
    too many false positives.
    """
    boot = stats.bootstrap(
        (sample,),
        lambda x, axis: x.var(ddof=1, axis=axis),
        confidence_level=0.995,
    )
    ci = boot.confidence_interval
    assert ci.low <= popvar <= ci.high
def check_cdf_ppf(distfn, arg, msg):
    """cdf(ppf(q)) must round-trip to q at low/median/high quantiles."""
    quantiles = [0.001, 0.5, 0.999]
    roundtrip = distfn.cdf(distfn.ppf(quantiles, *arg), *arg)
    npt.assert_almost_equal(roundtrip, quantiles, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-ppf roundtrip')
def check_sf_isf(distfn, arg, msg):
    """sf(isf(q)) must round-trip to q at low/median/high quantiles."""
    quantiles = [0.1, 0.5, 0.9]
    roundtrip = distfn.sf(distfn.isf(quantiles, *arg), *arg)
    npt.assert_almost_equal(roundtrip, quantiles, decimal=DECIMAL,
                            err_msg=msg + ' - sf-isf roundtrip')
def check_cdf_sf(distfn, arg, msg):
    """cdf and sf must be complementary: cdf(x) == 1 - sf(x)."""
    x = [0.1, 0.9]
    npt.assert_almost_equal(distfn.cdf(x, *arg),
                            1.0 - distfn.sf(x, *arg),
                            decimal=DECIMAL,
                            err_msg=msg + ' - cdf-sf relationship')
def check_ppf_isf(distfn, arg, msg):
    """isf and ppf must be reflections: isf(p) == ppf(1 - p)."""
    p = np.array([0.1, 0.9])
    npt.assert_almost_equal(distfn.isf(p, *arg), distfn.ppf(1-p, *arg),
                            decimal=DECIMAL,
                            err_msg=msg + ' - ppf-isf relationship')
def check_pdf(distfn, arg, msg):
    """Compare the pdf at the median with a numerical cdf derivative."""
    eps = 1e-6
    point = distfn.ppf(0.5, *arg)
    pdfv = distfn.pdf(point, *arg)
    if (pdfv < 1e-4) or (pdfv > 1e4):
        # Step away from a near-zero pdf or a singularity before checking.
        point = point + 0.1
        pdfv = distfn.pdf(point, *arg)
    # Central difference of the cdf; a single point works well enough,
    # although more points and a better difference scheme would be nicer.
    cdf_slope = (distfn.cdf(point + eps, *arg) -
                 distfn.cdf(point - eps, *arg))/eps/2.0
    npt.assert_almost_equal(pdfv, cdf_slope, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-pdf relationship')
def check_pdf_logpdf(distfn, args, msg):
    """log(pdf) must agree with logpdf at several interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    pdf = distfn.pdf(x, *args)
    logpdf = distfn.logpdf(x, *args)
    # Drop zero/non-finite values, where log(pdf) is undefined.
    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
    logpdf = logpdf[np.isfinite(logpdf)]
    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7,
                            err_msg=msg + " - logpdf-log(pdf) relationship")
def check_pdf_logpdf_at_endpoints(distfn, args, msg):
    """log(pdf) must agree with logpdf at the (finite) support endpoints."""
    quantiles = np.array([0, 1])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    pdf = distfn.pdf(x, *args)
    logpdf = distfn.logpdf(x, *args)
    # Drop zero/non-finite values, where log(pdf) is undefined.
    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
    logpdf = logpdf[np.isfinite(logpdf)]
    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7,
                            err_msg=msg + " - logpdf-log(pdf) relationship")
def check_sf_logsf(distfn, args, msg):
    """log(sf) must agree with logsf at several quantiles."""
    quantiles = np.array([0.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    sf = distfn.sf(x, *args)
    logsf = distfn.logsf(x, *args)
    # Drop zero sf values (log undefined) and non-finite logsf values.
    sf = sf[sf != 0]
    logsf = logsf[np.isfinite(logsf)]
    npt.assert_almost_equal(np.log(sf), logsf, decimal=7,
                            err_msg=msg + " - logsf-log(sf) relationship")
def check_cdf_logcdf(distfn, args, msg):
    """log(cdf) must agree with logcdf at several quantiles."""
    quantiles = np.array([0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    cdf = distfn.cdf(x, *args)
    logcdf = distfn.logcdf(x, *args)
    # Drop zero cdf values (log undefined) and non-finite logcdf values.
    cdf = cdf[cdf != 0]
    logcdf = logcdf[np.isfinite(logcdf)]
    npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7,
                            err_msg=msg + " - logcdf-log(cdf) relationship")
def check_ppf_broadcast(distfn, arg, msg):
    """ppf with repeated (stacked) shape args must match the scalar ppf."""
    num_repeats = 5
    # Repeat each shape parameter so that ppf broadcasts over the stack;
    # with no shape parameters the stacked-args list is simply empty.
    stacked = [np.array([v] * num_repeats) for v in arg] if arg else []
    median = distfn.ppf(0.5, *arg)
    medians = distfn.ppf(0.5, *stacked)
    npt.assert_almost_equal(medians, [median] * num_repeats, decimal=7,
                            err_msg=msg + " - ppf multiple")
def check_distribution_rvs(dist, args, alpha, rvs):
    """K-S test that `rvs` follows `dist`; retry once on a fluke failure.

    `dist` is either a cdf callable or the name of a scipy.stats
    distribution, `args` its shape parameters, `alpha` a small
    significance level (~0.01), and `rvs` an existing sample to reuse.
    (Adapted from scipy.stats.tests.)
    """
    D, pval = stats.kstest(rvs, dist, args=args, N=1000)
    if (pval < alpha):
        # The supplied sample failed the K-S test, which _could_ happen
        # but is unlikely if alpha is small enough.  Repeat with a fresh
        # sample: generate 1000 rvs and K-S test them against the
        # distribution.
        D, pval = stats.kstest(dist, dist, args=args, N=1000)
    npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
                "; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
    """The vectorized entropy must agree with the private ``_entropy``."""
    vectorized = distfn.vecentropy(*args)
    private = distfn._entropy(*args)
    npt.assert_equal(vectorized, private)
def check_loc_scale(distfn, arg, m, v, msg):
    """``stats()`` must shift/scale mean and variance correctly.

    Array-valued `loc` and `scale` are used on purpose to catch improper
    broadcasting bugs such as gh-13580.
    """
    loc = np.array([10.0, 20.0])
    scale = np.array([10.0, 20.0])
    mean_t, var_t = distfn.stats(*arg, loc=loc, scale=scale)
    npt.assert_allclose(m * scale + loc, mean_t)
    npt.assert_allclose(v * scale * scale, var_t)
def check_ppf_private(distfn, arg, msg):
    """``_ppf`` must not produce NaN at interior quantiles.

    Fails by design for truncnorm, where self.nb is not defined.
    """
    quantiles = np.array([0.1, 0.5, 0.9])
    res = distfn._ppf(quantiles, *arg)
    npt.assert_(not np.any(np.isnan(res)), msg + 'ppf private is nan')
def check_retrieving_support(distfn, args):
    """``support()`` must transform consistently under loc and scale."""
    loc, scale = 1, 2
    base_support = np.array(distfn.support(*args))
    shifted_support = np.array(distfn.support(*args, loc=loc, scale=scale))
    npt.assert_almost_equal(base_support * scale + loc, shifted_support)
def check_fit_args(distfn, arg, rvs, method):
    """Smoke test ``fit`` with the default and the Powell optimizer.

    Only the length of the returned parameter tuple is checked here
    (loc + scale + shape parameters); accuracy is covered in test_fit.py.
    """
    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
        # silence known benign warnings emitted during fitting
        sup.filter(category=RuntimeWarning,
                   message="The shape parameter of the erlang")
        sup.filter(category=RuntimeWarning,
                   message="floating point number truncated")
        vals = distfn.fit(rvs, method=method)
        vals2 = distfn.fit(rvs, optimizer='powell', method=method)
    # Only check the length of the return; accuracy tested in test_fit.py
    npt.assert_(len(vals) == 2+len(arg))
    npt.assert_(len(vals2) == 2+len(arg))
def check_fit_args_fix(distfn, arg, rvs, method):
    """``fit`` with fixed parameters must honor the fixed values.

    Fixes loc (``floc``), scale (``fscale``) and up to three shape
    parameters (``f0``/``f1``/``f2``) and checks each is returned verbatim.
    """
    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
        sup.filter(category=RuntimeWarning,
                   message="The shape parameter of the erlang")
        vals = distfn.fit(rvs, floc=0, method=method)
        vals2 = distfn.fit(rvs, fscale=1, method=method)
        npt.assert_(len(vals) == 2+len(arg))
        # loc is the second-to-last entry, scale the last
        npt.assert_(vals[-2] == 0)
        npt.assert_(vals2[-1] == 1)
        npt.assert_(len(vals2) == 2+len(arg))
        if len(arg) > 0:
            vals3 = distfn.fit(rvs, f0=arg[0], method=method)
            npt.assert_(len(vals3) == 2+len(arg))
            npt.assert_(vals3[0] == arg[0])
        if len(arg) > 1:
            vals4 = distfn.fit(rvs, f1=arg[1], method=method)
            npt.assert_(len(vals4) == 2+len(arg))
            npt.assert_(vals4[1] == arg[1])
        if len(arg) > 2:
            vals5 = distfn.fit(rvs, f2=arg[2], method=method)
            npt.assert_(len(vals5) == 2+len(arg))
            npt.assert_(vals5[2] == arg[2])
@pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf',
                                    'sf', 'logsf', 'ppf', 'isf'])
@pytest.mark.parametrize('distname, args', distcont)
def test_methods_with_lists(method, distname, args):
    # Test that the continuous distributions can accept Python lists
    # as arguments.
    dist = getattr(stats, distname)
    f = getattr(dist, method)
    # invweibull log-methods use larger x values — presumably to keep the
    # log quantities finite near the lower support; TODO confirm
    if distname == 'invweibull' and method.startswith('log'):
        x = [1.5, 2]
    else:
        x = [0.1, 0.2]
    # every shape parameter repeated twice so all inputs are length-2 lists
    shape2 = [[a]*2 for a in args]
    loc = [0, 0.1]
    scale = [1, 1.01]
    result = f(x, *shape2, loc=loc, scale=scale)
    # vectorized call must match elementwise scalar calls
    npt.assert_allclose(result,
                        [f(*v) for v in zip(x, *shape2, loc, scale)],
                        rtol=1e-14, atol=5e-14)
def test_burr_fisk_moment_gh13234_regression():
    """Regression test for gh-13234: moment() must return a float scalar."""
    burr_moment = stats.burr.moment(1, 5, 4)
    fisk_moment = stats.fisk.moment(1, 8)
    assert isinstance(burr_moment, float)
    assert isinstance(fisk_moment, float)
def test_moments_with_array_gh12192_regression():
    """gh-12192 regression: moment() must broadcast loc/scale/shape arrays,
    returning NaN exactly where the corresponding parameters are invalid."""
    # array loc and scalar scale
    vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=1)
    expected0 = np.array([1., 2., 3.])
    npt.assert_equal(vals0, expected0)
    # array loc and invalid scalar scale
    vals1 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=-1)
    expected1 = np.array([np.nan, np.nan, np.nan])
    npt.assert_equal(vals1, expected1)
    # array loc and array scale with invalid entries
    vals2 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]),
                              scale=[-3, 1, 0])
    expected2 = np.array([np.nan, 2., np.nan])
    npt.assert_equal(vals2, expected2)
    # (loc == 0) & (scale < 0)
    vals3 = stats.norm.moment(order=2, loc=0, scale=-4)
    expected3 = np.nan
    npt.assert_equal(vals3, expected3)
    # scalar input must give a scalar (float) output, not a 0-d array
    assert isinstance(vals3, expected3.__class__)
    # array loc with 0 entries and scale with invalid entries
    vals4 = stats.norm.moment(order=2, loc=[1, 0, 2], scale=[3, -4, -5])
    expected4 = np.array([10., np.nan, np.nan])
    npt.assert_equal(vals4, expected4)
    # all(loc == 0) & (array scale with invalid entries)
    vals5 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[5., -2, 100.])
    expected5 = np.array([25., np.nan, 10000.])
    npt.assert_equal(vals5, expected5)
    # all( (loc == 0) & (scale < 0) )
    vals6 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[-5., -2, -100.])
    expected6 = np.array([np.nan, np.nan, np.nan])
    npt.assert_equal(vals6, expected6)
    # scalar args, loc, and scale
    vals7 = stats.chi.moment(order=2, df=1, loc=0, scale=0)
    expected7 = np.nan
    npt.assert_equal(vals7, expected7)
    assert isinstance(vals7, expected7.__class__)
    # array args, scalar loc, and scalar scale
    vals8 = stats.chi.moment(order=2, df=[1, 2, 3], loc=0, scale=0)
    expected8 = np.array([np.nan, np.nan, np.nan])
    npt.assert_equal(vals8, expected8)
    # array args, array loc, and array scale
    vals9 = stats.chi.moment(order=2, df=[1, 2, 3], loc=[1., 0., 2.],
                             scale=[1., -3., 0.])
    expected9 = np.array([3.59576912, np.nan, np.nan])
    npt.assert_allclose(vals9, expected9, rtol=1e-8)
    # (n > 4), all(loc != 0), and all(scale != 0)
    vals10 = stats.norm.moment(5, [1., 2.], [1., 2.])
    expected10 = np.array([26., 832.])
    npt.assert_allclose(vals10, expected10, rtol=1e-13)
    # test broadcasting and more
    a = [-1.1, 0, 1, 2.2, np.pi]
    b = [-1.1, 0, 1, 2.2, np.pi]
    loc = [-1.1, 0, np.sqrt(2)]
    scale = [-2.1, 0, 1, 2.2, np.pi]
    # reshape so the four parameters broadcast to a 4-d grid
    a = np.array(a).reshape((-1, 1, 1, 1))
    b = np.array(b).reshape((-1, 1, 1))
    loc = np.array(loc).reshape((-1, 1))
    scale = np.array(scale)
    vals11 = stats.beta.moment(order=2, a=a, b=b, loc=loc, scale=scale)
    a, b, loc, scale = np.broadcast_arrays(a, b, loc, scale)
    for i in np.ndenumerate(a):
        with np.errstate(invalid='ignore', divide='ignore'):
            i = i[0]  # just get the index
            # check against same function with scalar input
            expected = stats.beta.moment(order=2, a=a[i], b=b[i],
                                         loc=loc[i], scale=scale[i])
            np.testing.assert_equal(vals11[i], expected)
def test_broadcasting_in_moments_gh12192_regression():
    """gh-12192 regression: moment() results must have the broadcast shape
    of loc/scale/shape inputs."""
    vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=[[1]])
    expected0 = np.array([[1., 2., 3.]])
    npt.assert_equal(vals0, expected0)
    assert vals0.shape == expected0.shape
    # column loc against row scale -> full 3x3 result
    vals1 = stats.norm.moment(order=1, loc=np.array([[1], [2], [3]]),
                              scale=[1, 2, 3])
    expected1 = np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]])
    npt.assert_equal(vals1, expected1)
    assert vals1.shape == expected1.shape
    vals2 = stats.chi.moment(order=1, df=[1., 2., 3.], loc=0., scale=1.)
    expected2 = np.array([0.79788456, 1.25331414, 1.59576912])
    npt.assert_allclose(vals2, expected2, rtol=1e-8)
    assert vals2.shape == expected2.shape
    # invalid scale entries must map to NaN columns after broadcasting
    vals3 = stats.chi.moment(order=1, df=[[1.], [2.], [3.]], loc=[0., 1., 2.],
                             scale=[-1., 0., 3.])
    expected3 = np.array([[np.nan, np.nan, 4.39365368],
                          [np.nan, np.nan, 5.75994241],
                          [np.nan, np.nan, 6.78730736]])
    npt.assert_allclose(vals3, expected3, rtol=1e-8)
    assert vals3.shape == expected3.shape
def test_kappa3_array_gh13582():
    """Array shape parameters must give the same mvsk stats as scalar calls."""
    # https://github.com/scipy/scipy/pull/15140#issuecomment-994958241
    shapes = [0.5, 1.5, 2.5, 3.5, 4.5]
    moments = 'mvsk'
    # scalar calls, one per (moment, shape) pair
    res = np.array([[stats.kappa3.stats(shape, moments=moment)
                     for shape in shapes] for moment in moments])
    # single vectorized call
    res2 = np.array(stats.kappa3.stats(shapes, moments=moments))
    npt.assert_allclose(res, res2)
@pytest.mark.xslow
def test_kappa4_array_gh13582():
    """kappa4 mvsk stats must vectorize/broadcast over both shape params."""
    h = np.array([-0.5, 2.5, 3.5, 4.5, -3])
    k = np.array([-0.5, 1, -1.5, 0, 3.5])
    moments = 'mvsk'
    res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment)
                     for i in range(5)] for moment in moments])
    res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
    npt.assert_allclose(res, res2)
    # https://github.com/scipy/scipy/pull/15250#discussion_r775112913
    h = np.array([-1, -1/4, -1/4, 1, -1, 0])
    k = np.array([1, 1, 1/2, -1/3, -1, 0])
    res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment)
                     for i in range(6)] for moment in moments])
    res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
    npt.assert_allclose(res, res2)
    # https://github.com/scipy/scipy/pull/15250#discussion_r775115021
    # broadcasting a row `h` against a column `k` -> (4, 3) per moment
    h = np.array([-1, -0.5, 1])
    k = np.array([-1, -0.5, 0, 1])[:, None]
    res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
    assert res2.shape == (4, 4, 3)
def test_frozen_attributes():
    """Frozen continuous distributions must not expose pmf/logpmf."""
    # gh-14827 reported that all frozen distributions had both pmf and pdf
    # attributes; continuous should have pdf and discrete should have pmf.
    message = "'rv_continuous_frozen' object has no attribute"
    with pytest.raises(AttributeError, match=message):
        stats.norm().pmf
    with pytest.raises(AttributeError, match=message):
        stats.norm().logpmf
    # a pmf attribute added to the unfrozen distribution must still freeze
    stats.norm.pmf = "herring"
    frozen_norm = stats.norm()
    assert isinstance(frozen_norm, rv_continuous_frozen)
    # clean up the monkeypatched attribute so other tests are unaffected
    delattr(stats.norm, 'pmf')
def test_skewnorm_pdf_gh16038():
    """gh-16038: skewnorm pdf with a == 0 must reduce exactly to norm.pdf."""
    rng = np.random.default_rng(0)
    x, a = -np.inf, 0
    npt.assert_equal(stats.skewnorm.pdf(x, a), stats.norm.pdf(x))
    x, a = rng.random(size=(3, 3)), rng.random(size=(3, 3))
    # zero out a random subset of skewness parameters
    mask = rng.random(size=(3, 3)) < 0.5
    a[mask] = 0
    x_norm = x[mask]
    res = stats.skewnorm.pdf(x, a)
    # where a == 0 the result equals norm.pdf; elsewhere skewnorm.pdf
    npt.assert_equal(res[mask], stats.norm.pdf(x_norm))
    npt.assert_equal(res[~mask], stats.skewnorm.pdf(x[~mask], a[~mask]))
# for scalar input, these functions should return scalar output
# Each entry is a (method_name, args) pair; `scalar_out` methods return a
# single scalar, `scalars_out` methods return a tuple of scalars.
scalar_out = [['rvs', []], ['pdf', [0]], ['logpdf', [0]], ['cdf', [0]],
              ['logcdf', [0]], ['sf', [0]], ['logsf', [0]], ['ppf', [0]],
              ['isf', [0]], ['moment', [1]], ['entropy', []], ['expect', []],
              ['median', []], ['mean', []], ['std', []], ['var', []]]
scalars_out = [['interval', [0.95]], ['support', []], ['stats', ['mv']]]
@pytest.mark.parametrize('case', scalar_out + scalars_out)
def test_scalar_for_scalar(case):
    # Some rv_continuous functions returned 0d array instead of NumPy scalar
    # Guard against regression
    method_name, args = case
    method = getattr(stats.norm(), method_name)
    res = method(*args)
    if case in scalar_out:
        # single scalar return
        assert isinstance(res, np.number)
    else:
        # tuple return: both elements must be NumPy scalars
        assert isinstance(res[0], np.number)
        assert isinstance(res[1], np.number)
def test_scalar_for_scalar2():
    """Methods absent from frozen distributions also return NumPy scalars."""
    data = [1, 2, 3]
    fit_res = stats.norm.fit(data)
    assert isinstance(fit_res[0], np.number)
    assert isinstance(fit_res[1], np.number)
    loc_scale = stats.norm.fit_loc_scale(data)
    assert isinstance(loc_scale[0], np.number)
    assert isinstance(loc_scale[1], np.number)
    nnlf_res = stats.norm.nnlf((0, 1), data)
    assert isinstance(nnlf_res, np.number)
| 41,529
| 40.200397
| 112
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_variation.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pytest
from scipy.stats import variation
class TestVariation:
    """
    Test class for scipy.stats.variation
    """

    def test_ddof(self):
        # variation with one delta degree of freedom
        x = np.arange(9.0)
        assert_allclose(variation(x, ddof=1), np.sqrt(60/8)/4)

    @pytest.mark.parametrize('sgn', [1, -1])
    def test_sign(self, sgn):
        # the sign of the result follows the sign of the mean
        x = np.array([1, 2, 3, 4, 5])
        v = variation(sgn*x)
        expected = sgn*np.sqrt(2)/3
        assert_allclose(v, expected, rtol=1e-10)

    def test_scalar(self):
        # A scalar is treated like a 1-d sequence with length 1.
        assert_equal(variation(4.0), 0.0)

    @pytest.mark.parametrize('nan_policy, expected',
                             [('propagate', np.nan),
                              ('omit', np.sqrt(20/3)/4)])
    def test_variation_nan(self, nan_policy, expected):
        x = np.arange(10.)
        x[9] = np.nan
        assert_allclose(variation(x, nan_policy=nan_policy), expected)

    def test_nan_policy_raise(self):
        x = np.array([1.0, 2.0, np.nan, 3.0])
        with pytest.raises(ValueError, match='input contains nan'):
            variation(x, nan_policy='raise')

    def test_bad_nan_policy(self):
        with pytest.raises(ValueError, match='must be one of'):
            variation([1, 2, 3], nan_policy='foobar')

    def test_keepdims(self):
        # keepdims=True retains the reduced axis with length 1
        x = np.arange(10).reshape(2, 5)
        y = variation(x, axis=1, keepdims=True)
        expected = np.array([[np.sqrt(2)/2],
                             [np.sqrt(2)/7]])
        assert_allclose(y, expected)

    @pytest.mark.parametrize('axis, expected',
                             [(0, np.empty((1, 0))),
                              (1, np.full((5, 1), fill_value=np.nan))])
    def test_keepdims_size0(self, axis, expected):
        # zero-length axes: empty result or NaN fill, depending on axis
        x = np.zeros((5, 0))
        y = variation(x, axis=axis, keepdims=True)
        assert_equal(y, expected)

    @pytest.mark.parametrize('incr, expected_fill', [(0, np.inf), (1, np.nan)])
    def test_keepdims_and_ddof_eq_len_plus_incr(self, incr, expected_fill):
        # ddof == n gives inf; ddof > n gives nan
        x = np.array([[1, 1, 2, 2], [1, 2, 3, 3]])
        y = variation(x, axis=1, ddof=x.shape[1] + incr, keepdims=True)
        assert_equal(y, np.full((2, 1), fill_value=expected_fill))

    def test_propagate_nan(self):
        # Check that the shape of the result is the same for inputs
        # with and without nans, cf gh-5817
        a = np.arange(8).reshape(2, -1).astype(float)
        a[1, 0] = np.nan
        v = variation(a, axis=1, nan_policy="propagate")
        assert_allclose(v, [np.sqrt(5/4)/1.5, np.nan], atol=1e-15)

    def test_axis_none(self):
        # Check that `variation` computes the result on the flattened
        # input when axis is None.
        y = variation([[0, 1], [2, 3]], axis=None)
        assert_allclose(y, np.sqrt(5/4)/1.5)

    def test_bad_axis(self):
        # Check that an invalid axis raises np.AxisError.
        x = np.array([[1, 2, 3], [4, 5, 6]])
        with pytest.raises(np.AxisError):
            variation(x, axis=10)

    def test_mean_zero(self):
        # Check that `variation` returns inf for a sequence that is not
        # identically zero but whose mean is zero.
        x = np.array([10, -3, 1, -4, -4])
        y = variation(x)
        assert_equal(y, np.inf)
        x2 = np.array([x, -10*x])
        y2 = variation(x2, axis=1)
        assert_equal(y2, [np.inf, np.inf])

    @pytest.mark.parametrize('x', [np.zeros(5), [], [1, 2, np.inf, 9]])
    def test_return_nan(self, x):
        # Test some cases where `variation` returns nan.
        y = variation(x)
        assert_equal(y, np.nan)

    @pytest.mark.parametrize('axis, expected',
                             [(0, []), (1, [np.nan]*3), (None, np.nan)])
    def test_2d_size_zero_with_axis(self, axis, expected):
        x = np.empty((3, 0))
        y = variation(x, axis=axis)
        assert_equal(y, expected)

    def test_neg_inf(self):
        # Edge case that produces -inf: ddof equals the number of non-nan
        # values, the values are not constant, and the mean is negative.
        x1 = np.array([-3, -5])
        assert_equal(variation(x1, ddof=2), -np.inf)
        x2 = np.array([[np.nan, 1, -10, np.nan],
                       [-20, -3, np.nan, np.nan]])
        assert_equal(variation(x2, axis=1, ddof=2, nan_policy='omit'),
                     [-np.inf, -np.inf])

    @pytest.mark.parametrize("nan_policy", ['propagate', 'omit'])
    def test_combined_edge_cases(self, nan_policy):
        x = np.array([[0, 10, np.nan, 1],
                      [0, -5, np.nan, 2],
                      [0, -5, np.nan, 3]])
        y = variation(x, axis=0, nan_policy=nan_policy)
        assert_allclose(y, [np.nan, np.inf, np.nan, np.sqrt(2/3)/2])

    @pytest.mark.parametrize(
        'ddof, expected',
        [(0, [np.sqrt(1/6), np.sqrt(5/8), np.inf, 0, np.nan, 0.0, np.nan]),
         (1, [0.5, np.sqrt(5/6), np.inf, 0, np.nan, 0, np.nan]),
         (2, [np.sqrt(0.5), np.sqrt(5/4), np.inf, np.nan, np.nan, 0, np.nan])]
    )
    def test_more_nan_policy_omit_tests(self, ddof, expected):
        # The slightly strange formatting in the follow array is my attempt to
        # maintain a clean tabular arrangement of the data while satisfying
        # the demands of pycodestyle.  Currently, E201 and E241 are not
        # disabled by the `# noqa` annotation.
        nan = np.nan
        x = np.array([[1.0, 2.0, nan, 3.0],
                      [0.0, 4.0, 3.0, 1.0],
                      [nan, -.5, 0.5, nan],
                      [nan, 9.0, 9.0, nan],
                      [nan, nan, nan, nan],
                      [3.0, 3.0, 3.0, 3.0],
                      [0.0, 0.0, 0.0, 0.0]])
        v = variation(x, axis=1, ddof=ddof, nan_policy='omit')
        assert_allclose(v, expected)

    def test_variation_ddof(self):
        # test variation with delta degrees of freedom
        # regression test for gh-13341
        a = np.array([1, 2, 3, 4, 5])
        nan_a = np.array([1, 2, 3, np.nan, 4, 5, np.nan])
        y = variation(a, ddof=1)
        nan_y = variation(nan_a, nan_policy="omit", ddof=1)
        assert_allclose(y, np.sqrt(5/2)/3)
        assert y == nan_y
| 6,245
| 38.283019
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/common_tests.py
|
import pickle
import numpy as np
import numpy.testing as npt
from numpy.testing import assert_allclose, assert_equal
from pytest import raises as assert_raises
import numpy.ma.testutils as ma_npt
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from scipy import stats
def check_named_results(res, attributes, ma=False):
    """Each positional element of `res` must equal its named attribute."""
    assert_fn = ma_npt.assert_equal if ma else npt.assert_equal
    for position, attr_name in enumerate(attributes):
        assert_fn(res[position], getattr(res, attr_name))
def check_normalization(distfn, args, distname):
    """The total probability mass must be 1, checked three ways:
    moment(0), expect() of the constant 1, and cdf at the upper support end."""
    npt.assert_allclose(distfn.moment(0, *args), 1.0)
    # the histogram-backed instance is only accurate to ~1e-5
    if distname == "rv_histogram_instance":
        atol, rtol = 1e-5, 0
    else:
        atol, rtol = 1e-7, 1e-7
    total_mass = distfn.expect(lambda x: 1, args=args)
    npt.assert_allclose(total_mass, 1.0, atol=atol, rtol=rtol,
                        err_msg=distname, verbose=True)
    upper_end = distfn.support(*args)[1]
    npt.assert_allclose(distfn.cdf(upper_end, *args), 1.0)
def check_moment(distfn, arg, m, v, msg):
    """Check the first two non-central moments against mean `m`, variance `v`.

    An infinite expected value only requires the computed moment to be
    infinite as well; the relation var = m2 - m1**2 is used otherwise.
    """
    m1 = distfn.moment(1, *arg)
    m2 = distfn.moment(2, *arg)
    if not np.isinf(m):
        npt.assert_almost_equal(m1, m, decimal=10,
                                err_msg=msg + ' - 1st moment')
    else:  # or np.isnan(m1),
        npt.assert_(np.isinf(m1),
                    msg + ' - 1st moment -infinite, m1=%s' % str(m1))
    if not np.isinf(v):
        # fixed typo in error message ('2ndt' -> '2nd')
        npt.assert_almost_equal(m2 - m1 * m1, v, decimal=10,
                                err_msg=msg + ' - 2nd moment')
    else:  # or np.isnan(m2),
        npt.assert_(np.isinf(m2), msg + f' - 2nd moment -infinite, {m2=}')
def check_mean_expect(distfn, arg, m, msg):
    """``expect`` of the identity must reproduce a finite mean `m`."""
    if not np.isfinite(m):
        return
    mean_via_expect = distfn.expect(lambda x: x, arg)
    npt.assert_almost_equal(mean_via_expect, m, decimal=5,
                            err_msg=msg + ' - 1st moment (expect)')
def check_var_expect(distfn, arg, m, v, msg):
    """``expect(x**2)`` must equal ``v + m**2`` when the variance is finite."""
    # a couple of distributions need a looser relative tolerance
    loose = msg in {"rv_histogram_instance", "ksone"}
    tol_kwargs = {'rtol': 5e-6} if loose else {}
    if not np.isfinite(v):
        return
    second_moment = distfn.expect(lambda x: x * x, arg)
    npt.assert_allclose(second_moment, v + m * m, **tol_kwargs)
def check_skew_expect(distfn, arg, m, v, s, msg):
    """The third central moment from ``expect`` must match ``s * v**1.5``."""
    if not np.isfinite(s):
        # a non-finite expected skew must be NaN
        npt.assert_(np.isnan(s))
        return
    third_central = distfn.expect(lambda x: np.power(x - m, 3), arg)
    npt.assert_almost_equal(third_central, s * np.power(v, 1.5),
                            decimal=5, err_msg=msg + ' - skew')
def check_kurt_expect(distfn, arg, m, v, k, msg):
    """The fourth central moment from ``expect`` must match ``(k + 3) * v**2``."""
    if np.isposinf(k):
        # infinite kurtosis is acceptable as-is
        return
    if np.isfinite(k):
        fourth_central = distfn.expect(lambda x: (x - m) ** 4, arg)
        npt.assert_allclose(fourth_central, (k + 3.) * v ** 2,
                            atol=1e-5, rtol=1e-5,
                            err_msg=msg + ' - kurtosis')
    else:
        npt.assert_(np.isnan(k))
def check_munp_expect(dist, args, msg):
    """Loosely check a high moment against ``expect`` when _munp is custom."""
    # If _munp is overridden, test a higher moment. (Before gh-18634, some
    # distributions had issues with moments 5 and higher.)
    if dist._munp.__func__ != stats.rv_continuous._munp:
        res = dist.moment(5, *args)  # shouldn't raise an error
        ref = dist.expect(lambda x: x ** 5, args, lb=-np.inf, ub=np.inf)
        if not np.isfinite(res):  # could be valid; automated test can't know
            return
        # loose tolerance, mostly to see whether _munp returns *something*
        assert_allclose(res, ref, atol=1e-10, rtol=1e-4,
                        err_msg=msg + ' - higher moment / _munp')
def check_entropy(distfn, arg, msg):
    """``entropy`` must not return NaN."""
    entropy_val = distfn.entropy(*arg)
    npt.assert_(not np.isnan(entropy_val), msg + 'test Entropy is nan')
def check_private_entropy(distfn, args, superclass):
    """A distribution-specific ``_entropy`` must match the generic one."""
    specific = distfn._entropy(*args)
    generic = superclass._entropy(distfn, *args)
    npt.assert_allclose(specific, generic)
def check_entropy_vect_scale(distfn, arg):
    """``entropy`` must vectorize over ``scale``, including invalid entries."""
    # a 2-d array, then a plain list containing an invalid (negative) scale
    for sc in (np.asarray([[1, 2], [3, 4]]), [1, 2, -3]):
        vectorized = distfn.entropy(*arg, scale=sc)
        elementwise = [distfn.entropy(*arg, scale=s) for s in np.ravel(sc)]
        elementwise = np.asarray(elementwise).reshape(vectorized.shape)
        assert_allclose(vectorized, elementwise, atol=1e-14)
def check_edge_support(distfn, args):
    """cdf/sf/ppf/isf and their logs must be exact at the support endpoints."""
    # Make sure that x=self.a and self.b are handled correctly.
    x = distfn.support(*args)
    if isinstance(distfn, stats.rv_discrete):
        # for discrete distributions cdf just below the lower endpoint is 0
        x = x[0]-1, x[1]
    npt.assert_equal(distfn.cdf(x, *args), [0.0, 1.0])
    npt.assert_equal(distfn.sf(x, *args), [1.0, 0.0])
    if distfn.name not in ('skellam', 'dlaplace'):
        # with a = -inf, log(0) generates warnings
        npt.assert_equal(distfn.logcdf(x, *args), [-np.inf, 0.0])
        npt.assert_equal(distfn.logsf(x, *args), [0.0, -np.inf])
    npt.assert_equal(distfn.ppf([0.0, 1.0], *args), x)
    npt.assert_equal(distfn.isf([0.0, 1.0], *args), x[::-1])
    # out-of-bounds for isf & ppf
    npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all())
    npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all())
def check_named_args(distfn, x, shape_args, defaults, meths):
    """Methods must accept shape parameters positionally and by keyword."""
    ## Check calling w/ named arguments.
    # check consistency of shapes, numargs and _parse signature
    signature = _getfullargspec(distfn._parse_args)
    npt.assert_(signature.varargs is None)
    npt.assert_(signature.varkw is None)
    npt.assert_(not signature.kwonlyargs)
    npt.assert_(list(signature.defaults) == list(defaults))
    shape_argnames = signature.args[:-len(defaults)]  # a, b, loc=0, scale=1
    if distfn.shapes:
        shapes_ = distfn.shapes.replace(',', ' ').split()
    else:
        shapes_ = ''
    npt.assert_(len(shapes_) == distfn.numargs)
    npt.assert_(len(shapes_) == len(shape_argnames))
    # check calling w/ named arguments
    shape_args = list(shape_args)
    vals = [meth(x, *shape_args) for meth in meths]
    npt.assert_(np.all(np.isfinite(vals)))
    # move one positional argument at a time into the keyword dict and
    # verify the result never changes
    names, a, k = shape_argnames[:], shape_args[:], {}
    while names:
        k.update({names.pop(): a.pop()})
        v = [meth(x, *a, **k) for meth in meths]
        npt.assert_array_equal(vals, v)
        if 'n' not in k.keys():
            # `n` is first parameter of moment(), so can't be used as named arg
            npt.assert_equal(distfn.moment(1, *a, **k),
                             distfn.moment(1, *shape_args))
    # unknown arguments should not go through:
    k.update({'kaboom': 42})
    assert_raises(TypeError, distfn.cdf, x, **k)
def check_random_state_property(distfn, args):
    """Exercise the distribution's `random_state` attribute and overrides."""
    # check the random_state attribute of a distribution *instance*

    # This test fiddles with distfn.random_state. This breaks other tests,
    # hence need to save it and then restore.
    rndm = distfn.random_state

    # baseline: this relies on the global state
    np.random.seed(1234)
    distfn.random_state = None
    r0 = distfn.rvs(*args, size=8)

    # use an explicit instance-level random_state; an int seed and an
    # equivalently seeded RandomState must reproduce the baseline draws
    distfn.random_state = 1234
    r1 = distfn.rvs(*args, size=8)
    npt.assert_equal(r0, r1)

    distfn.random_state = np.random.RandomState(1234)
    r2 = distfn.rvs(*args, size=8)
    npt.assert_equal(r0, r2)

    # check that np.random.Generator can be used (numpy >= 1.17)
    if hasattr(np.random, 'default_rng'):
        # obtain a np.random.Generator object
        rng = np.random.default_rng(1234)
        distfn.rvs(*args, size=1, random_state=rng)

    # can override the instance-level random_state for an individual .rvs call
    distfn.random_state = 2
    orig_state = distfn.random_state.get_state()
    r3 = distfn.rvs(*args, size=8, random_state=np.random.RandomState(1234))
    npt.assert_equal(r0, r3)

    # ... and that does not alter the instance-level random_state!
    npt.assert_equal(distfn.random_state.get_state(), orig_state)

    # finally, restore the random_state
    distfn.random_state = rndm
def check_meth_dtype(distfn, arg, meths):
    """Methods must return float64 arrays for int/float16/32/64 inputs.

    Uses ``np.float64`` in place of the ``np.float_`` alias, which was
    removed in NumPy 2.0 (they are the same type on older NumPy).
    """
    q0 = [0.25, 0.5, 0.75]
    x0 = distfn.ppf(q0, *arg)
    x_cast = [x0.astype(tp) for tp in (np.int_, np.float16, np.float32,
                                       np.float64)]
    for x in x_cast:
        # casting may have clipped the values, exclude those
        distfn._argcheck(*arg)
        x = x[(distfn.a < x) & (x < distfn.b)]
        for meth in meths:
            val = meth(x, *arg)
            npt.assert_(val.dtype == np.float64)
def check_ppf_dtype(distfn, arg):
    """``ppf``/``isf`` must return float64 for float16/32/64 quantile input.

    Uses ``np.float64`` in place of the ``np.float_`` alias, which was
    removed in NumPy 2.0 (they are the same type on older NumPy).
    """
    q0 = np.asarray([0.25, 0.5, 0.75])
    q_cast = [q0.astype(tp) for tp in (np.float16, np.float32, np.float64)]
    for q in q_cast:
        for meth in [distfn.ppf, distfn.isf]:
            val = meth(q, *arg)
            npt.assert_(val.dtype == np.float64)
def check_cmplx_deriv(distfn, arg):
    """Check pdf/cdf/sf derivative identities via complex-step derivatives."""
    # Distributions allow complex arguments.
    def deriv(f, x, *arg):
        # complex-step numerical derivative: imag(f(x + i*h)) / h
        x = np.asarray(x)
        h = 1e-10
        return (f(x + h*1j, *arg)/h).imag
    x0 = distfn.ppf([0.25, 0.51, 0.75], *arg)
    x_cast = [x0.astype(tp) for tp in (np.int_, np.float16, np.float32,
                                       np.float64)]
    for x in x_cast:
        # casting may have clipped the values, exclude those
        distfn._argcheck(*arg)
        x = x[(distfn.a < x) & (x < distfn.b)]
        pdf, cdf, sf = distfn.pdf(x, *arg), distfn.cdf(x, *arg), distfn.sf(x, *arg)
        # cdf' = pdf, (log cdf)' = pdf/cdf, sf' = -pdf, (log sf)' = -pdf/sf
        assert_allclose(deriv(distfn.cdf, x, *arg), pdf, rtol=1e-5)
        assert_allclose(deriv(distfn.logcdf, x, *arg), pdf/cdf, rtol=1e-5)
        assert_allclose(deriv(distfn.sf, x, *arg), -pdf, rtol=1e-5)
        assert_allclose(deriv(distfn.logsf, x, *arg), -pdf/sf, rtol=1e-5)
        assert_allclose(deriv(distfn.logpdf, x, *arg),
                        deriv(distfn.pdf, x, *arg) / distfn.pdf(x, *arg),
                        rtol=1e-5)
def check_pickling(distfn, args):
    """Round-trip the distribution through pickle, frozen and unfrozen."""
    # check that a distribution instance pickles and unpickles
    # pay special attention to the random_state property

    # save the random_state (restore later)
    rndm = distfn.random_state

    # check unfrozen
    distfn.random_state = 1234
    distfn.rvs(*args, size=8)
    s = pickle.dumps(distfn)
    r0 = distfn.rvs(*args, size=8)
    unpickled = pickle.loads(s)
    # the unpickled copy must continue the same random stream
    r1 = unpickled.rvs(*args, size=8)
    npt.assert_equal(r0, r1)

    # also smoke test some methods
    medians = [distfn.ppf(0.5, *args), unpickled.ppf(0.5, *args)]
    npt.assert_equal(medians[0], medians[1])
    npt.assert_equal(distfn.cdf(medians[0], *args),
                     unpickled.cdf(medians[1], *args))

    # check frozen pickling/unpickling with rvs
    frozen_dist = distfn(*args)
    pkl = pickle.dumps(frozen_dist)
    unpickled = pickle.loads(pkl)
    r0 = frozen_dist.rvs(size=8)
    r1 = unpickled.rvs(size=8)
    npt.assert_equal(r0, r1)

    # check pickling/unpickling of .fit method
    if hasattr(distfn, "fit"):
        fit_function = distfn.fit
        pickled_fit_function = pickle.dumps(fit_function)
        unpickled_fit_function = pickle.loads(pickled_fit_function)
        assert fit_function.__name__ == unpickled_fit_function.__name__ == "fit"

    # restore the random_state
    distfn.random_state = rndm
def check_freezing(distfn, args):
    """Regression test for gh-11089: freezing with loc/scale must not fail.

    Discrete distributions take no ``scale``, so only ``loc`` is passed there.
    """
    if isinstance(distfn, stats.rv_continuous):
        frozen = distfn(*args, loc=1, scale=2)
    else:
        frozen = distfn(*args, loc=1)
    plain = distfn(*args)
    assert frozen.a == plain.a
    assert frozen.b == plain.b
def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype):
    """``rvs`` must broadcast its arguments to the expected output `shape`."""
    np.random.seed(123)
    sample = distfunc.rvs(*allargs)
    assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname)
    if not shape_only:
        # re-seed and replay the draws one-at-a-time via vectorize; the
        # elementwise draws must equal the broadcast sample
        rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs), otypes=otype)
        np.random.seed(123)
        expected = rvs(*allargs)
        assert_allclose(sample, expected, rtol=1e-13)
| 12,269
| 34.057143
| 83
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_entropy.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose
# avoid new uses of the following; prefer assert/np.testing.assert_allclose
from numpy.testing import (assert_, assert_almost_equal,
assert_array_almost_equal)
import pytest
from pytest import raises as assert_raises
import scipy.stats as stats
class TestEntropy:
def test_entropy_positive(self):
# See ticket #497
pk = [0.5, 0.2, 0.3]
qk = [0.1, 0.25, 0.65]
eself = stats.entropy(pk, pk)
edouble = stats.entropy(pk, qk)
assert_(0.0 == eself)
assert_(edouble >= 0.0)
def test_entropy_base(self):
pk = np.ones(16, float)
S = stats.entropy(pk, base=2.)
assert_(abs(S - 4.) < 1.e-5)
qk = np.ones(16, float)
qk[:8] = 2.
S = stats.entropy(pk, qk)
S2 = stats.entropy(pk, qk, base=2.)
assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
def test_entropy_zero(self):
# Test for PR-479
assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
decimal=12)
def test_entropy_2d(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[np.inf, 0.18609809])
pk[0][0] = 0.0
assert_array_almost_equal(stats.entropy(pk, qk),
[0.17403988, 0.18609809])
def test_entropy_base_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=1),
[0.63651417, 0.63651417, 0.66156324])
def test_entropy_2d_nondefault_axis(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=1),
[0.231049, 0.231049, 0.127706])
def test_entropy_raises_value_error(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.1, 0.2], [0.6, 0.3]]
assert_raises(ValueError, stats.entropy, pk, qk)
def test_base_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
assert_array_almost_equal(stats.entropy(pk, axis=0),
stats.entropy(pk))
def test_entropy_with_axis_0_is_equal_to_default(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk, axis=0),
stats.entropy(pk, qk))
def test_base_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
assert_array_almost_equal(stats.entropy(pk.T).T,
stats.entropy(pk, axis=1))
def test_entropy_transposed(self):
pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
assert_array_almost_equal(stats.entropy(pk.T, qk.T).T,
stats.entropy(pk, qk, axis=1))
def test_entropy_broadcasting(self):
np.random.rand(0)
x = np.random.rand(3)
y = np.random.rand(2, 1)
res = stats.entropy(x, y, axis=-1)
assert_equal(res[0], stats.entropy(x, y[0]))
assert_equal(res[1], stats.entropy(x, y[1]))
def test_entropy_shape_mismatch(self):
x = np.random.rand(10, 1, 12)
y = np.random.rand(11, 2)
message = "shape mismatch: objects cannot be broadcast"
with pytest.raises(ValueError, match=message):
stats.entropy(x, y)
def test_input_validation(self):
x = np.random.rand(10)
message = "`base` must be a positive number."
with pytest.raises(ValueError, match=message):
stats.entropy(x, base=-2)
class TestDifferentialEntropy:
"""
Vasicek results are compared with the R package vsgoftest.
# library(vsgoftest)
#
# samp <- c(<values>)
# entropy.estimate(x = samp, window = <window_length>)
"""
    def test_differential_entropy_vasicek(self):
        """Vasicek estimates must match the R ``vsgoftest`` reference values."""
        random_state = np.random.RandomState(0)
        values = random_state.standard_normal(100)
        entropy = stats.differential_entropy(values, method='vasicek')
        assert_allclose(entropy, 1.342551, rtol=1e-6)
        entropy = stats.differential_entropy(values, window_length=1,
                                             method='vasicek')
        assert_allclose(entropy, 1.122044, rtol=1e-6)
        entropy = stats.differential_entropy(values, window_length=8,
                                             method='vasicek')
        assert_allclose(entropy, 1.349401, rtol=1e-6)
    def test_differential_entropy_vasicek_2d_nondefault_axis(self):
        """Row-wise (axis=1) Vasicek estimates against vsgoftest references."""
        random_state = np.random.RandomState(0)
        values = random_state.standard_normal((3, 100))
        entropy = stats.differential_entropy(values, axis=1, method='vasicek')
        assert_allclose(
            entropy,
            [1.342551, 1.341826, 1.293775],
            rtol=1e-6,
        )
        entropy = stats.differential_entropy(values, axis=1, window_length=1,
                                             method='vasicek')
        assert_allclose(
            entropy,
            [1.122044, 1.102944, 1.129616],
            rtol=1e-6,
        )
        entropy = stats.differential_entropy(values, axis=1, window_length=8,
                                             method='vasicek')
        assert_allclose(
            entropy,
            [1.349401, 1.338514, 1.292332],
            rtol=1e-6,
        )
    def test_differential_entropy_raises_value_error(self):
        """Out-of-range window lengths must raise an informative ValueError."""
        random_state = np.random.RandomState(0)
        values = random_state.standard_normal((3, 100))
        error_str = (
            r"Window length \({window_length}\) must be positive and less "
            r"than half the sample size \({sample_size}\)."
        )
        sample_size = values.shape[1]
        # -1 and 0 are not positive; sample_size//2 and sample_size are too big
        for window_length in {-1, 0, sample_size//2, sample_size}:
            formatted_error_str = error_str.format(
                window_length=window_length,
                sample_size=sample_size,
            )
            with assert_raises(ValueError, match=formatted_error_str):
                stats.differential_entropy(
                    values,
                    window_length=window_length,
                    axis=1,
                )
    def test_base_differential_entropy_with_axis_0_is_equal_to_default(self):
        """axis=0 must be the default axis for 2-d input."""
        random_state = np.random.RandomState(0)
        values = random_state.standard_normal((100, 3))
        entropy = stats.differential_entropy(values, axis=0)
        default_entropy = stats.differential_entropy(values)
        assert_allclose(entropy, default_entropy)
    def test_base_differential_entropy_transposed(self):
        """Transposing the input and the axis must give the same result."""
        random_state = np.random.RandomState(0)
        values = random_state.standard_normal((3, 100))
        assert_allclose(
            stats.differential_entropy(values.T).T,
            stats.differential_entropy(values, axis=1),
        )
    def test_input_validation(self):
        """`base` must be a positive number and `method` a recognized name."""
        x = np.random.rand(10)
        message = "`base` must be a positive number or `None`."
        with pytest.raises(ValueError, match=message):
            stats.differential_entropy(x, base=-2)
        message = "`method` must be one of..."
        with pytest.raises(ValueError, match=message):
            stats.differential_entropy(x, method='ekki-ekki')
    @pytest.mark.parametrize('method', ['vasicek', 'van es',
                                        'ebrahimi', 'correa'])
    def test_consistency(self, method):
        """Each estimator should approach the true normal entropy for a
        large sample (consistency)."""
        # test that method is a consistent estimator
        n = 10000 if method == 'correa' else 1000000
        rvs = stats.norm.rvs(size=n, random_state=0)
        expected = stats.norm.entropy()
        res = stats.differential_entropy(rvs, method=method)
        assert_allclose(res, expected, rtol=0.005)
    # values from differential_entropy reference [6], table 1, n=50, m=7
    norm_rmse_std_cases = {  # method: (RMSE, STD)
        'vasicek': (0.198, 0.109),
        'van es': (0.212, 0.110),
        'correa': (0.135, 0.112),
        'ebrahimi': (0.128, 0.109)
    }
    @pytest.mark.parametrize('method, expected',
                             list(norm_rmse_std_cases.items()))
    def test_norm_rmse_std(self, method, expected):
        """RMSE and standard deviation over many normal samples must match
        the published table values (also exercises vectorization)."""
        # test that RMSE and standard deviation of estimators matches values
        # given in differential_entropy reference [6]. Incidentally, also
        # tests vectorization.
        reps, n, m = 10000, 50, 7
        rmse_expected, std_expected = expected
        rvs = stats.norm.rvs(size=(reps, n), random_state=0)
        true_entropy = stats.norm.entropy()
        res = stats.differential_entropy(rvs, window_length=m,
                                         method=method, axis=-1)
        assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)),
                        rmse_expected, atol=0.005)
        assert_allclose(np.std(res), std_expected, atol=0.002)
    # values from differential_entropy reference [6], table 2, n=50, m=7
    expon_rmse_std_cases = {  # method: (RMSE, STD)
        'vasicek': (0.194, 0.148),
        'van es': (0.179, 0.149),
        'correa': (0.155, 0.152),
        'ebrahimi': (0.151, 0.148)
    }
    @pytest.mark.parametrize('method, expected',
                             list(expon_rmse_std_cases.items()))
    def test_expon_rmse_std(self, method, expected):
        """Same as test_norm_rmse_std, but against the exponential-sample
        table of the reference."""
        # test that RMSE and standard deviation of estimators matches values
        # given in differential_entropy reference [6]. Incidentally, also
        # tests vectorization.
        reps, n, m = 10000, 50, 7
        rmse_expected, std_expected = expected
        rvs = stats.expon.rvs(size=(reps, n), random_state=0)
        true_entropy = stats.expon.entropy()
        res = stats.differential_entropy(rvs, window_length=m,
                                         method=method, axis=-1)
        assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)),
                        rmse_expected, atol=0.005)
        assert_allclose(np.std(res), std_expected, atol=0.002)
    @pytest.mark.parametrize('n, method', [(8, 'van es'),
                                           (12, 'ebrahimi'),
                                           (1001, 'vasicek')])
    def test_method_auto(self, n, method):
        """The default method selection must pick the expected estimator
        for each sample-size regime."""
        rvs = stats.norm.rvs(size=(n,), random_state=0)
        res1 = stats.differential_entropy(rvs)
        res2 = stats.differential_entropy(rvs, method=method)
        assert res1 == res2
| 11,277
| 38.296167
| 78
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_discrete_basic.py
|
import numpy.testing as npt
from numpy.testing import assert_allclose
import numpy as np
import pytest
from scipy import stats
from .common_tests import (check_normalization, check_moment,
check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_edge_support,
check_named_args, check_random_state_property,
check_pickling, check_rvs_broadcast,
check_freezing,)
from scipy.stats._distr_params import distdiscrete, invdistdiscrete
from scipy.stats._distn_infrastructure import rv_discrete_frozen
# Append a generic sample distribution (rv_discrete built from explicit
# values) so the tests below also cover the rv_sample code path.
vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
distdiscrete += [[stats.rv_discrete(values=vals), ()]]
# For these distributions, test_discrete_basic only runs with test mode full
distslow = {'zipfian', 'nhypergeom'}
def cases_test_discrete_basic():
    """Yield ``(distname, args, first_case)`` triples for
    ``test_discrete_basic``, marking the entries of ``distslow`` with the
    slow marker."""
    already_seen = set()
    for name, shapes in distdiscrete:
        if name in distslow:
            yield pytest.param(name, shapes, name, marks=pytest.mark.slow)
        else:
            yield name, shapes, name not in already_seen
        already_seen.add(name)
@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic())
def test_discrete_basic(distname, arg, first_case):
    """Battery of basic consistency checks for each discrete distribution:
    cdf/ppf round trips, pmf/cdf agreement, sf/isf consistency, support
    edges, a chisquare goodness-of-fit test on a random sample, and (for
    the first case of each distribution) named-args, pickling, freezing
    and entropy checks."""
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    np.random.seed(9765456)
    rvs = distfn.rvs(size=2000, *arg)
    supp = np.unique(rvs)
    m, v = distfn.stats(*arg)
    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')
    check_pmf_cdf(distfn, arg, distname)
    check_oth(distfn, arg, supp, distname + ' oth')
    check_edge_support(distfn, arg)
    alpha = 0.01
    check_discrete_chisquare(distfn, arg, rvs, alpha,
                             distname + ' chisquare')
    if first_case:
        locscale_defaults = (0,)
        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
                 distfn.logsf]
        # make sure arguments are within support
        # for some distributions, this needs to be overridden
        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0,
                  'nchypergeom_wallenius': 6}
        k = spec_k.get(distname, 1)
        check_named_args(distfn, k, arg, locscale_defaults, meths)
        if distname != 'sample distribution':
            check_scale_docstring(distfn)
        check_random_state_property(distfn, arg)
        check_pickling(distfn, arg)
        check_freezing(distfn, arg)
        # Entropy
        check_entropy(distfn, arg, distname)
        if distfn.__class__._entropy != stats.rv_discrete._entropy:
            check_private_entropy(distfn, arg, stats.rv_discrete)
@pytest.mark.parametrize('distname,arg', distdiscrete)
def test_moments(distname, arg):
    """Cross-check the `stats`/`moment` methods against expectation-based
    computations (mean, variance, skew, kurtosis) and normalization."""
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    m, v, s, k = distfn.stats(*arg, moments='mvsk')
    check_normalization(distfn, arg, distname)
    # compare `stats` and `moment` methods
    check_moment(distfn, arg, m, v, distname)
    check_mean_expect(distfn, arg, m, distname)
    check_var_expect(distfn, arg, m, v, distname)
    check_skew_expect(distfn, arg, m, v, s, distname)
    if distname not in ['zipf', 'yulesimon']:
        check_kurt_expect(distfn, arg, m, v, k, distname)
    # frozen distr moments
    check_moment_frozen(distfn, arg, m, 1)
    check_moment_frozen(distfn, arg, v+m*m, 2)
@pytest.mark.parametrize('dist,shape_args', distdiscrete)
def test_rvs_broadcast(dist, shape_args):
    """Check that rvs() broadcasts loc and the shape parameters correctly.

    If shape_only is True, it means the _rvs method of the
    distribution uses more than one random number to generate a random
    variate.  That means the result of using rvs with broadcasting or
    with a nontrivial size will not necessarily be the same as using the
    numpy.vectorize'd version of rvs(), so we can only compare the shapes
    of the results, not the values.
    """
    # Whether or not a distribution is in the following list is an
    # implementation detail of the distribution, not a requirement.  If
    # the implementation the rvs() method of a distribution changes, this
    # test might also have to be changed.
    shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace',
                          'nchypergeom_fisher', 'nchypergeom_wallenius']
    try:
        distfunc = getattr(stats, dist)
    except TypeError:
        distfunc = dist
        dist = f'rv_discrete(values=({dist.xk!r}, {dist.pk!r}))'
    loc = np.zeros(2)
    nargs = distfunc.numargs
    allargs = []
    bshape = []
    # Generate shape parameter arguments...
    for k in range(nargs):
        shp = (k + 3,) + (1,)*(k + 1)
        param_val = shape_args[k]
        allargs.append(np.full(shp, param_val))
        bshape.insert(0, shp[0])
    allargs.append(loc)
    bshape.append(loc.size)
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.
    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_])
@pytest.mark.parametrize('dist,args', distdiscrete)
def test_ppf_with_loc(dist, args):
    """``ppf(0)`` must give the point one step below the (shifted) support
    and ``ppf(1)`` the shifted upper support edge, for negative, zero and
    positive relocations."""
    try:
        frozen = getattr(stats, dist)
    except TypeError:
        frozen = dist
    # negative, zero and positive relocations
    np.random.seed(1942349)
    shifts = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    lower, upper = frozen.support(*args)
    for shift in shifts:
        expected = [lower - 1 + shift, upper + shift]
        actual = [frozen.ppf(0.0, *args, loc=shift),
                  frozen.ppf(1.0, *args, loc=shift)]
        npt.assert_array_equal(expected, actual)
@pytest.mark.parametrize('dist, args', distdiscrete)
def test_isf_with_loc(dist, args):
    """isf(0)/isf(1) must map to the (shifted) support edges, for scalar
    and broadcast array values of ``loc``."""
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        distfn = dist
    # check with a negative, no and positive relocation.
    np.random.seed(1942349)
    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    _a, _b = distfn.support(*args)
    for loc in re_locs:
        expected = _b + loc, _a - 1 + loc
        res = distfn.isf(0., *args, loc=loc), distfn.isf(1., *args, loc=loc)
        npt.assert_array_equal(expected, res)
    # test broadcasting behaviour
    re_locs = [np.random.randint(-10, -1, size=(5, 3)),
               np.zeros((5, 3)),
               np.random.randint(1, 10, size=(5, 3))]
    _a, _b = distfn.support(*args)
    for loc in re_locs:
        expected = _b + loc, _a - 1 + loc
        res = distfn.isf(0., *args, loc=loc), distfn.isf(1., *args, loc=loc)
        npt.assert_array_equal(expected, res)
def check_cdf_ppf(distfn, arg, supp, msg):
    """Check that ppf is the generalized inverse of the (step-function) cdf
    on the integer support points `supp`."""
    # supp is assumed to be an array of integers in the support of distfn
    # (but not necessarily all the integers in the support).
    # This test assumes that the PMF of any value in the support of the
    # distribution is greater than 1e-8.
    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
    cdf_supp = distfn.cdf(supp, *arg)
    # In very rare cases, the finite precision calculation of ppf(cdf(supp))
    # can produce an array in which an element is off by one.  We nudge the
    # CDF values down by 15 ULPs help to avoid this.
    cdf_supp0 = cdf_supp - 15*np.spacing(cdf_supp)
    npt.assert_array_equal(distfn.ppf(cdf_supp0, *arg),
                           supp, msg + '-roundtrip')
    # Repeat the same calculation, but with the CDF values decreased by 1e-8.
    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
                           supp, msg + '-roundtrip')
    if not hasattr(distfn, 'xk'):
        _a, _b = distfn.support(*arg)
        supp1 = supp[supp < _b]
        npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
                               supp1 + distfn.inc, msg + ' ppf-cdf-next')
def check_pmf_cdf(distfn, arg, distname):
    """Check that the cumulative sum of the pmf matches cdf differences on
    a range of support points, that pmf vanishes between integers, and that
    the same holds for a frozen distribution with a nonzero loc."""
    if hasattr(distfn, 'xk'):
        index = distfn.xk
    else:
        startind = int(distfn.ppf(0.01, *arg) - 1)
        index = list(range(startind, startind + 10))
    cdfs = distfn.cdf(index, *arg)
    pmfs_cum = distfn.pmf(index, *arg).cumsum()
    atol, rtol = 1e-10, 1e-10
    if distname == 'skellam':    # ncx2 accuracy
        atol, rtol = 1e-5, 1e-5
    npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0],
                        atol=atol, rtol=rtol)
    # also check that pmf at non-integral k is zero
    k = np.asarray(index)
    k_shifted = k[:-1] + np.diff(k)/2
    npt.assert_equal(distfn.pmf(k_shifted, *arg), 0)
    # better check frozen distributions, and also when loc != 0
    loc = 0.5
    dist = distfn(loc=loc, *arg)
    npt.assert_allclose(dist.pmf(k[1:] + loc), np.diff(dist.cdf(k + loc)))
    npt.assert_equal(dist.pmf(k_shifted + loc), 0)
def check_moment_frozen(distfn, arg, m, k):
    """Check that the k-th moment of the frozen distribution equals ``m``."""
    frozen = distfn(*arg)
    npt.assert_allclose(frozen.moment(k), m, atol=1e-10, rtol=1e-10)
def check_oth(distfn, arg, supp, msg):
    """Consistency checks between sf/cdf and isf/ppf, plus a sanity check
    that the point returned by isf(0.5) splits the probability mass."""
    sf_vals = distfn.sf(supp, *arg)
    cdf_vals = distfn.cdf(supp, *arg)
    npt.assert_allclose(sf_vals, 1. - cdf_vals, atol=1e-10, rtol=1e-10)
    probs = np.linspace(0.01, 0.99, 20)
    npt.assert_allclose(distfn.isf(probs, *arg),
                        distfn.ppf(1. - probs, *arg),
                        atol=1e-10, rtol=1e-10)
    med = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(med - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(med + 1, *arg) > 0.5)
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    """Perform chisquare test for random sample of a discrete distribution

    Parameters
    ----------
    distfn : rv_discrete
        the discrete distribution being checked
    arg : sequence
        shape parameters of the distribution
    rvs : ndarray
        random sample drawn from ``distfn``, binned into the constructed
        intervals to obtain the observed frequencies
    alpha : float
        significance level, threshold for p-value
    msg : str
        identifier included in the assertion message on failure

    Raises
    ------
    AssertionError
        if the chisquare-test p-value is not greater than ``alpha``
    """
    wsupp = 0.05
    # construct intervals with minimum mass `wsupp`.
    # intervals are left-half-open as in a cdf difference
    _a, _b = distfn.support(*arg)
    lo = int(max(_a, -1000))
    high = int(min(_b, 1000)) + 1
    distsupport = range(lo, high)
    last = 0
    distsupp = [lo]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii, *arg)
        if current - last >= wsupp - 1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            if current > (1 - wsupp):
                break
    if distsupp[-1] < _b:
        distsupp.append(_b)
        distmass.append(1 - last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)
    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp + 1e-8
    histsupp[0] = _a
    # find sample frequencies and perform chisquare test
    freq, hsupp = np.histogram(rvs, histsupp)
    chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)
    npt.assert_(pval > alpha,
                'chisquare - test for %s at arg = %s with pval = %s' %
                (msg, str(arg), str(pval)))
def check_scale_docstring(distfn):
    """Discrete distributions have no ``scale`` parameter; make sure the
    generated docstring does not mention one."""
    doc = distfn.__doc__
    # Docstrings can be stripped if interpreter is run with -OO
    if doc is not None:
        npt.assert_('scale' not in doc)
@pytest.mark.parametrize('method', ['pmf', 'logpmf', 'cdf', 'logcdf',
                                    'sf', 'logsf', 'ppf', 'isf'])
@pytest.mark.parametrize('distname, args', distdiscrete)
def test_methods_with_lists(method, distname, args):
    """Test that the discrete distributions can accept Python lists
    as arguments: the vectorized call must agree with the scalar calls."""
    try:
        dist = getattr(stats, distname)
    except TypeError:
        return
    if method in ['ppf', 'isf']:
        z = [0.1, 0.2]
    else:
        z = [0, 1]
    p2 = [[p]*2 for p in args]
    loc = [0, 1]
    # BUG FIX: the original body always called `dist.pmf`, ignoring the
    # `method` parameter, so only `pmf` was actually being exercised.
    # Look the method up by name instead.
    dist_method = getattr(dist, method)
    result = dist_method(z, *p2, loc=loc)
    npt.assert_allclose(result,
                        [dist_method(*v) for v in zip(z, *p2, loc)],
                        rtol=1e-15, atol=1e-15)
@pytest.mark.parametrize('distname, args', invdistdiscrete)
def test_cdf_gh13280_regression(distname, args):
    """gh-13280: invalid shape parameters must produce NaN from ``cdf``."""
    distribution = getattr(stats, distname)
    grid = np.arange(-2, 15)
    npt.assert_equal(distribution.cdf(grid, *args), np.nan)
def cases_test_discrete_integer_shapes():
    """Yield ``(distname, shape_name, shapes)`` cases for every shape
    parameter that is required to be integral."""
    # distributions parameters that are only allowed to be integral when
    # fitting, but are allowed to be real as input to PDF, etc.
    integrality_exceptions = {'nbinom': {'n'}}
    seen = set()
    for distname, shapes in distdiscrete:
        if distname in seen:
            continue
        seen.add(distname)
        try:
            dist = getattr(stats, distname)
        except TypeError:
            continue
        shape_info = dist._shape_info()
        for i, shape in enumerate(shape_info):
            if (shape.name in integrality_exceptions.get(distname, set()) or
                    not shape.integrality):
                continue
            yield distname, shape.name, shapes
@pytest.mark.parametrize('distname, shapename, shapes',
                         cases_test_discrete_integer_shapes())
def test_integer_shapes(distname, shapename, shapes):
    """Shape parameters declared integral must yield NaN pmf values for
    non-integral inputs and finite values for integral ones."""
    dist = getattr(stats, distname)
    shape_info = dist._shape_info()
    shape_names = [shape.name for shape in shape_info]
    i = shape_names.index(shapename)  # this element of params must be integral
    shapes_copy = list(shapes)
    valid_shape = shapes[i]
    invalid_shape = valid_shape - 0.5  # arbitrary non-integral value
    new_valid_shape = valid_shape - 1
    shapes_copy[i] = [[valid_shape], [invalid_shape], [new_valid_shape]]
    a, b = dist.support(*shapes)
    x = np.round(np.linspace(a, b, 5))
    pmf = dist.pmf(x, *shapes_copy)
    assert not np.any(np.isnan(pmf[0, :]))
    assert np.all(np.isnan(pmf[1, :]))
    assert not np.any(np.isnan(pmf[2, :]))
def test_frozen_attributes():
    """gh-14827: frozen discrete distributions must expose pmf but not
    pdf/logpdf; monkeypatching a `pdf` attribute must not change the
    frozen class used."""
    # gh-14827 reported that all frozen distributions had both pmf and pdf
    # attributes; continuous should have pdf and discrete should have pmf.
    message = "'rv_discrete_frozen' object has no attribute"
    with pytest.raises(AttributeError, match=message):
        stats.binom(10, 0.5).pdf
    with pytest.raises(AttributeError, match=message):
        stats.binom(10, 0.5).logpdf
    stats.binom.pdf = "herring"
    frozen_binom = stats.binom(10, 0.5)
    assert isinstance(frozen_binom, rv_discrete_frozen)
    delattr(stats.binom, 'pdf')
@pytest.mark.parametrize('distname, shapes', distdiscrete)
def test_interval(distname, shapes):
    """gh-11026: at confidence 1 the interval is ``(a-1, b)`` where
    ``(a, b)`` is the support -- the left end deliberately lies one step
    below the support.  The values are not incorrect, merely unintuitive;
    confirm this behavior for every distribution."""
    dist = getattr(stats, distname) if isinstance(distname, str) else distname
    lo, hi = dist.support(*shapes)
    expected = (lo - 1, hi)
    npt.assert_equal(dist.ppf([0, 1], *shapes), expected)
    npt.assert_equal(dist.isf([1, 0], *shapes), expected)
    npt.assert_equal(dist.interval(1, *shapes), expected)
@pytest.mark.xfail_on_32bit("Sensible to machine precision")
def test_rv_sample():
    """Thoroughly exercise every method of an ``rv_discrete`` built from
    explicit values (rv_sample) against direct computations from the
    (xk, pk) pairs; also checks that gh-3758 is resolved."""
    # Generate a random discrete distribution
    rng = np.random.default_rng(98430143469)
    xk = np.sort(rng.random(10) * 10)
    pk = rng.random(10)
    pk /= np.sum(pk)
    dist = stats.rv_discrete(values=(xk, pk))
    # Generate points to the left and right of xk
    xk_left = (np.array([0] + xk[:-1].tolist()) + xk)/2
    xk_right = (np.array(xk[1:].tolist() + [xk[-1]+1]) + xk)/2
    # Generate points to the left and right of cdf
    cdf2 = np.cumsum(pk)
    cdf2_left = (np.array([0] + cdf2[:-1].tolist()) + cdf2)/2
    cdf2_right = (np.array(cdf2[1:].tolist() + [1]) + cdf2)/2
    # support - leftmost and rightmost xk
    a, b = dist.support()
    assert_allclose(a, xk[0])
    assert_allclose(b, xk[-1])
    # pmf - supported only on the xk
    assert_allclose(dist.pmf(xk), pk)
    assert_allclose(dist.pmf(xk_right), 0)
    assert_allclose(dist.pmf(xk_left), 0)
    # logpmf is log of the pmf; log(0) = -np.inf
    with np.errstate(divide='ignore'):
        assert_allclose(dist.logpmf(xk), np.log(pk))
        assert_allclose(dist.logpmf(xk_right), -np.inf)
        assert_allclose(dist.logpmf(xk_left), -np.inf)
    # cdf - the cumulative sum of the pmf
    assert_allclose(dist.cdf(xk), cdf2)
    assert_allclose(dist.cdf(xk_right), cdf2)
    assert_allclose(dist.cdf(xk_left), [0]+cdf2[:-1].tolist())
    with np.errstate(divide='ignore'):
        assert_allclose(dist.logcdf(xk), np.log(dist.cdf(xk)),
                        atol=1e-15)
        assert_allclose(dist.logcdf(xk_right), np.log(dist.cdf(xk_right)),
                        atol=1e-15)
        assert_allclose(dist.logcdf(xk_left), np.log(dist.cdf(xk_left)),
                        atol=1e-15)
    # sf is 1-cdf
    assert_allclose(dist.sf(xk), 1-dist.cdf(xk))
    assert_allclose(dist.sf(xk_right), 1-dist.cdf(xk_right))
    assert_allclose(dist.sf(xk_left), 1-dist.cdf(xk_left))
    with np.errstate(divide='ignore'):
        assert_allclose(dist.logsf(xk), np.log(dist.sf(xk)),
                        atol=1e-15)
        assert_allclose(dist.logsf(xk_right), np.log(dist.sf(xk_right)),
                        atol=1e-15)
        assert_allclose(dist.logsf(xk_left), np.log(dist.sf(xk_left)),
                        atol=1e-15)
    # ppf
    assert_allclose(dist.ppf(cdf2), xk)
    assert_allclose(dist.ppf(cdf2_left), xk)
    assert_allclose(dist.ppf(cdf2_right)[:-1], xk[1:])
    assert_allclose(dist.ppf(0), a - 1)
    assert_allclose(dist.ppf(1), b)
    # isf
    sf2 = dist.sf(xk)
    assert_allclose(dist.isf(sf2), xk)
    assert_allclose(dist.isf(1-cdf2_left), dist.ppf(cdf2_left))
    assert_allclose(dist.isf(1-cdf2_right), dist.ppf(cdf2_right))
    assert_allclose(dist.isf(0), b)
    assert_allclose(dist.isf(1), a - 1)
    # interval is (ppf(alpha/2), isf(alpha/2))
    ps = np.linspace(0.01, 0.99, 10)
    int2 = dist.ppf(ps/2), dist.isf(ps/2)
    assert_allclose(dist.interval(1-ps), int2)
    assert_allclose(dist.interval(0), dist.median())
    assert_allclose(dist.interval(1), (a-1, b))
    # median is simply ppf(0.5)
    med2 = dist.ppf(0.5)
    assert_allclose(dist.median(), med2)
    # all four stats (mean, var, skew, and kurtosis) from the definitions
    mean2 = np.sum(xk*pk)
    var2 = np.sum((xk - mean2)**2 * pk)
    skew2 = np.sum((xk - mean2)**3 * pk) / var2**(3/2)
    kurt2 = np.sum((xk - mean2)**4 * pk) / var2**2 - 3
    assert_allclose(dist.mean(), mean2)
    assert_allclose(dist.std(), np.sqrt(var2))
    assert_allclose(dist.var(), var2)
    assert_allclose(dist.stats(moments='mvsk'), (mean2, var2, skew2, kurt2))
    # noncentral moment against definition
    mom3 = np.sum((xk**3) * pk)
    assert_allclose(dist.moment(3), mom3)
    # expect - check against moments
    assert_allclose(dist.expect(lambda x: 1), 1)
    assert_allclose(dist.expect(), mean2)
    assert_allclose(dist.expect(lambda x: x**3), mom3)
    # entropy is the negative of the expected value of log(p)
    with np.errstate(divide='ignore'):
        assert_allclose(-dist.expect(lambda x: dist.logpmf(x)), dist.entropy())
    # RVS is just ppf of uniform random variates
    rng = np.random.default_rng(98430143469)
    rvs = dist.rvs(size=100, random_state=rng)
    rng = np.random.default_rng(98430143469)
    rvs0 = dist.ppf(rng.random(size=100))
    assert_allclose(rvs, rvs0)
| 19,847
| 35.552486
| 80
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_kdeoth.py
|
from scipy import stats, linalg, integrate
import numpy as np
from numpy.testing import (assert_almost_equal, assert_, assert_equal,
assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_allclose)
import pytest
from pytest import raises as assert_raises
def test_kde_1d():
    """Compare a 1-D Gaussian KDE of a normal sample against the normal
    pdf, and check the integrate_* helpers against grid sums."""
    # some basic tests comparing to normal distribution
    np.random.seed(8765678)
    n_basesample = 500
    xn = np.random.randn(n_basesample)
    xnmean = xn.mean()
    xnstd = xn.std(ddof=1)
    # get kde for original sample
    gkde = stats.gaussian_kde(xn)
    # evaluate the density function for the kde for some points
    xs = np.linspace(-7,7,501)
    kdepdf = gkde.evaluate(xs)
    normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
    intervall = xs[1] - xs[0]
    assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
    prob1 = gkde.integrate_box_1d(xnmean, np.inf)
    prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
    assert_almost_equal(prob1, 0.5, decimal=1)
    assert_almost_equal(prob2, 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
    assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
    assert_almost_equal(gkde.integrate_kde(gkde),
                        (kdepdf**2).sum()*intervall, decimal=2)
    assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
                        (kdepdf*normpdf).sum()*intervall, decimal=2)
def test_kde_1d_weighted():
    """Same checks as test_kde_1d, but with random sample weights and the
    corresponding weighted mean/std as the reference normal."""
    # some basic tests comparing to normal distribution
    np.random.seed(8765678)
    n_basesample = 500
    xn = np.random.randn(n_basesample)
    wn = np.random.rand(n_basesample)
    xnmean = np.average(xn, weights=wn)
    xnstd = np.sqrt(np.average((xn-xnmean)**2, weights=wn))
    # get kde for original sample
    gkde = stats.gaussian_kde(xn, weights=wn)
    # evaluate the density function for the kde for some points
    xs = np.linspace(-7,7,501)
    kdepdf = gkde.evaluate(xs)
    normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
    intervall = xs[1] - xs[0]
    assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
    prob1 = gkde.integrate_box_1d(xnmean, np.inf)
    prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
    assert_almost_equal(prob1, 0.5, decimal=1)
    assert_almost_equal(prob2, 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
    assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
    assert_almost_equal(gkde.integrate_kde(gkde),
                        (kdepdf**2).sum()*intervall, decimal=2)
    assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
                        (kdepdf*normpdf).sum()*intervall, decimal=2)
@pytest.mark.slow
def test_kde_2d():
    """Compare a 2-D Gaussian KDE of a multivariate-normal sample against
    the analytic density on a grid, plus the integrate_* helpers."""
    # some basic tests comparing to normal distribution
    np.random.seed(8765678)
    n_basesample = 500
    mean = np.array([1.0, 3.0])
    covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
    # Need transpose (shape (2, 500)) for kde
    xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
    # get kde for original sample
    gkde = stats.gaussian_kde(xn)
    # evaluate the density function for the kde for some points
    x, y = np.mgrid[-7:7:500j, -7:7:500j]
    grid_coords = np.vstack([x.ravel(), y.ravel()])
    kdepdf = gkde.evaluate(grid_coords)
    kdepdf = kdepdf.reshape(500, 500)
    normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
    intervall = y.ravel()[1] - y.ravel()[0]
    assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
    small = -1e100
    large = 1e100
    prob1 = gkde.integrate_box([small, mean[1]], [large, large])
    prob2 = gkde.integrate_box([small, small], [large, mean[1]])
    assert_almost_equal(prob1, 0.5, decimal=1)
    assert_almost_equal(prob2, 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_kde(gkde),
                        (kdepdf**2).sum()*(intervall**2), decimal=2)
    assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
                        (kdepdf*normpdf).sum()*(intervall**2), decimal=2)
@pytest.mark.slow
def test_kde_2d_weighted():
    """Same checks as test_kde_2d, with random sample weights."""
    # some basic tests comparing to normal distribution
    np.random.seed(8765678)
    n_basesample = 500
    mean = np.array([1.0, 3.0])
    covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
    # Need transpose (shape (2, 500)) for kde
    xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
    wn = np.random.rand(n_basesample)
    # get kde for original sample
    gkde = stats.gaussian_kde(xn, weights=wn)
    # evaluate the density function for the kde for some points
    x, y = np.mgrid[-7:7:500j, -7:7:500j]
    grid_coords = np.vstack([x.ravel(), y.ravel()])
    kdepdf = gkde.evaluate(grid_coords)
    kdepdf = kdepdf.reshape(500, 500)
    normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
    intervall = y.ravel()[1] - y.ravel()[0]
    assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
    small = -1e100
    large = 1e100
    prob1 = gkde.integrate_box([small, mean[1]], [large, large])
    prob2 = gkde.integrate_box([small, small], [large, mean[1]])
    assert_almost_equal(prob1, 0.5, decimal=1)
    assert_almost_equal(prob2, 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_kde(gkde),
                        (kdepdf**2).sum()*(intervall**2), decimal=2)
    assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
                        (kdepdf*normpdf).sum()*(intervall**2), decimal=2)
def test_kde_bandwidth_method():
    """The three ways of specifying the bandwidth (default string, a
    callable, a scalar) must produce identical densities, and an unknown
    string must raise ValueError."""
    def scotts_factor(kde_obj):
        """Same as default, just check that it works."""
        return np.power(kde_obj.n, -1./(kde_obj.d+4))
    np.random.seed(8765678)
    sample = np.random.randn(50)
    # Default
    kde_default = stats.gaussian_kde(sample)
    # Supply a callable
    kde_callable = stats.gaussian_kde(sample, bw_method=scotts_factor)
    # Supply a scalar
    kde_scalar = stats.gaussian_kde(sample, bw_method=kde_default.factor)
    grid = np.linspace(-7, 7, 51)
    reference = kde_default.evaluate(grid)
    assert_almost_equal(reference, kde_callable.evaluate(grid))
    assert_almost_equal(reference, kde_scalar.evaluate(grid))
    assert_raises(ValueError, stats.gaussian_kde, sample,
                  bw_method='wrongstring')
def test_kde_bandwidth_method_weighted():
    """Same as test_kde_bandwidth_method, but the callable uses ``neff``
    (the effective sample size used for weighted KDEs)."""
    def scotts_factor(kde_obj):
        """Same as default, just check that it works."""
        return np.power(kde_obj.neff, -1./(kde_obj.d+4))
    np.random.seed(8765678)
    n_basesample = 50
    xn = np.random.randn(n_basesample)
    # Default
    gkde = stats.gaussian_kde(xn)
    # Supply a callable
    gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
    # Supply a scalar
    gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
    xs = np.linspace(-7,7,51)
    kdepdf = gkde.evaluate(xs)
    kdepdf2 = gkde2.evaluate(xs)
    assert_almost_equal(kdepdf, kdepdf2)
    kdepdf3 = gkde3.evaluate(xs)
    assert_almost_equal(kdepdf, kdepdf3)
    assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
    """Subclass that re-implements __init__ and calls _compute_covariance
    directly (historical user pattern that must keep working)."""
    def __init__(self, dataset):
        self.dataset = np.atleast_2d(dataset)
        self.d, self.n = self.dataset.shape
        self.covariance_factor = self.scotts_factor
        self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
    """Subclass that assigns covariance_factor before delegating to the
    base-class __init__."""
    def __init__(self, dataset):
        self.covariance_factor = self.scotts_factor
        super().__init__(dataset)
class _kde_subclass4(stats.gaussian_kde):
    """Subclass that overrides covariance_factor with a custom factor."""
    def covariance_factor(self):
        return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
    """Historical subclassing patterns of gaussian_kde (custom __init__,
    custom covariance_factor, direct _compute_covariance calls) must keep
    producing the same densities."""
    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
    xs = np.linspace(-10, 10, num=50)
    # gaussian_kde itself
    kde = stats.gaussian_kde(x1)
    ys = kde(xs)
    # subclass 1
    kde1 = _kde_subclass1(x1)
    y1 = kde1(xs)
    assert_array_almost_equal_nulp(ys, y1, nulp=10)
    # subclass 2
    kde2 = _kde_subclass2(x1)
    y2 = kde2(xs)
    assert_array_almost_equal_nulp(ys, y2, nulp=10)
    # subclass 3 was removed because we have no obligation to maintain support
    # for user invocation of private methods
    # subclass 4
    kde4 = _kde_subclass4(x1)
    y4 = kde4(x1)
    y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
    assert_array_almost_equal(y_expected, y4, decimal=6)
    # Not a subclass, but check for use of _compute_covariance()
    kde5 = kde
    kde5.covariance_factor = lambda: kde.factor
    kde5._compute_covariance()
    y5 = kde5(xs)
    assert_array_almost_equal_nulp(ys, y5, nulp=10)
def test_gaussian_kde_covariance_caching():
    """Setting the bandwidth and then restoring the default must leave the
    density estimate unchanged (no stale cached covariance)."""
    data = np.array([-7, -5, 1, 4, 5], dtype=float)
    grid = np.linspace(-10, 10, num=5)
    # These expected values are from scipy 0.10, before some changes to
    # gaussian_kde. They were not compared with any external reference.
    expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
    # Set the bandwidth, then reset it to the default.
    kde = stats.gaussian_kde(data)
    kde.set_bandwidth(bw_method=0.5)
    kde.set_bandwidth(bw_method='scott')
    assert_array_almost_equal(expected, kde(grid), decimal=7)
def test_gaussian_kde_monkeypatch():
    """Ugly, but people may rely on this.  See scipy pull request 123,
    specifically the linked ML thread "Width of the Gaussian in stats.kde".
    If it is necessary to break this later on, that is to be discussed on ML.

    The old monkeypatched route to Silverman's rule must match the
    bw_method='silverman' keyword.
    """
    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
    xs = np.linspace(-10, 10, num=50)
    # The old monkeypatched version to get at Silverman's Rule.
    kde = stats.gaussian_kde(x1)
    kde.covariance_factor = kde.silverman_factor
    kde._compute_covariance()
    y1 = kde(xs)
    # The new saner version.
    kde2 = stats.gaussian_kde(x1, bw_method='silverman')
    y2 = kde2(xs)
    assert_array_almost_equal_nulp(y1, y2, nulp=10)
def test_kde_integer_input():
    """Regression test for #1181: integer datasets must be accepted."""
    data = np.arange(5)
    kde = stats.gaussian_kde(data)
    expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
    assert_array_almost_equal(kde(data), expected, decimal=6)
# Dtype names exercised by test_kde_output_dtype below; names missing from
# the running numpy build (e.g. float96/float128) are skipped at test time.
_ftypes = ['float32', 'float64', 'float96', 'float128', 'int32', 'int64']
@pytest.mark.parametrize("bw_type", _ftypes + ["scott", "silverman"])
@pytest.mark.parametrize("dtype", _ftypes)
def test_kde_output_dtype(dtype, bw_type):
    """The evaluated density dtype must follow numpy promotion of dataset,
    points, (float64-cast) weights and the bandwidth factor."""
    # Check whether the datatypes are available
    dtype = getattr(np, dtype, None)
    if bw_type in ["scott", "silverman"]:
        bw = bw_type
    else:
        bw_type = getattr(np, bw_type, None)
        bw = bw_type(3) if bw_type else None
    if any(dt is None for dt in [dtype, bw]):
        pytest.skip()
    weights = np.arange(5, dtype=dtype)
    dataset = np.arange(5, dtype=dtype)
    k = stats.gaussian_kde(dataset, bw_method=bw, weights=weights)
    points = np.arange(5, dtype=dtype)
    result = k(points)
    # weights are always cast to float64
    assert result.dtype == np.result_type(dataset, points, np.float64(weights),
                                          k.factor)
def test_pdf_logpdf_validation():
    """logpdf must reject query points whose dimension differs from the
    dataset's dimension."""
    rng = np.random.default_rng(64202298293133848336925499069837723291)
    dataset = rng.standard_normal((2, 10))
    kde = stats.gaussian_kde(dataset)
    points = rng.standard_normal((3, 10))
    msg = "points have dimension 3, dataset has dimension 2"
    with pytest.raises(ValueError, match=msg):
        kde.logpdf(points)
def test_pdf_logpdf():
    """pdf must match evaluate, and logpdf must match log(pdf), including
    the case with more query points than data."""
    np.random.seed(1)
    n_basesample = 50
    xn = np.random.randn(n_basesample)
    # Default
    gkde = stats.gaussian_kde(xn)
    xs = np.linspace(-15, 12, 25)
    pdf = gkde.evaluate(xs)
    pdf2 = gkde.pdf(xs)
    assert_almost_equal(pdf, pdf2, decimal=12)
    logpdf = np.log(pdf)
    logpdf2 = gkde.logpdf(xs)
    assert_almost_equal(logpdf, logpdf2, decimal=12)
    # There are more points than data
    gkde = stats.gaussian_kde(xs)
    pdf = np.log(gkde.evaluate(xn))
    pdf2 = gkde.logpdf(xn)
    assert_almost_equal(pdf, pdf2, decimal=12)
def test_pdf_logpdf_weighted():
    """Same as test_pdf_logpdf, with random sample weights."""
    np.random.seed(1)
    n_basesample = 50
    xn = np.random.randn(n_basesample)
    wn = np.random.rand(n_basesample)
    # Default
    gkde = stats.gaussian_kde(xn, weights=wn)
    xs = np.linspace(-15, 12, 25)
    pdf = gkde.evaluate(xs)
    pdf2 = gkde.pdf(xs)
    assert_almost_equal(pdf, pdf2, decimal=12)
    logpdf = np.log(pdf)
    logpdf2 = gkde.logpdf(xs)
    assert_almost_equal(logpdf, logpdf2, decimal=12)
    # There are more points than data
    gkde = stats.gaussian_kde(xs, weights=np.random.rand(len(xs)))
    pdf = np.log(gkde.evaluate(xn))
    pdf2 = gkde.logpdf(xn)
    assert_almost_equal(pdf, pdf2, decimal=12)
def test_marginal_1_axis():
    """Marginalizing one dimension out of a 10-D KDE must agree with
    numerically integrating the full pdf over that dimension."""
    rng = np.random.default_rng(6111799263660870475)
    n_data = 50
    n_dim = 10
    dataset = rng.normal(size=(n_dim, n_data))
    points = rng.normal(size=(n_dim, 3))
    dimensions = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])  # dimensions to keep
    kde = stats.gaussian_kde(dataset)
    marginal = kde.marginal(dimensions)
    pdf = marginal.pdf(points[dimensions])
    def marginal_pdf_single(point):
        def f(x):
            # prepend the integration variable for the dropped dimension 0
            x = np.concatenate(([x], point[dimensions]))
            return kde.pdf(x)[0]
        return integrate.quad(f, -np.inf, np.inf)[0]
    def marginal_pdf(points):
        return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)
    ref = marginal_pdf(points)
    assert_allclose(pdf, ref, rtol=1e-6)
@pytest.mark.xslow
def test_marginal_2_axis():
    """Marginal KDE keeping 2 of 4 axes matches 2-d numerical integration."""
    rng = np.random.default_rng(6111799263660870475)
    n_data = 30
    n_dim = 4
    dataset = rng.normal(size=(n_dim, n_data))
    points = rng.normal(size=(n_dim, 3))

    dimensions = np.array([1, 3])  # dimensions to keep

    kde = stats.gaussian_kde(dataset)
    marginal = kde.marginal(dimensions)
    pdf = marginal.pdf(points[dimensions])

    def marginal_pdf(points):
        def marginal_pdf_single(point):
            # Integrate the full pdf over the two dropped dimensions (0, 2),
            # holding the kept coordinates (w, z) of `point` fixed.
            def f(y, x):
                w, z = point[dimensions]
                x = np.array([x, w, y, z])
                return kde.pdf(x)[0]
            return integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)[0]

        return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)

    ref = marginal_pdf(points)

    assert_allclose(pdf, ref, rtol=1e-6)
def test_marginal_iv():
# test input validation
rng = np.random.default_rng(6111799263660870475)
n_data = 30
n_dim = 4
dataset = rng.normal(size=(n_dim, n_data))
points = rng.normal(size=(n_dim, 3))
kde = stats.gaussian_kde(dataset)
# check that positive and negative indices are equivalent
dimensions1 = [-1, 1]
marginal1 = kde.marginal(dimensions1)
pdf1 = marginal1.pdf(points[dimensions1])
dimensions2 = [3, -3]
marginal2 = kde.marginal(dimensions2)
pdf2 = marginal2.pdf(points[dimensions2])
assert_equal(pdf1, pdf2)
# IV for non-integer dimensions
message = "Elements of `dimensions` must be integers..."
with pytest.raises(ValueError, match=message):
kde.marginal([1, 2.5])
# IV for uniquenes
message = "All elements of `dimensions` must be unique."
with pytest.raises(ValueError, match=message):
kde.marginal([1, 2, 2])
# IV for non-integer dimensions
message = (r"Dimensions \[-5 6\] are invalid for a distribution in 4...")
with pytest.raises(ValueError, match=message):
kde.marginal([1, -5, 6])
@pytest.mark.xslow
def test_logpdf_overflow():
    # regression test for gh-12988; testing against linalg instability for
    # very high dimensionality kde
    np.random.seed(1)
    n_dimensions = 2500
    n_samples = 5000
    # One row per dimension; dimension n has mean n.
    xn = np.array([np.random.randn(n_samples) + (n) for n in range(
        0, n_dimensions)])

    # Default
    gkde = stats.gaussian_kde(xn)

    # Evaluate at the per-dimension means; the log-density must be finite
    # (no -inf from underflow, no NaN from linalg instability).
    logpdf = gkde.logpdf(np.arange(0, n_dimensions))
    np.testing.assert_equal(np.isneginf(logpdf[0]), False)
    np.testing.assert_equal(np.isnan(logpdf[0]), False)
def test_weights_intact():
    """gh-9709: constructing a KDE must not modify the caller's weights."""
    np.random.seed(12345)
    vals = np.random.lognormal(size=100)
    weights = np.random.choice([1.0, 10.0, 100], size=vals.size)
    snapshot = weights.copy()  # state before KDE construction

    stats.gaussian_kde(np.log10(vals), weights=weights)
    assert_allclose(weights, snapshot, atol=1e-14, rtol=1e-14)
def test_weights_integer():
    """Integer weights give the same KDE as their float equivalents (gh-9709)."""
    np.random.seed(12345)
    values = [0.2, 13.5, 21.0, 75.0, 99.0]
    int_weights = [1, 2, 4, 8, 16]  # a list of integers
    kde_int = stats.gaussian_kde(values, weights=int_weights)
    kde_float = stats.gaussian_kde(values, weights=np.float64(int_weights))

    grid = [0.3, 11, 88]
    assert_allclose(kde_int.evaluate(grid), kde_float.evaluate(grid),
                    atol=1e-14, rtol=1e-14)
def test_seed():
    # Test the seed option of the resample method
    def test_seed_sub(gkde_trail):
        # Exercises one fitted KDE with every supported kind of seed.
        n_sample = 200
        # The results should be different without using seed
        samp1 = gkde_trail.resample(n_sample)
        samp2 = gkde_trail.resample(n_sample)
        assert_raises(
            AssertionError, assert_allclose, samp1, samp2, atol=1e-13
        )
        # Use integer seed
        seed = 831
        samp1 = gkde_trail.resample(n_sample, seed=seed)
        samp2 = gkde_trail.resample(n_sample, seed=seed)
        assert_allclose(samp1, samp2, atol=1e-13)
        # Use RandomState
        rstate1 = np.random.RandomState(seed=138)
        samp1 = gkde_trail.resample(n_sample, seed=rstate1)
        rstate2 = np.random.RandomState(seed=138)
        samp2 = gkde_trail.resample(n_sample, seed=rstate2)
        assert_allclose(samp1, samp2, atol=1e-13)

        # check that np.random.Generator can be used (numpy >= 1.17)
        if hasattr(np.random, 'default_rng'):
            # obtain a np.random.Generator object
            rng = np.random.default_rng(1234)
            gkde_trail.resample(n_sample, seed=rng)

    np.random.seed(8765678)
    n_basesample = 500
    wn = np.random.rand(n_basesample)
    # Test 1D case
    xn_1d = np.random.randn(n_basesample)

    gkde_1d = stats.gaussian_kde(xn_1d)
    test_seed_sub(gkde_1d)
    gkde_1d_weighted = stats.gaussian_kde(xn_1d, weights=wn)
    test_seed_sub(gkde_1d_weighted)

    # Test 2D case
    mean = np.array([1.0, 3.0])
    covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
    xn_2d = np.random.multivariate_normal(mean, covariance, size=n_basesample).T

    gkde_2d = stats.gaussian_kde(xn_2d)
    test_seed_sub(gkde_2d)
    gkde_2d_weighted = stats.gaussian_kde(xn_2d, weights=wn)
    test_seed_sub(gkde_2d_weighted)
def test_singular_data_covariance_gh10205():
    # When the data lie in a lower-dimensional subspace and this causes
    # an exception, check that the error message is informative.
    rng = np.random.default_rng(2321583144339784787)
    mu = np.array([1, 10, 20])
    # Covariance chosen so the data (nearly) lie in a 2-d subspace.
    sigma = np.array([[4, 10, 0], [10, 25, 0], [0, 0, 100]])
    data = rng.multivariate_normal(mu, sigma, 1000)
    try:  # doesn't raise any error on some platforms, and that's OK
        stats.gaussian_kde(data.T)
    except linalg.LinAlgError:
        # If it does raise, re-run and check the improved message.
        msg = "The data appears to lie in a lower-dimensional subspace..."
        with assert_raises(linalg.LinAlgError, match=msg):
            stats.gaussian_kde(data.T)
def test_fewer_points_than_dimensions_gh17436():
    """More dimensions than samples must fail fast with a clear error.

    With fewer observations than dimensions the covariance matrix is
    singular; this often means the user passed the transpose of what
    `gaussian_kde` expects, and can produce a huge covariance matrix,
    so bail out early (gh-17436).
    """
    rng = np.random.default_rng(2046127537594925772)
    sample = rng.multivariate_normal(np.zeros(3), np.eye(3), size=5)
    message = "Number of dimensions is greater than number of samples..."
    with pytest.raises(ValueError, match=message):
        stats.gaussian_kde(sample)
| 20,382
| 32.579901
| 89
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_resampling.py
|
import numpy as np
import pytest
from scipy.stats import bootstrap, monte_carlo_test, permutation_test
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy import special
from .. import _resampling as _resampling
from scipy._lib._util import rng_integers
from scipy.optimize import root
def test_bootstrap_iv():
    """Each invalid argument to `bootstrap` raises with the expected message."""

    message = "`data` must be a sequence of samples."
    with pytest.raises(ValueError, match=message):
        bootstrap(1, np.mean)

    message = "`data` must contain at least one sample."
    with pytest.raises(ValueError, match=message):
        bootstrap(tuple(), np.mean)

    message = "each sample in `data` must contain two or more observations..."
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3], [1]), np.mean)

    message = ("When `paired is True`, all samples must have the same length ")
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True)

    message = "`vectorized` must be `True`, `False`, or `None`."
    with pytest.raises(ValueError, match=message):
        bootstrap(1, np.mean, vectorized='ekki')

    message = "`axis` must be an integer."
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, axis=1.5)

    # message comes from float() conversion inside bootstrap
    message = "could not convert string to float"
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, confidence_level='ni')

    message = "`n_resamples` must be a non-negative integer."
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000)

    message = "`n_resamples` must be a non-negative integer."
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5)

    message = "`batch` must be a positive integer or None."
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, batch=-1000)

    message = "`batch` must be a positive integer or None."
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, batch=1000.5)

    message = "`method` must be in"
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, method='ekki')

    message = "`bootstrap_result` must have attribute `bootstrap_distribution'"
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, bootstrap_result=10)

    # n_resamples=0 is only valid together with an existing bootstrap_result
    message = "Either `bootstrap_result.bootstrap_distribution.size`"
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, n_resamples=0)

    message = "'herring' cannot be used to seed a"
    with pytest.raises(ValueError, match=message):
        bootstrap(([1, 2, 3],), np.mean, random_state='herring')
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_bootstrap_batch(method, axis):
    """Batch size must not change the result (same seed, same resamples)."""
    # for one-sample statistics, batch size shouldn't affect the result
    np.random.seed(0)

    x = np.random.rand(10, 11, 12)
    res1 = bootstrap((x,), np.mean, batch=None, method=method,
                     random_state=0, axis=axis, n_resamples=100)
    res2 = bootstrap((x,), np.mean, batch=10, method=method,
                     random_state=0, axis=axis, n_resamples=100)

    assert_equal(res2.confidence_interval.low, res1.confidence_interval.low)
    assert_equal(res2.confidence_interval.high, res1.confidence_interval.high)
    assert_equal(res2.standard_error, res1.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_paired(method):
    """`paired=True` over (x, y) matches resampling an index array.

    Bug fix: the parametrized `method` was previously never passed to
    `bootstrap`, so all three parametrizations tested only the default
    method; it is now forwarded to both calls.
    """
    np.random.seed(0)
    n = 100
    x = np.random.rand(n)
    y = np.random.rand(n)

    def my_statistic(x, y, axis=-1):
        return ((x-y)**2).mean(axis=axis)

    def my_paired_statistic(i, axis=-1):
        # Resample paired observations via their common index `i`.
        a = x[i]
        b = y[i]
        res = my_statistic(a, b)
        return res

    i = np.arange(len(x))

    res1 = bootstrap((i,), my_paired_statistic, method=method, random_state=0)
    res2 = bootstrap((x, y), my_statistic, paired=True, method=method,
                     random_state=0)

    assert_allclose(res1.confidence_interval, res2.confidence_interval)
    assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("paired", [True, False])
def test_bootstrap_vectorized(method, axis, paired):
    """Tiled samples give per-slice results equal to the original 1-d sample."""
    # test that paired is vectorized as expected: when samples are tiled,
    # CI and standard_error of each axis-slice is the same as those of the
    # original 1d sample

    np.random.seed(0)

    def my_statistic(x, y, z, axis=-1):
        return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis)

    shape = 10, 11, 12
    n_samples = shape[axis]

    x = np.random.rand(n_samples)
    y = np.random.rand(n_samples)
    z = np.random.rand(n_samples)
    res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
                     random_state=0, axis=0, n_resamples=100)
    assert (res1.bootstrap_distribution.shape
            == res1.standard_error.shape + (100,))

    # Tile the 1-d samples along the other two axes via broadcasting.
    reshape = [1, 1, 1]
    reshape[axis] = n_samples
    x = np.broadcast_to(x.reshape(reshape), shape)
    y = np.broadcast_to(y.reshape(reshape), shape)
    z = np.broadcast_to(z.reshape(reshape), shape)
    res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
                     random_state=0, axis=axis, n_resamples=100)

    assert_allclose(res2.confidence_interval.low,
                    res1.confidence_interval.low)
    assert_allclose(res2.confidence_interval.high,
                    res1.confidence_interval.high)
    assert_allclose(res2.standard_error, res1.standard_error)

    # Output shapes drop the consumed axis.
    result_shape = list(shape)
    result_shape.pop(axis)

    assert_equal(res2.confidence_interval.low.shape, result_shape)
    assert_equal(res2.confidence_interval.high.shape, result_shape)
    assert_equal(res2.standard_error.shape, result_shape)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_against_theory(method):
    """Bootstrap CI/SE for the mean agree with the t-distribution theory."""
    # based on https://www.statology.org/confidence-intervals-python/
    rng = np.random.default_rng(2442101192988600726)
    data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=rng)
    alpha = 0.95
    # Theoretical CI/SE for the sample mean come from the t distribution.
    dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data))
    expected_interval = dist.interval(confidence=alpha)
    expected_se = dist.std()

    config = dict(data=(data,), statistic=np.mean, n_resamples=5000,
                  method=method, random_state=rng)
    res = bootstrap(**config, confidence_level=alpha)
    assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4)
    assert_allclose(res.standard_error, expected_se, atol=3e-4)

    # Re-use the existing resamples (n_resamples=0) for the one-sided CIs.
    config.update(dict(n_resamples=0, bootstrap_result=res))
    res = bootstrap(**config, confidence_level=alpha, alternative='less')
    assert_allclose(res.confidence_interval.high, dist.ppf(alpha), rtol=5e-4)

    config.update(dict(n_resamples=0, bootstrap_result=res))
    res = bootstrap(**config, confidence_level=alpha, alternative='greater')
    assert_allclose(res.confidence_interval.low, dist.ppf(1-alpha), rtol=5e-4)
# Reference CIs computed with R's "boot" library (script in the comments of
# test_bootstrap_against_R below).
tests_R = {"basic": (23.77, 79.12),
           "percentile": (28.86, 84.21),
           "BCa": (32.31, 91.43)}


@pytest.mark.parametrize("method, expected", tests_R.items())
def test_bootstrap_against_R(method, expected):
    # Compare against R's "boot" library
    # library(boot)

    # stat <- function (x, a) {
    #    mean(x[a])
    # }

    # x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
    #        23, 34, 50, 81, 89, 121, 134, 213)

    # # Use a large value so we get a few significant digits for the CI.
    # n = 1000000
    # bootresult = boot(x, stat, n)
    # result <- boot.ci(bootresult)
    # print(result)
    x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
                  23, 34, 50, 81, 89, 121, 134, 213])
    res = bootstrap((x,), np.mean, n_resamples=1000000, method=method,
                    random_state=0)
    assert_allclose(res.confidence_interval, expected, rtol=0.005)
# Expected counts (out of 2000 replications) of CIs that contain the true
# mean; consumed by test_bootstrap_against_itself_1samp below.
tests_against_itself_1samp = {"basic": 1780,
                              "percentile": 1784,
                              "BCa": 1784}
def test_multisample_BCa_against_R():
    """SciPy's BCa CI midpoint is far closer to R wBoot's than basic/percentile."""
    # Because bootstrap is stochastic, it's tricky to test against reference
    # behavior. Here, we show that SciPy's BCa CI matches R wboot's BCa CI
    # much more closely than the other SciPy CIs do.

    # arbitrary skewed data
    x = [0.75859206, 0.5910282, -0.4419409, -0.36654601,
         0.34955357, -1.38835871, 0.76735821]
    y = [1.41186073, 0.49775975, 0.08275588, 0.24086388,
         0.03567057, 0.52024419, 0.31966611, 1.32067634]

    # a multi-sample statistic for which the BCa CI tends to be different
    # from the other CIs
    def statistic(x, y, axis):
        s1 = stats.skew(x, axis=axis)
        s2 = stats.skew(y, axis=axis)
        return s1 - s2

    # compute confidence intervals using each method
    rng = np.random.default_rng(468865032284792692)

    res_basic = stats.bootstrap((x, y), statistic, method='basic',
                                batch=100, random_state=rng)
    res_percent = stats.bootstrap((x, y), statistic, method='percentile',
                                  batch=100, random_state=rng)
    res_bca = stats.bootstrap((x, y), statistic, method='bca',
                              batch=100, random_state=rng)

    # compute midpoints so we can compare just one number for each
    mid_basic = np.mean(res_basic.confidence_interval)
    mid_percent = np.mean(res_percent.confidence_interval)
    mid_bca = np.mean(res_bca.confidence_interval)

    # reference for BCA CI computed using R wboot package:
    # library(wBoot)
    # library(moments)

    # x = c(0.75859206, 0.5910282, -0.4419409, -0.36654601,
    #       0.34955357, -1.38835871, 0.76735821)
    # y = c(1.41186073, 0.49775975, 0.08275588, 0.24086388,
    #       0.03567057, 0.52024419, 0.31966611, 1.32067634)

    # twoskew <- function(x1, y1) {skewness(x1) - skewness(y1)}
    # boot.two.bca(x, y, skewness, conf.level = 0.95,
    #              R = 9999, stacked = FALSE)
    mid_wboot = -1.5519

    # compute percent difference relative to wboot BCA method
    diff_basic = (mid_basic - mid_wboot)/abs(mid_wboot)
    diff_percent = (mid_percent - mid_wboot)/abs(mid_wboot)
    diff_bca = (mid_bca - mid_wboot)/abs(mid_wboot)

    # SciPy's BCa CI midpoint is much closer than that of the other methods
    assert diff_basic < -0.15
    assert diff_percent > 0.15
    assert abs(diff_bca) < 0.03
def test_BCa_acceleration_against_reference():
    # Compare the (deterministic) acceleration parameter for a multi-sample
    # problem against a reference value. The example is from [1], but Efron's
    # value seems inaccurate. Straightforward code for computing the
    # reference acceleration (0.011008228344026734) is available at:
    # https://github.com/scipy/scipy/pull/16455#issuecomment-1193400981

    y = np.array([10, 27, 31, 40, 46, 50, 52, 104, 146])
    z = np.array([16, 23, 38, 94, 99, 141, 197])

    def statistic(z, y, axis=0):
        return np.mean(z, axis=axis) - np.mean(y, axis=axis)

    data = [z, y]
    res = stats.bootstrap(data, statistic)

    # The acceleration `a_hat` depends only on the data (jackknife), not on
    # the stochastic bootstrap distribution, so it can be compared exactly.
    axis = -1
    alpha = 0.95
    theta_hat_b = res.bootstrap_distribution
    batch = 100
    _, _, a_hat = _resampling._bca_interval(data, statistic, axis, alpha,
                                            theta_hat_b, batch)
    assert_allclose(a_hat, 0.011008228344026734)
@pytest.mark.parametrize("method, expected",
                         tests_against_itself_1samp.items())
def test_bootstrap_against_itself_1samp(method, expected):
    """Seed-pinned coverage counts for one-sample CIs (regression guard)."""
    # The expected values in this test were generated using bootstrap
    # to check for unintended changes in behavior. The test also makes sure
    # that bootstrap works with multi-sample statistics and that the
    # `axis` argument works as expected / function is vectorized.
    np.random.seed(0)

    n = 100  # size of sample
    n_resamples = 999  # number of bootstrap resamples used to form each CI
    confidence_level = 0.9

    # The true mean is 5
    dist = stats.norm(loc=5, scale=1)
    stat_true = dist.mean()

    # Do the same thing 2000 times. (The code is fully vectorized.)
    n_replications = 2000
    data = dist.rvs(size=(n_replications, n))
    res = bootstrap((data,),
                    statistic=np.mean,
                    confidence_level=confidence_level,
                    n_resamples=n_resamples,
                    batch=50,
                    method=method,
                    axis=-1)
    ci = res.confidence_interval

    # ci contains vectors of lower and upper confidence interval bounds
    ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
    assert ci_contains_true == expected

    # ci_contains_true is not inconsistent with confidence_level
    pvalue = stats.binomtest(ci_contains_true, n_replications,
                             confidence_level).pvalue
    assert pvalue > 0.1
# Expected counts (out of 1000 replications) of CIs that contain the true
# difference in means; consumed by test_bootstrap_against_itself_2samp below.
tests_against_itself_2samp = {"basic": 892,
                              "percentile": 890}
@pytest.mark.parametrize("method, expected",
                         tests_against_itself_2samp.items())
def test_bootstrap_against_itself_2samp(method, expected):
    """Seed-pinned coverage counts for two-sample CIs (regression guard)."""
    # The expected values in this test were generated using bootstrap
    # to check for unintended changes in behavior. The test also makes sure
    # that bootstrap works with multi-sample statistics and that the
    # `axis` argument works as expected / function is vectorized.
    np.random.seed(0)

    n1 = 100  # size of sample 1
    n2 = 120  # size of sample 2
    n_resamples = 999  # number of bootstrap resamples used to form each CI
    confidence_level = 0.9

    # The statistic we're interested in is the difference in means
    def my_stat(data1, data2, axis=-1):
        mean1 = np.mean(data1, axis=axis)
        mean2 = np.mean(data2, axis=axis)
        return mean1 - mean2

    # The true difference in the means is -0.1
    dist1 = stats.norm(loc=0, scale=1)
    dist2 = stats.norm(loc=0.1, scale=1)
    stat_true = dist1.mean() - dist2.mean()

    # Do the same thing 1000 times. (The code is fully vectorized.)
    n_replications = 1000
    data1 = dist1.rvs(size=(n_replications, n1))
    data2 = dist2.rvs(size=(n_replications, n2))
    res = bootstrap((data1, data2),
                    statistic=my_stat,
                    confidence_level=confidence_level,
                    n_resamples=n_resamples,
                    batch=50,
                    method=method,
                    axis=-1)
    ci = res.confidence_interval

    # ci contains vectors of lower and upper confidence interval bounds
    ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
    assert ci_contains_true == expected

    # ci_contains_true is not inconsistent with confidence_level
    pvalue = stats.binomtest(ci_contains_true, n_replications,
                             confidence_level).pvalue
    assert pvalue > 0.1
@pytest.mark.parametrize("method", ["basic", "percentile"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_3samp(method, axis):
    """Vectorized and non-vectorized 3-sample statistics give identical CIs."""
    def statistic(*data, axis=0):
        # an arbitrary, vectorized statistic
        return sum(sample.mean(axis) for sample in data)

    def statistic_1d(*data):
        # the same statistic, restricted to 1-d samples
        for sample in data:
            assert sample.ndim == 1
        return statistic(*data, axis=0)

    np.random.seed(0)
    samples = tuple(np.random.rand(4, 5) for _ in range(3))
    common = dict(axis=axis, n_resamples=100, method=method, random_state=0)
    res1 = bootstrap(samples, statistic, vectorized=True, **common)
    res2 = bootstrap(samples, statistic_1d, vectorized=False, **common)
    assert_allclose(res1.confidence_interval, res2.confidence_interval)
    assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107")
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_1samp(method, axis):
    """Vectorized and non-vectorized one-sample statistics agree."""
    def statistic(x, axis=0):
        # an arbitrary, vectorized statistic
        return x.mean(axis=axis)

    def statistic_1d(x):
        # the same statistic, not vectorized
        assert x.ndim == 1
        return statistic(x, axis=0)

    np.random.seed(0)
    x = np.random.rand(4, 5)
    res1 = bootstrap((x,), statistic, vectorized=True, axis=axis,
                     n_resamples=100, batch=None, method=method,
                     random_state=0)
    res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis,
                     n_resamples=100, batch=10, method=method,
                     random_state=0)
    assert_allclose(res1.confidence_interval, res2.confidence_interval)
    assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_degenerate(method):
    """Constant data: CI collapses to a point (or NaN + warning for BCa)."""
    data = 35 * [10000.]
    if method == "BCa":
        with np.errstate(invalid='ignore'):
            # BCa cannot be computed for degenerate data; it warns and
            # returns a NaN interval.
            msg = "The BCa confidence interval cannot be calculated"
            with pytest.warns(stats.DegenerateDataWarning, match=msg):
                res = bootstrap([data, ], np.mean, method=method)
                assert_equal(res.confidence_interval, (np.nan, np.nan))
    else:
        res = bootstrap([data, ], np.mean, method=method)
        assert_equal(res.confidence_interval, (10000., 10000.))
    assert_equal(res.standard_error, 0)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_gh15678(method):
    # Check that gh-15678 is fixed: when statistic function returned a Python
    # float, method="BCa" failed when trying to add a dimension to the float
    rng = np.random.default_rng(354645618886684)
    dist = stats.norm(loc=2, scale=4)
    data = dist.rvs(size=100, random_state=rng)
    data = (data,)
    res = bootstrap(data, stats.skew, method=method, n_resamples=100,
                    random_state=np.random.default_rng(9563))
    # this always worked because np.apply_along_axis returns NumPy data type
    ref = bootstrap(data, stats.skew, method=method, n_resamples=100,
                    random_state=np.random.default_rng(9563), vectorized=False)
    assert_allclose(res.confidence_interval, ref.confidence_interval)
    assert_allclose(res.standard_error, ref.standard_error)
    # The result must be a NumPy scalar, not a Python float.
    assert isinstance(res.standard_error, np.float64)
def test_bootstrap_min():
    # Check that gh-15883 is fixed: percentileofscore should
    # behave according to the 'mean' behavior and not trigger nan for BCa
    rng = np.random.default_rng(1891289180021102)
    dist = stats.norm(loc=2, scale=4)
    data = dist.rvs(size=100, random_state=rng)
    true_min = np.min(data)
    data = (data,)
    res = bootstrap(data, np.min, method="BCa", n_resamples=100,
                    random_state=np.random.default_rng(3942))
    assert true_min == res.confidence_interval.low
    # Symmetry check: min of x corresponds to max of -x with the CI
    # endpoints negated and swapped.
    res2 = bootstrap(-np.array(data), np.max, method="BCa", n_resamples=100,
                     random_state=np.random.default_rng(3942))
    assert_allclose(-res.confidence_interval.low,
                    res2.confidence_interval.high)
    assert_allclose(-res.confidence_interval.high,
                    res2.confidence_interval.low)
@pytest.mark.parametrize("additional_resamples", [0, 1000])
def test_re_bootstrap(additional_resamples):
    # Test behavior of parameter `bootstrap_result`
    rng = np.random.default_rng(8958153316228384)
    x = rng.random(size=100)

    n1 = 1000
    n2 = additional_resamples
    n3 = n1 + additional_resamples

    # Two-step bootstrap: n1 resamples, then n2 more via `bootstrap_result`.
    rng = np.random.default_rng(296689032789913033)
    res = stats.bootstrap((x,), np.mean, n_resamples=n1, random_state=rng,
                          confidence_level=0.95, method='percentile')
    res = stats.bootstrap((x,), np.mean, n_resamples=n2, random_state=rng,
                          confidence_level=0.90, method='BCa',
                          bootstrap_result=res)

    # Must match a single run of n3 resamples with the same seed.
    rng = np.random.default_rng(296689032789913033)
    ref = stats.bootstrap((x,), np.mean, n_resamples=n3, random_state=rng,
                          confidence_level=0.90, method='BCa')

    assert_allclose(res.standard_error, ref.standard_error, rtol=1e-14)
    assert_allclose(res.confidence_interval, ref.confidence_interval,
                    rtol=1e-14)
@pytest.mark.xfail_on_32bit("Sensible to machine precision")
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_alternative(method):
rng = np.random.default_rng(5894822712842015040)
dist = stats.norm(loc=2, scale=4)
data = (dist.rvs(size=(100), random_state=rng),)
config = dict(data=data, statistic=np.std, random_state=rng, axis=-1)
t = stats.bootstrap(**config, confidence_level=0.9)
config.update(dict(n_resamples=0, bootstrap_result=t))
l = stats.bootstrap(**config, confidence_level=0.95, alternative='less')
g = stats.bootstrap(**config, confidence_level=0.95, alternative='greater')
assert_equal(l.confidence_interval.high, t.confidence_interval.high)
assert_equal(g.confidence_interval.low, t.confidence_interval.low)
assert np.isneginf(l.confidence_interval.low)
assert np.isposinf(g.confidence_interval.high)
with pytest.raises(ValueError, match='`alternative` must be one of'):
stats.bootstrap(**config, alternative='ekki-ekki')
def test_jackknife_resample():
    """_jackknife_resample yields every leave-one-out resample, batched or not."""
    shape = 3, 4, 5, 6
    np.random.seed(0)
    sample = np.random.rand(*shape)
    resamples = next(_resampling._jackknife_resample(sample))

    # Each resample is indexed along the second-to-last axis; the last axis
    # is the one the statistic will be taken over / consumed.
    for i in range(shape[-1]):
        observed = resamples[..., i, :]
        leave_one_out = np.delete(sample, i, axis=-1)
        assert np.array_equal(observed, leave_one_out)

    # Batched generation concatenates to the same resamples.
    batched = np.concatenate(
        list(_resampling._jackknife_resample(sample, batch=2)), axis=-2)
    assert np.array_equal(batched, resamples)
@pytest.mark.parametrize("rng_name", ["RandomState", "default_rng"])
def test_bootstrap_resample(rng_name):
    """_bootstrap_resample draws the indices rng_integers would draw."""
    rng = getattr(np.random, rng_name, None)
    if rng is None:
        pytest.skip(f"{rng_name} not available.")
    rng1 = rng(0)
    rng2 = rng(0)

    n_resamples = 10
    shape = 3, 4, 5, 6

    np.random.seed(0)
    x = np.random.rand(*shape)
    y = _resampling._bootstrap_resample(x, n_resamples, random_state=rng1)

    for i in range(n_resamples):
        # each resample is indexed along second to last axis
        # (last axis is the one the statistic will be taken over / consumed)
        slc = y[..., i, :]

        # Reproduce the expected resample with an identically seeded rng.
        js = rng_integers(rng2, 0, shape[-1], shape[-1])
        expected = x[..., js]

        assert np.array_equal(slc, expected)
@pytest.mark.parametrize("score", [0, 0.5, 1])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_percentile_of_score(score, axis):
    """_percentile_of_score matches stats.percentileofscore along `axis`.

    Bug fix: the parametrized `axis` was previously ignored (both calls
    hard-coded ``axis=-1``); it is now forwarded so all three axes are
    actually exercised.
    """
    shape = 10, 20, 30
    np.random.seed(0)
    x = np.random.rand(*shape)
    p = _resampling._percentile_of_score(x, score, axis=axis)

    def vectorized_pos(a, score, axis):
        # Reference: scalar percentileofscore applied slice by slice.
        return np.apply_along_axis(stats.percentileofscore, axis, a, score)

    p2 = vectorized_pos(x, score, axis=axis)/100

    assert_allclose(p, p2, 1e-15)
def test_percentile_along_axis():
    """_percentile_along_axis pairs one q with each axis slice.

    Unlike np.percentile, which evaluates every q for every slice,
    the helper evaluates exactly the q corresponding to each slice.
    """
    shape = 10, 20
    np.random.seed(0)
    sample = np.random.rand(*shape)
    q = np.random.rand(*shape[:-1]) * 100
    actual = _resampling._percentile_along_axis(sample, q)

    for row in range(shape[0]):
        expected = np.percentile(sample[row], q[row], axis=-1)
        assert_allclose(actual[row], expected, 1e-15)
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_vectorize_statistic(axis):
    """_vectorize_statistic makes a 1-d statistic broadcast along `axis`."""
    def statistic(*data, axis):
        # an arbitrary, vectorized statistic
        return sum(sample.mean(axis) for sample in data)

    def statistic_1d(*data):
        # the same statistic, restricted to 1-d samples
        for sample in data:
            assert sample.ndim == 1
        return statistic(*data, axis=0)

    # vectorize the non-vectorized statistic
    vectorized = _resampling._vectorize_statistic(statistic_1d)

    np.random.seed(0)
    x = np.random.rand(4, 5, 6)
    y = np.random.rand(4, 1, 6)
    z = np.random.rand(1, 5, 6)

    assert_allclose(vectorized(x, y, z, axis=axis),
                    statistic(x, y, z, axis=axis))
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_vector_valued_statistic(method):
    # Generate 95% confidence interval around MLE of normal distribution
    # parameters. Repeat 100 times, each time on sample of size 100.
    # Check that confidence interval contains true parameters ~95 times.
    # Confidence intervals are estimated and stochastic; a test failure
    # does not necessarily indicate that something is wrong. More important
    # than values of `counts` below is that the shapes of the outputs are
    # correct.

    rng = np.random.default_rng(2196847219)
    params = 1, 0.5
    sample = stats.norm.rvs(*params, size=(100, 100), random_state=rng)

    def statistic(data, axis):
        # Vector-valued statistic: (mean, unbiased std) along `axis`.
        return np.asarray([np.mean(data, axis),
                           np.std(data, axis, ddof=1)])

    res = bootstrap((sample,), statistic, method=method, axis=-1,
                    n_resamples=9999, batch=200)

    counts = np.sum((res.confidence_interval.low.T < params)
                    & (res.confidence_interval.high.T > params),
                    axis=0)
    assert np.all(counts >= 90)
    assert np.all(counts <= 100)
    assert res.confidence_interval.low.shape == (2, 100)
    assert res.confidence_interval.high.shape == (2, 100)
    assert res.standard_error.shape == (2, 100)
    assert res.bootstrap_distribution.shape == (2, 100, 9999)
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_vector_valued_statistic_gh17715():
    # gh-17715 reported a mistake introduced in the extension of BCa to
    # multi-sample statistics; a `len` should have been `.shape[-1]`. Check
    # that this is resolved.
    rng = np.random.default_rng(141921000979291141)

    def concordance(x, y, axis):
        # Concordance-style agreement measure between x and y along `axis`.
        xm = x.mean(axis)
        ym = y.mean(axis)
        cov = ((x - xm[..., None]) * (y - ym[..., None])).mean(axis)
        return (2 * cov) / (x.var(axis) + y.var(axis) + (xm - ym) ** 2)

    def statistic(tp, tn, fp, fn, axis):
        actual = tp + fp
        expected = tp + fn
        return np.nan_to_num(concordance(actual, expected, axis))

    def statistic_extradim(*args, axis):
        # Same statistic with a leading singleton dimension added.
        return statistic(*args, axis)[np.newaxis, ...]

    data = [[4, 0, 0, 2],  # (tp, tn, fp, fn)
            [2, 1, 2, 1],
            [0, 6, 0, 0],
            [0, 6, 3, 0],
            [0, 8, 1, 0]]
    data = np.array(data).T

    res = bootstrap(data, statistic_extradim, random_state=rng, paired=True)
    ref = bootstrap(data, statistic, random_state=rng, paired=True)
    # The extra leading dimension must not change the CI.
    assert_allclose(res.confidence_interval.low[0],
                    ref.confidence_interval.low, atol=1e-15)
    assert_allclose(res.confidence_interval.high[0],
                    ref.confidence_interval.high, atol=1e-15)
# --- Test Monte Carlo Hypothesis Test --- #
class TestMonteCarloHypothesisTest:
    # Tolerance for comparing Monte Carlo p-values against exact references.
    atol = 2.5e-2  # for comparing p-value

    def rvs(self, rvs_in, rs):
        # Bind a fixed random state into an rvs callable for reproducibility.
        return lambda *args, **kwds: rvs_in(*args, random_state=rs, **kwds)
    def test_input_validation(self):
        # test that the appropriate error messages are raised for invalid input

        def stat(x):
            return stats.skewnorm(x).statistic

        message = "Array shapes are incompatible for broadcasting."
        data = (np.zeros((2, 5)), np.zeros((3, 5)))
        rvs = (stats.norm.rvs, stats.norm.rvs)
        with pytest.raises(ValueError, match=message):
            monte_carlo_test(data, rvs, lambda x, y: 1, axis=-1)

        message = "`axis` must be an integer."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, axis=1.5)

        message = "`vectorized` must be `True`, `False`, or `None`."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, vectorized=1.5)

        message = "`rvs` must be callable or sequence of callables."
        with pytest.raises(TypeError, match=message):
            monte_carlo_test([1, 2, 3], None, stat)
        with pytest.raises(TypeError, match=message):
            monte_carlo_test([[1, 2], [3, 4]], [lambda x: x, None], stat)

        # number of rvs callables must match the number of samples
        message = "If `rvs` is a sequence..."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([[1, 2, 3]], [lambda x: x, lambda x: x], stat)

        message = "`statistic` must be callable."
        with pytest.raises(TypeError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, None)

        message = "`n_resamples` must be a positive integer."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
                             n_resamples=-1000)

        message = "`n_resamples` must be a positive integer."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
                             n_resamples=1000.5)

        message = "`batch` must be a positive integer or None."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=-1000)

        message = "`batch` must be a positive integer or None."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=1000.5)

        message = "`alternative` must be in..."
        with pytest.raises(ValueError, match=message):
            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
                             alternative='ekki')
def test_batch(self):
    """`batch` bounds how many resamples are passed to `statistic` per call.

    Verified by instrumenting the statistic with a call counter and a
    record of the largest batch it ever received.
    """
    # make sure that the `batch` parameter is respected by checking the
    # maximum batch size provided in calls to `statistic`
    rng = np.random.default_rng(23492340193)
    x = rng.random(10)

    def statistic(x, axis):
        # track largest batch seen and total number of calls
        batch_size = 1 if x.ndim == 1 else len(x)
        statistic.batch_size = max(batch_size, statistic.batch_size)
        statistic.counter += 1
        return stats.skewtest(x, axis=axis).statistic
    statistic.counter = 0
    statistic.batch_size = 0

    kwds = {'sample': x, 'statistic': statistic,
            'n_resamples': 1000, 'vectorized': True}

    # NOTE(review): `self.rvs` appears to wrap an rvs callable with a
    # freshly seeded RNG; it is defined elsewhere on the class -- confirm.
    kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
    res1 = monte_carlo_test(batch=1, **kwds)
    assert_equal(statistic.counter, 1001)  # 1000 resamples + observed sample
    assert_equal(statistic.batch_size, 1)

    kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
    statistic.counter = 0
    res2 = monte_carlo_test(batch=50, **kwds)
    assert_equal(statistic.counter, 21)  # 1000/50 batches + observed sample
    assert_equal(statistic.batch_size, 50)

    kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
    statistic.counter = 0
    res3 = monte_carlo_test(**kwds)
    assert_equal(statistic.counter, 2)  # one batch of 1000 + observed sample
    assert_equal(statistic.batch_size, 1000)

    # batching must not change the p-value
    assert_equal(res1.pvalue, res3.pvalue)
    assert_equal(res2.pvalue, res3.pvalue)
@pytest.mark.parametrize('axis', range(-3, 3))
def test_axis(self, axis):
    """N-d samples are handled correctly for every valid `axis` value."""
    # test that Nd-array samples are handled correctly for valid values
    # of the `axis` parameter
    rng = np.random.default_rng(2389234)
    norm_rvs = self.rvs(stats.norm.rvs, rng)

    # put the "long" (resampled) dimension at the axis under test
    size = [2, 3, 4]
    size[axis] = 100
    x = norm_rvs(size=size)
    expected = stats.skewtest(x, axis=axis)

    def statistic(x, axis):
        return stats.skewtest(x, axis=axis).statistic

    res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
                           n_resamples=20000, axis=axis)

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('alternative', ("less", "greater"))
@pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5))  # skewness
def test_against_ks_1samp(self, alternative, a):
    """`monte_carlo_test` reproduces the p-value of `ks_1samp`."""
    # test that monte_carlo_test can reproduce pvalue of ks_1samp
    rng = np.random.default_rng(65723433)

    x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
    expected = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative)

    def statistic1d(x):
        return stats.ks_1samp(x, stats.norm.cdf, mode='asymp',
                              alternative=alternative).statistic

    norm_rvs = self.rvs(stats.norm.rvs, rng)
    res = monte_carlo_test(x, norm_rvs, statistic1d,
                           n_resamples=1000, vectorized=False,
                           alternative=alternative)

    assert_allclose(res.statistic, expected.statistic)
    # the KS statistic's null distribution is one-sided, so the 'less'
    # Monte Carlo p-value is the complement of the reference p-value
    if alternative == 'greater':
        assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
    elif alternative == 'less':
        assert_allclose(1-res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('hypotest', (stats.skewtest, stats.kurtosistest))
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
@pytest.mark.parametrize('a', np.linspace(-2, 2, 5))  # skewness
def test_against_normality_tests(self, hypotest, alternative, a):
    """`monte_carlo_test` reproduces p-values of normality tests."""
    # test that monte_carlo_test can reproduce pvalue of normality tests
    rng = np.random.default_rng(85723405)

    x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
    expected = hypotest(x, alternative=alternative)

    def statistic(x, axis):
        return hypotest(x, axis=axis).statistic

    norm_rvs = self.rvs(stats.norm.rvs, rng)
    res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
                           alternative=alternative)

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('a', np.arange(-2, 3))  # skewness parameter
def test_against_normaltest(self, a):
    """`monte_carlo_test` reproduces the p-value of `normaltest`."""
    # test that monte_carlo_test can reproduce pvalue of normaltest
    rng = np.random.default_rng(12340513)

    x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
    expected = stats.normaltest(x)

    def statistic(x, axis):
        return stats.normaltest(x, axis=axis).statistic

    norm_rvs = self.rvs(stats.norm.rvs, rng)
    # normaltest is inherently one-sided: large statistic => non-normal
    res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
                           alternative='greater')

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5))  # skewness
def test_against_cramervonmises(self, a):
    """`monte_carlo_test` reproduces the p-value of `cramervonmises`."""
    # test that monte_carlo_test can reproduce pvalue of cramervonmises
    rng = np.random.default_rng(234874135)

    x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
    expected = stats.cramervonmises(x, stats.norm.cdf)

    def statistic1d(x):
        return stats.cramervonmises(x, stats.norm.cdf).statistic

    norm_rvs = self.rvs(stats.norm.rvs, rng)
    res = monte_carlo_test(x, norm_rvs, statistic1d,
                           n_resamples=1000, vectorized=False,
                           alternative='greater')

    assert_allclose(res.statistic, expected.statistic)
    assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
@pytest.mark.parametrize('dist_name', ('norm', 'logistic'))
@pytest.mark.parametrize('i', range(5))
def test_against_anderson(self, dist_name, i):
    """`monte_carlo_test` reproduces `anderson` critical-value tables."""
    # test that monte_carlo_test can reproduce results of `anderson`. Note:
    # `anderson` does not provide a p-value; it provides a list of
    # significance levels and the associated critical value of the test
    # statistic. `i` used to index this list.

    # find the skewness for which the sample statistic matches one of the
    # critical values provided by `stats.anderson`
    def fun(a):
        rng = np.random.default_rng(394295467)
        x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
        expected = stats.anderson(x, dist_name)
        return expected.statistic - expected.critical_values[i]
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)
        sol = root(fun, x0=0)
    assert sol.success

    # get the significance level (p-value) associated with that critical
    # value
    a = sol.x[0]
    rng = np.random.default_rng(394295467)
    x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
    expected = stats.anderson(x, dist_name)
    expected_stat = expected.statistic
    expected_p = expected.significance_level[i]/100

    # perform equivalent Monte Carlo test and compare results
    def statistic1d(x):
        return stats.anderson(x, dist_name).statistic

    dist_rvs = self.rvs(getattr(stats, dist_name).rvs, rng)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)
        res = monte_carlo_test(x, dist_rvs,
                               statistic1d, n_resamples=1000,
                               vectorized=False, alternative='greater')

    assert_allclose(res.statistic, expected_stat)
    assert_allclose(res.pvalue, expected_p, atol=2*self.atol)
def test_p_never_zero(self):
# Use biased estimate of p-value to ensure that p-value is never zero
# per monte_carlo_test reference [1]
rng = np.random.default_rng(2190176673029737545)
x = np.zeros(100)
res = monte_carlo_test(x, rng.random, np.mean,
vectorized=True, alternative='less')
assert res.pvalue == 0.0001
def test_against_ttest_ind(self):
# test that `monte_carlo_test` can reproduce results of `ttest_ind`.
rng = np.random.default_rng(219017667302737545)
data = rng.random(size=(2, 5)), rng.random(size=7) # broadcastable
rvs = rng.normal, rng.normal
def statistic(x, y, axis):
return stats.ttest_ind(x, y, axis).statistic
res = stats.monte_carlo_test(data, rvs, statistic, axis=-1)
ref = stats.ttest_ind(data[0], [data[1]], axis=-1)
assert_allclose(res.statistic, ref.statistic)
assert_allclose(res.pvalue, ref.pvalue, rtol=2e-2)
def test_against_f_oneway(self):
    """`monte_carlo_test` reproduces the result of `f_oneway`."""
    # test that `monte_carlo_test` can reproduce results of `f_oneway`.
    rng = np.random.default_rng(219017667302737545)
    # four groups of unequal sample sizes, vectorized over the first axis
    data = (rng.random(size=(2, 100)), rng.random(size=(2, 101)),
            rng.random(size=(2, 102)), rng.random(size=(2, 103)))
    rvs = rng.normal, rng.normal, rng.normal, rng.normal

    def statistic(*args, axis):
        return stats.f_oneway(*args, axis=axis).statistic

    # the F statistic is one-sided: large values indicate group differences
    res = stats.monte_carlo_test(data, rvs, statistic, axis=-1,
                                 alternative='greater')
    ref = stats.f_oneway(*data, axis=-1)

    assert_allclose(res.statistic, ref.statistic)
    assert_allclose(res.pvalue, ref.pvalue, atol=1e-2)
class TestPermutationTest:
    """Tests for `scipy.stats.permutation_test`."""

    # tolerance for comparisons expected to agree to floating-point rounding
    rtol = 1e-14

    def setup_method(self):
        # fresh deterministic generator for each test method
        self.rng = np.random.default_rng(7170559330470561044)

    # -- Input validation -- #
def test_permutation_test_iv(self):
    """Invalid inputs to `permutation_test` raise informative errors."""

    def stat(x, y, axis):
        return stats.ttest_ind((x, y), axis).statistic

    message = "each sample in `data` must contain two or more ..."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1]), stat)

    message = "`data` must be a tuple containing at least two samples"
    with pytest.raises(ValueError, match=message):
        permutation_test((1,), stat)
    with pytest.raises(TypeError, match=message):
        permutation_test(1, stat)

    message = "`axis` must be an integer."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, axis=1.5)

    message = "`permutation_type` must be in..."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat,
                         permutation_type="ekki")

    message = "`vectorized` must be `True`, `False`, or `None`."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, vectorized=1.5)

    message = "`n_resamples` must be a positive integer."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=-1000)

    message = "`n_resamples` must be a positive integer."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=1000.5)

    message = "`batch` must be a positive integer or None."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=-1000)

    message = "`batch` must be a positive integer or None."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=1000.5)

    message = "`alternative` must be in..."
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat, alternative='ekki')

    message = "'herring' cannot be used to seed a"
    with pytest.raises(ValueError, match=message):
        permutation_test(([1, 2, 3], [1, 2, 3]), stat,
                         random_state='herring')
# -- Test Parameters -- #
@pytest.mark.parametrize('random_state', [np.random.RandomState,
                                          np.random.default_rng])
@pytest.mark.parametrize('permutation_type',
                         ['pairings', 'samples', 'independent'])
def test_batch(self, permutation_type, random_state):
    """`batch` bounds how many permutations are passed to `statistic`
    per call, regardless of permutation type or RNG flavor."""
    # make sure that the `batch` parameter is respected by checking the
    # maximum batch size provided in calls to `statistic`
    x = self.rng.random(10)
    y = self.rng.random(10)

    def statistic(x, y, axis):
        # track largest batch seen and total number of calls
        batch_size = 1 if x.ndim == 1 else len(x)
        statistic.batch_size = max(batch_size, statistic.batch_size)
        statistic.counter += 1
        return np.mean(x, axis=axis) - np.mean(y, axis=axis)
    statistic.counter = 0
    statistic.batch_size = 0

    kwds = {'n_resamples': 1000, 'permutation_type': permutation_type,
            'vectorized': True}
    res1 = stats.permutation_test((x, y), statistic, batch=1,
                                  random_state=random_state(0), **kwds)
    assert_equal(statistic.counter, 1001)  # 1000 resamples + observed
    assert_equal(statistic.batch_size, 1)

    statistic.counter = 0
    res2 = stats.permutation_test((x, y), statistic, batch=50,
                                  random_state=random_state(0), **kwds)
    assert_equal(statistic.counter, 21)  # 1000/50 batches + observed
    assert_equal(statistic.batch_size, 50)

    statistic.counter = 0
    res3 = stats.permutation_test((x, y), statistic, batch=1000,
                                  random_state=random_state(0), **kwds)
    assert_equal(statistic.counter, 2)  # one batch of 1000 + observed
    assert_equal(statistic.batch_size, 1000)

    # batching must not change the p-value
    assert_equal(res1.pvalue, res3.pvalue)
    assert_equal(res2.pvalue, res3.pvalue)
@pytest.mark.parametrize('random_state', [np.random.RandomState,
                                          np.random.default_rng])
@pytest.mark.parametrize('permutation_type, exact_size',
                         [('pairings', special.factorial(3)**2),
                          ('samples', 2**3),
                          ('independent', special.binom(6, 3))])
def test_permutations(self, permutation_type, exact_size, random_state):
    """`n_resamples` limits the null-distribution size; omitting it
    yields the exact distribution for each permutation type."""
    # make sure that the `permutations` parameter is respected by checking
    # the size of the null distribution
    x = self.rng.random(3)
    y = self.rng.random(3)

    def statistic(x, y, axis):
        return np.mean(x, axis=axis) - np.mean(y, axis=axis)

    kwds = {'permutation_type': permutation_type,
            'vectorized': True}
    res = stats.permutation_test((x, y), statistic, n_resamples=3,
                                 random_state=random_state(0), **kwds)
    assert_equal(res.null_distribution.size, 3)

    res = stats.permutation_test((x, y), statistic, **kwds)
    assert_equal(res.null_distribution.size, exact_size)
# -- Randomized Permutation Tests -- #

# To get reasonable accuracy, these next three tests are somewhat slow.
# Originally, I had them passing for all combinations of permutation type,
# alternative, and RNG, but that takes too long for CI. Instead, split
# into three tests, each testing a particular combination of the three
# parameters.

def test_randomized_test_against_exact_both(self):
    """Randomized and exact tests agree for
    permutation_type='independent' (historically named 'both')."""
    # check that the randomized and exact tests agree to reasonable
    # precision for permutation_type='independent'
    alternative, rng = 'less', 0

    # more distinct partitions than requested resamples, so the first
    # call below is genuinely randomized
    nx, ny, permutations = 8, 9, 24000
    assert special.binom(nx + ny, nx) > permutations

    x = stats.norm.rvs(size=nx)
    y = stats.norm.rvs(size=ny)
    data = x, y

    def statistic(x, y, axis):
        return np.mean(x, axis=axis) - np.mean(y, axis=axis)

    kwds = {'vectorized': True, 'permutation_type': 'independent',
            'batch': 100, 'alternative': alternative, 'random_state': rng}
    res = permutation_test(data, statistic, n_resamples=permutations,
                           **kwds)
    res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)

    assert res.statistic == res2.statistic
    assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
@pytest.mark.slow()
def test_randomized_test_against_exact_samples(self):
    """Randomized and exact tests agree for permutation_type='samples'."""
    # check that the randomized and exact tests agree to reasonable
    # precision for permutation_type='samples'
    alternative, rng = 'greater', None

    # more sign flips (2**nx) than requested resamples, so the first
    # call below is genuinely randomized
    nx, ny, permutations = 15, 15, 32000
    assert 2**nx > permutations

    x = stats.norm.rvs(size=nx)
    y = stats.norm.rvs(size=ny)
    data = x, y

    def statistic(x, y, axis):
        return np.mean(x - y, axis=axis)

    kwds = {'vectorized': True, 'permutation_type': 'samples',
            'batch': 100, 'alternative': alternative, 'random_state': rng}
    res = permutation_test(data, statistic, n_resamples=permutations,
                           **kwds)
    res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)

    assert res.statistic == res2.statistic
    assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
def test_randomized_test_against_exact_pairings(self):
    """Randomized and exact tests agree for permutation_type='pairings'.

    Fix: this test previously passed ``permutation_type='samples'``,
    which contradicted its name/comment and made the check vacuous --
    with a single sample of size 8 there are only 2**8 = 256 distinct
    sign flips, fewer than ``permutations`` (40000), so *both* calls
    below performed the exact test and trivially agreed. With
    'pairings' there are 8! = 40320 > 40000 orderings, so the first
    call is genuinely randomized, as the guard assertion intends.
    """
    # check that the randomized and exact tests agree to reasonable
    # precision for permutation_type='pairings'
    alternative, rng = 'two-sided', self.rng

    nx, ny, permutations = 8, 8, 40000
    assert special.factorial(nx) > permutations

    x = stats.norm.rvs(size=nx)
    y = stats.norm.rvs(size=ny)
    data = [x]  # `y` is captured by the statistic's closure

    def statistic1d(x):
        return stats.pearsonr(x, y)[0]

    statistic = _resampling._vectorize_statistic(statistic1d)

    kwds = {'vectorized': True, 'permutation_type': 'pairings',
            'batch': 100, 'alternative': alternative, 'random_state': rng}
    res = permutation_test(data, statistic, n_resamples=permutations,
                           **kwds)
    res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)

    assert res.statistic == res2.statistic
    assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
@pytest.mark.parametrize('alternative', ('less', 'greater'))
# Different conventions for two-sided p-value here VS ttest_ind.
# Eventually, we can add multiple options for the two-sided alternative
# here in permutation_test.
@pytest.mark.parametrize('permutations', (30, 1e9))
@pytest.mark.parametrize('axis', (0, 1, 2))
def test_against_permutation_ttest(self, alternative, permutations, axis):
    """`permutation_test` matches `ttest_ind(..., permutations=...)`."""
    # check that this function and ttest_ind with permutations give
    # essentially identical results.
    x = np.arange(3*4*5).reshape(3, 4, 5)
    y = np.moveaxis(np.arange(4)[:, None, None], 0, axis)

    rng1 = np.random.default_rng(4337234444626115331)
    res1 = stats.ttest_ind(x, y, permutations=permutations, axis=axis,
                           random_state=rng1, alternative=alternative)

    def statistic(x, y, axis):
        return stats.ttest_ind(x, y, axis=axis).statistic

    # identical seed so both draw the same permutations
    rng2 = np.random.default_rng(4337234444626115331)
    res2 = permutation_test((x, y), statistic, vectorized=True,
                            n_resamples=permutations,
                            alternative=alternative, axis=axis,
                            random_state=rng2)

    assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
    assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
# -- Independent (Unpaired) Sample Tests -- #
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_ks_2samp(self, alternative):
    """Exact permutation test reproduces the exact `ks_2samp` p-value."""
    x = self.rng.normal(size=4, scale=1)
    y = self.rng.normal(size=5, loc=3, scale=3)

    expected = stats.ks_2samp(x, y, alternative=alternative, mode='exact')

    def statistic1d(x, y):
        return stats.ks_2samp(x, y, mode='asymp',
                              alternative=alternative).statistic

    # ks_2samp is always a one-tailed 'greater' test
    # it's the statistic that changes (D+ vs D- vs max(D+, D-))
    res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                           alternative='greater', random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_ansari(self, alternative):
    """Exact permutation test reproduces the `ansari` p-value."""
    x = self.rng.normal(size=4, scale=1)
    y = self.rng.normal(size=5, scale=3)

    # ansari has a different convention for 'alternative'
    alternative_correspondence = {"less": "greater",
                                  "greater": "less",
                                  "two-sided": "two-sided"}
    alternative_scipy = alternative_correspondence[alternative]
    expected = stats.ansari(x, y, alternative=alternative_scipy)

    def statistic1d(x, y):
        return stats.ansari(x, y).statistic

    res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                           alternative=alternative, random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_mannwhitneyu(self, alternative):
    """Exact permutation test reproduces `mannwhitneyu` on 3-d samples."""
    x = stats.uniform.rvs(size=(3, 5, 2), loc=0, random_state=self.rng)
    y = stats.uniform.rvs(size=(3, 5, 2), loc=0.05, random_state=self.rng)

    expected = stats.mannwhitneyu(x, y, axis=1, alternative=alternative)

    def statistic(x, y, axis):
        return stats.mannwhitneyu(x, y, axis=axis).statistic

    res = permutation_test((x, y), statistic, vectorized=True,
                           n_resamples=np.inf, alternative=alternative,
                           axis=1, random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
def test_against_cvm(self):
    """Exact permutation test reproduces `cramervonmises_2samp`."""
    x = stats.norm.rvs(size=4, scale=1, random_state=self.rng)
    y = stats.norm.rvs(size=5, loc=3, scale=3, random_state=self.rng)

    expected = stats.cramervonmises_2samp(x, y, method='exact')

    def statistic1d(x, y):
        return stats.cramervonmises_2samp(x, y,
                                          method='asymptotic').statistic

    # cramervonmises_2samp has only one alternative, greater
    res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                           alternative='greater', random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.xslow()
@pytest.mark.parametrize('axis', (-1, 2))
def test_vectorized_nsamp_ptype_both(self, axis):
    """'independent' permutations of three broadcastable N-d samples
    approximate Kruskal-Wallis asymptotic p-values."""
    # Test that permutation_test with permutation_type='independent' works
    # properly for a 3-sample statistic with nd array samples of different
    # (but compatible) shapes and ndims. Show that exact permutation test
    # and random permutation tests approximate SciPy's asymptotic pvalues
    # and that exact and random permutation test results are even closer
    # to one another (than they are to the asymptotic results).

    # Three samples, different (but compatible) shapes with different ndims
    rng = np.random.default_rng(6709265303529651545)
    x = rng.random(size=(3))
    y = rng.random(size=(1, 3, 2))
    z = rng.random(size=(2, 1, 4))
    data = (x, y, z)

    # Define the statistic (and pvalue for comparison)
    def statistic1d(*data):
        return stats.kruskal(*data).statistic

    def pvalue1d(*data):
        return stats.kruskal(*data).pvalue

    statistic = _resampling._vectorize_statistic(statistic1d)
    pvalue = _resampling._vectorize_statistic(pvalue1d)

    # Calculate the expected results
    x2 = np.broadcast_to(x, (2, 3, 3))  # broadcast manually because
    y2 = np.broadcast_to(y, (2, 3, 2))  # _vectorize_statistic doesn't
    z2 = np.broadcast_to(z, (2, 3, 4))
    expected_statistic = statistic(x2, y2, z2, axis=axis)
    expected_pvalue = pvalue(x2, y2, z2, axis=axis)

    # Calculate exact and randomized permutation results
    kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
            'permutation_type': 'independent', 'random_state': self.rng}
    res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
    res2 = permutation_test(data, statistic1d, n_resamples=1000, **kwds)

    # Check results
    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_pvalue, atol=6e-2)
    assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)
# -- Paired-Sample Tests -- #
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_wilcoxon(self, alternative):
    """1- and 2-sample paired permutation tests reproduce `wilcoxon`."""
    x = stats.uniform.rvs(size=(3, 6, 2), loc=0, random_state=self.rng)
    y = stats.uniform.rvs(size=(3, 6, 2), loc=0.05, random_state=self.rng)

    # We'll check both 1- and 2-sample versions of the same test;
    # we expect identical results to wilcoxon in all cases.
    def statistic_1samp_1d(z):
        # 'less' ensures we get the same of two statistics every time
        return stats.wilcoxon(z, alternative='less').statistic

    def statistic_2samp_1d(x, y):
        return stats.wilcoxon(x, y, alternative='less').statistic

    def test_1d(x, y):
        return stats.wilcoxon(x, y, alternative=alternative)

    test = _resampling._vectorize_statistic(test_1d)

    expected = test(x, y, axis=1)
    expected_stat = expected[0]
    expected_p = expected[1]

    kwds = {'vectorized': False, 'axis': 1, 'alternative': alternative,
            'permutation_type': 'samples', 'random_state': self.rng,
            'n_resamples': np.inf}
    res1 = permutation_test((x-y,), statistic_1samp_1d, **kwds)
    res2 = permutation_test((x, y), statistic_2samp_1d, **kwds)

    # `wilcoxon` returns a different statistic with 'two-sided'
    assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
    if alternative != 'two-sided':
        assert_allclose(res2.statistic, expected_stat, rtol=self.rtol)

    assert_allclose(res2.pvalue, expected_p, rtol=self.rtol)
    assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
def test_against_binomtest(self, alternative):
    """Sign-flipping permutation test reproduces `binomtest` (p=0.5)."""
    x = self.rng.integers(0, 2, size=10)
    x[x == 0] = -1
    # More naturally, the test would flip elements between 0 and one.
    # However, permutation_test will flip the _signs_ of the elements.
    # So we have to work with +1/-1 instead of 1/0.

    def statistic(x, axis=0):
        return np.sum(x > 0, axis=axis)

    k, n, p = statistic(x), 10, 0.5
    expected = stats.binomtest(k, n, p, alternative=alternative)

    res = stats.permutation_test((x,), statistic, vectorized=True,
                                 permutation_type='samples',
                                 n_resamples=np.inf, random_state=self.rng,
                                 alternative=alternative)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
# -- Exact Association Tests -- #

def test_against_kendalltau(self):
    """'pairings' permutation test reproduces exact `kendalltau`."""
    x = self.rng.normal(size=6)
    y = x + self.rng.normal(size=6)

    expected = stats.kendalltau(x, y, method='exact')

    def statistic1d(x):
        # `y` is captured by closure; only `x` is permuted
        return stats.kendalltau(x, y, method='asymptotic').statistic

    # kendalltau currently has only one alternative, two-sided
    res = permutation_test((x,), statistic1d, permutation_type='pairings',
                           n_resamples=np.inf, random_state=self.rng)

    assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
@pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
def test_against_fisher_exact(self, alternative):
    """'pairings' permutation test reproduces `fisher_exact` p-values."""

    def statistic(x,):
        # number of (1, 1) co-occurrences; `y` captured by closure
        return np.sum((x == 1) & (y == 1))

    # x and y are binary random variables with some dependence
    rng = np.random.default_rng(6235696159000529929)
    x = (rng.random(7) > 0.6).astype(float)
    y = (rng.random(7) + 0.25*x > 0.6).astype(float)
    tab = stats.contingency.crosstab(x, y)[1]

    res = permutation_test((x,), statistic, permutation_type='pairings',
                           n_resamples=np.inf, alternative=alternative,
                           random_state=rng)
    res2 = stats.fisher_exact(tab, alternative=alternative)

    assert_allclose(res.pvalue, res2[1])
@pytest.mark.xslow()
@pytest.mark.parametrize('axis', (-2, 1))
def test_vectorized_nsamp_ptype_samples(self, axis):
    """'pairings' permutations of three broadcastable N-d samples
    reproduce `page_trend_test` exact p-values."""
    # Test that permutation_test with permutation_type='samples' works
    # properly for a 3-sample statistic with nd array samples of different
    # (but compatible) shapes and ndims. Show that exact permutation test
    # reproduces SciPy's exact pvalue and that random permutation test
    # approximates it.
    x = self.rng.random(size=(2, 4, 3))
    y = self.rng.random(size=(1, 4, 3))
    z = self.rng.random(size=(2, 4, 1))
    x = stats.rankdata(x, axis=axis)
    y = stats.rankdata(y, axis=axis)
    z = stats.rankdata(z, axis=axis)
    y = y[0]  # to check broadcast with different ndim
    data = (x, y, z)

    def statistic1d(*data):
        return stats.page_trend_test(data, ranked=True,
                                     method='asymptotic').statistic

    def pvalue1d(*data):
        return stats.page_trend_test(data, ranked=True,
                                     method='exact').pvalue

    statistic = _resampling._vectorize_statistic(statistic1d)
    pvalue = _resampling._vectorize_statistic(pvalue1d)

    expected_statistic = statistic(*np.broadcast_arrays(*data), axis=axis)
    expected_pvalue = pvalue(*np.broadcast_arrays(*data), axis=axis)

    # Let's forgive this use of an integer seed, please.
    kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
            'permutation_type': 'pairings', 'random_state': 0}
    res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
    res2 = permutation_test(data, statistic1d, n_resamples=5000, **kwds)

    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_pvalue, rtol=self.rtol)
    assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)
# -- Test Against External References -- #

# Reference values computed with SAS PROC NPAR1WAY (see test_with_ties
# docstring for the SAS program used).
tie_case_1 = {'x': [1, 2, 3, 4], 'y': [1.5, 2, 2.5],
              'expected_less': 0.2000000000,
              'expected_2sided': 0.4,  # 2*expected_less
              'expected_Pr_gte_S_mean': 0.3428571429,  # see note below
              'expected_statistic': 7.5,
              'expected_avg': 9.142857, 'expected_std': 1.40698}
tie_case_2 = {'x': [111, 107, 100, 99, 102, 106, 109, 108],
              'y': [107, 108, 106, 98, 105, 103, 110, 105, 104],
              'expected_less': 0.1555738379,
              'expected_2sided': 0.3111476758,
              'expected_Pr_gte_S_mean': 0.2969971205,  # see note below
              'expected_statistic': 32.5,
              'expected_avg': 38.117647, 'expected_std': 5.172124}
@pytest.mark.xslow()  # only the second case is slow, really
@pytest.mark.parametrize('case', (tie_case_1, tie_case_2))
def test_with_ties(self, case):
    """
    Results above from SAS PROC NPAR1WAY, e.g.

    DATA myData;
    INPUT X Y;
    CARDS;
    1 1
    1 2
    1 3
    1 4
    2 1.5
    2 2
    2 2.5

    ods graphics on;
    proc npar1way AB data=myData;
        class X;
        EXACT;
    run;
    ods graphics off;

    Note: SAS provides Pr >= |S-Mean|, which is different from our
    definition of a two-sided p-value.

    """
    x = case['x']
    y = case['y']

    expected_statistic = case['expected_statistic']
    expected_less = case['expected_less']
    expected_2sided = case['expected_2sided']
    expected_Pr_gte_S_mean = case['expected_Pr_gte_S_mean']
    expected_avg = case['expected_avg']
    expected_std = case['expected_std']

    def statistic1d(x, y):
        return stats.ansari(x, y).statistic

    with np.testing.suppress_warnings() as sup:
        sup.filter(UserWarning, "Ties preclude use of exact statistic")
        res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                               alternative='less')
        res2 = permutation_test((x, y), statistic1d, n_resamples=np.inf,
                                alternative='two-sided')

    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_less, atol=1e-10)
    assert_allclose(res2.pvalue, expected_2sided, atol=1e-10)
    assert_allclose(res2.null_distribution.mean(), expected_avg, rtol=1e-6)
    assert_allclose(res2.null_distribution.std(), expected_std, rtol=1e-6)

    # SAS provides Pr >= |S-Mean|; might as well check against that, too
    S = res.statistic
    mean = res.null_distribution.mean()
    n = len(res.null_distribution)
    Pr_gte_S_mean = np.sum(np.abs(res.null_distribution-mean)
                           >= np.abs(S-mean))/n
    assert_allclose(expected_Pr_gte_S_mean, Pr_gte_S_mean)
@pytest.mark.parametrize('alternative, expected_pvalue',
                         (('less', 0.9708333333333),
                          ('greater', 0.05138888888889),
                          ('two-sided', 0.1027777777778)))
def test_against_spearmanr_in_R(self, alternative, expected_pvalue):
    """
    Results above from R cor.test, e.g.

    options(digits=16)
    x <- c(1.76405235, 0.40015721, 0.97873798,
    2.2408932, 1.86755799, -0.97727788)
    y <- c(2.71414076, 0.2488, 0.87551913,
    2.6514917, 2.01160156, 0.47699563)
    cor.test(x, y, method = "spearm", alternative = "t")
    """
    # data comes from
    # np.random.seed(0)
    # x = stats.norm.rvs(size=6)
    # y = x + stats.norm.rvs(size=6)
    x = [1.76405235, 0.40015721, 0.97873798,
         2.2408932, 1.86755799, -0.97727788]
    y = [2.71414076, 0.2488, 0.87551913,
         2.6514917, 2.01160156, 0.47699563]
    expected_statistic = 0.7714285714285715

    def statistic1d(x):
        # `y` captured by closure; only `x` is permuted
        return stats.spearmanr(x, y).statistic

    res = permutation_test((x,), statistic1d, permutation_type='pairings',
                           n_resamples=np.inf, alternative=alternative)

    assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
    assert_allclose(res.pvalue, expected_pvalue, atol=1e-13)
@pytest.mark.parametrize("batch", (-1, 0))
def test_batch_generator_iv(self, batch):
    """Non-positive `batch` values are rejected by `_batch_generator`."""
    with pytest.raises(ValueError, match="`batch` must be positive."):
        list(_resampling._batch_generator([1, 2, 3], batch))
# (iterable, batch size, expected list of batches)
batch_generator_cases = [(range(0), 3, []),
                         (range(6), 3, [[0, 1, 2], [3, 4, 5]]),
                         (range(8), 3, [[0, 1, 2], [3, 4, 5], [6, 7]])]

@pytest.mark.parametrize("iterable, batch, expected",
                         batch_generator_cases)
def test_batch_generator(self, iterable, batch, expected):
    """`_batch_generator` chunks an iterable, last batch may be short."""
    got = list(_resampling._batch_generator(iterable, batch))
    assert got == expected
def test_finite_precision_statistic(self):
# Some statistics return numerically distinct values when the values
# should be equal in theory. Test that `permutation_test` accounts
# for this in some way.
x = [1, 2, 4, 3]
y = [2, 4, 6, 8]
def statistic(x, y):
return stats.pearsonr(x, y)[0]
res = stats.permutation_test((x, y), statistic, vectorized=False,
permutation_type='pairings')
r, pvalue, null = res.statistic, res.pvalue, res.null_distribution
correct_p = 2 * np.sum(null >= r - 1e-14) / len(null)
assert pvalue == correct_p == 1/3
# Compare against other exact correlation tests using R corr.test
# options(digits=16)
# x = c(1, 2, 4, 3)
# y = c(2, 4, 6, 8)
# cor.test(x, y, alternative = "t", method = "spearman") # 0.333333333
# cor.test(x, y, alternative = "t", method = "kendall") # 0.333333333
def test_all_partitions_concatenated():
    """`_all_partitions_concatenated` must enumerate every distinct way
    of splitting ``range(sum(n))`` into groups of the given sizes,
    each exactly once."""
    sizes = np.array([3, 2, 4], dtype=int)
    boundaries = np.cumsum(sizes)

    seen = set()
    count = 0
    for concatenated in _resampling._all_partitions_concatenated(sizes):
        count += 1
        groups = np.split(concatenated, boundaries[:-1])
        # order within a group is irrelevant -> frozensets
        seen.add(tuple(frozenset(g) for g in groups))

    # multinomial count: choose each group from what remains
    expected = np.prod([special.binom(sum(sizes[i:]), sum(sizes[i+1:]))
                        for i in range(len(sizes)-1)])

    assert_equal(count, expected)       # no partition repeated or skipped
    assert_equal(len(seen), expected)   # all partitions distinct
@pytest.mark.parametrize('fun_name',
                         ['bootstrap', 'permutation_test', 'monte_carlo_test'])
def test_parameter_vectorized(fun_name):
    """`vectorized` in {None, True, False} is honored by each resampling
    function; the asserts inside the statistics do the actual checking."""
    # Check that parameter `vectorized` is working as desired for all
    # resampling functions. Results don't matter; just don't fail asserts.
    rng = np.random.default_rng(75245098234592)
    sample = rng.random(size=10)

    def rvs(size):  # needed by `monte_carlo_test`
        return stats.norm.rvs(size=size, random_state=rng)

    fun_options = {'bootstrap': {'data': (sample,), 'random_state': rng,
                                 'method': 'percentile'},
                   'permutation_test': {'data': (sample,), 'random_state': rng,
                                        'permutation_type': 'samples'},
                   'monte_carlo_test': {'sample': sample, 'rvs': rvs}}
    common_options = {'n_resamples': 100}

    fun = getattr(stats, fun_name)
    options = fun_options[fun_name]
    options.update(common_options)

    def statistic(x, axis):
        # vectorized (True or auto-detected): x may be a batch (ndim > 1)
        assert x.ndim > 1 or np.array_equal(x, sample)
        return np.mean(x, axis=axis)
    fun(statistic=statistic, vectorized=None, **options)
    fun(statistic=statistic, vectorized=True, **options)

    def statistic(x):
        # non-vectorized: statistic must receive 1-D slices only
        assert x.ndim == 1
        return np.mean(x)
    fun(statistic=statistic, vectorized=None, **options)
    fun(statistic=statistic, vectorized=False, **options)
| 70,711
| 39.944991
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_distributions.py
|
"""
Test functions for stats module
"""
import warnings
import re
import sys
import pickle
from pathlib import Path
import os
import json
import platform
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns,
assert_array_less, suppress_warnings, IS_PYPY)
import pytest
from pytest import raises as assert_raises
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy._lib._util import check_random_state
from scipy.integrate import (IntegrationWarning, quad, trapezoid,
cumulative_trapezoid)
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy, polygamma, entr
from scipy.stats._distr_params import distcont, invdistcont
from .test_discrete_basic import distdiscrete, invdistdiscrete
from scipy.stats._continuous_distns import FitDataError, _argus_phi
from scipy.optimize import root, fmin
from itertools import product
# Module-level test-configuration flags.
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Failing on macOS 11, Intel CPUs. See gh-14901
MACOS_INTEL = (sys.platform == 'darwin') and (platform.machine() == 'x86_64')
# distributions to skip while testing the fix for the support method
# introduced in gh-13294. These distributions are skipped as they
# always return a non-nan support for every parametrization.
skip_test_support_gh13294_regression = ['tukeylambda', 'pearson3']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = f'{a} does not have attribute {b}'
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    # gh-3802: the distribution generator classes must stay importable
    # from scipy.stats.distributions
    _assert_hasattr(scipy.stats.distributions, 'f_gen')
def test_distributions_submodule():
    # `scipy.stats.distributions.__all__` must expose exactly the continuous
    # and discrete distribution names plus a few infrastructure objects.
    actual = set(scipy.stats.distributions.__all__)
    continuous = [dist[0] for dist in distcont]   # continuous dist names
    discrete = [dist[0] for dist in distdiscrete]  # discrete dist names
    other = ['rv_discrete', 'rv_continuous', 'rv_histogram',
             'entropy', 'trapz']
    # drop repr-style entries such as
    # <scipy.stats._continuous_distns.trapezoid_gen at 0x1df83bbc688>
    expected = {name for name in continuous + discrete + other
                if not str(name).startswith('<')}
    assert actual == expected
class TestVonMises:
    """Tests for `stats.vonmises` and `stats.vonmises_line`."""
    @pytest.mark.parametrize('k', [0.1, 1, 101])
    @pytest.mark.parametrize('x', [0, 1, np.pi, 10, 100])
    def test_vonmises_periodic(self, k, x):
        # The pdf must be periodic with period 2*pi*scale; the cdf is
        # periodic up to an additive integer, hence the comparison mod 1.
        def check_vonmises_pdf_periodic(k, L, s, x):
            vm = stats.vonmises(k, loc=L, scale=s)
            assert_almost_equal(vm.pdf(x), vm.pdf(x % (2 * np.pi * s)))
        def check_vonmises_cdf_periodic(k, L, s, x):
            vm = stats.vonmises(k, loc=L, scale=s)
            assert_almost_equal(vm.cdf(x) % 1,
                                vm.cdf(x % (2 * np.pi * s)) % 1)
        check_vonmises_pdf_periodic(k, 0, 1, x)
        check_vonmises_pdf_periodic(k, 1, 1, x)
        check_vonmises_pdf_periodic(k, 0, 10, x)
        check_vonmises_cdf_periodic(k, 0, 1, x)
        check_vonmises_cdf_periodic(k, 1, 1, x)
        check_vonmises_cdf_periodic(k, 0, 10, x)
    def test_vonmises_line_support(self):
        # vonmises_line has fixed support [-pi, pi]
        assert_equal(stats.vonmises_line.a, -np.pi)
        assert_equal(stats.vonmises_line.b, np.pi)
    def test_vonmises_numerical(self):
        # very large kappa must not break the cdf at the center of symmetry
        vm = stats.vonmises(800)
        assert_almost_equal(vm.cdf(0), 0.5)
    # Expected values of the vonmises PDF were computed using
    # mpmath with 50 digits of precision:
    #
    # def vmpdf_mp(x, kappa):
    #     x = mpmath.mpf(x)
    #     kappa = mpmath.mpf(kappa)
    #     num = mpmath.exp(kappa*mpmath.cos(x))
    #     den = 2 * mpmath.pi * mpmath.besseli(0, kappa)
    #     return num/den
    @pytest.mark.parametrize('x, kappa, expected_pdf',
                             [(0.1, 0.01, 0.16074242744907072),
                              (0.1, 25.0, 1.7515464099118245),
                              (0.1, 800, 0.2073272544458798),
                              (2.0, 0.01, 0.15849003875385817),
                              (2.0, 25.0, 8.356882934278192e-16),
                              (2.0, 800, 0.0)])
    def test_vonmises_pdf(self, x, kappa, expected_pdf):
        pdf = stats.vonmises.pdf(x, kappa)
        assert_allclose(pdf, expected_pdf, rtol=1e-15)
    # Expected values of the vonmises entropy were computed using
    # mpmath with 50 digits of precision:
    #
    # def vonmises_entropy(kappa):
    #     kappa = mpmath.mpf(kappa)
    #     return (-kappa * mpmath.besseli(1, kappa) /
    #             mpmath.besseli(0, kappa) + mpmath.log(2 * mpmath.pi *
    #             mpmath.besseli(0, kappa)))
    # >>> float(vonmises_entropy(kappa))
    @pytest.mark.parametrize('kappa, expected_entropy',
                             [(1, 1.6274014590199897),
                              (5, 0.6756431570114528),
                              (100, -0.8811275441649473),
                              (1000, -2.03468891852547),
                              (2000, -2.3813876496587847)])
    def test_vonmises_entropy(self, kappa, expected_entropy):
        entropy = stats.vonmises.entropy(kappa)
        assert_allclose(entropy, expected_entropy, rtol=1e-13)
    def test_vonmises_rvs_gh4598(self):
        # check that random variates wrap around as discussed in gh-4598
        seed = abs(hash('von_mises_rvs'))
        rng1 = np.random.default_rng(seed)
        rng2 = np.random.default_rng(seed)
        rng3 = np.random.default_rng(seed)
        rvs1 = stats.vonmises(1, loc=0, scale=1).rvs(random_state=rng1)
        rvs2 = stats.vonmises(1, loc=2*np.pi, scale=1).rvs(random_state=rng2)
        rvs3 = stats.vonmises(1, loc=0,
                              scale=(2*np.pi/abs(rvs1)+1)).rvs(random_state=rng3)
        assert_allclose(rvs1, rvs2, atol=1e-15)
        assert_allclose(rvs1, rvs3, atol=1e-15)
    # Expected values of the vonmises LOGPDF were computed
    # using wolfram alpha:
    # kappa * cos(x) - log(2*pi*I0(kappa))
    @pytest.mark.parametrize('x, kappa, expected_logpdf',
                             [(0.1, 0.01, -1.8279520246003170),
                              (0.1, 25.0, 0.5604990605420549),
                              (0.1, 800, -1.5734567947337514),
                              (2.0, 0.01, -1.8420635346185686),
                              (2.0, 25.0, -34.7182759850871489),
                              (2.0, 800, -1130.4942582548682739)])
    def test_vonmises_logpdf(self, x, kappa, expected_logpdf):
        logpdf = stats.vonmises.logpdf(x, kappa)
        assert_allclose(logpdf, expected_logpdf, rtol=1e-15)
    def test_vonmises_expect(self):
        """
        Test that the vonmises expectation values are
        computed correctly. This test checks that the
        numeric integration estimates the correct normalization
        (1) and mean angle (loc). These expectations are
        independent of the chosen 2pi interval.
        """
        rng = np.random.default_rng(6762668991392531563)
        loc, kappa, lb = rng.random(3) * 10
        res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1)
        assert_allclose(res, 1)
        assert np.issubdtype(res.dtype, np.floating)
        bounds = lb, lb + 2 * np.pi
        res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1, *bounds)
        assert_allclose(res, 1)
        assert np.issubdtype(res.dtype, np.floating)
        bounds = lb, lb + 2 * np.pi
        res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: np.exp(1j*x),
                                                          *bounds, complex_func=1)
        assert_allclose(np.angle(res), loc % (2*np.pi))
        assert np.issubdtype(res.dtype, np.complexfloating)
    @pytest.mark.xslow
    @pytest.mark.parametrize("rvs_loc", [0, 2])
    @pytest.mark.parametrize("rvs_shape", [1, 100, 1e8])
    @pytest.mark.parametrize('fix_loc', [True, False])
    @pytest.mark.parametrize('fix_shape', [True, False])
    def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_shape,
                                    fix_loc, fix_shape):
        # the analytical fit must beat (or tie) the generic optimizer
        if fix_shape and fix_loc:
            pytest.skip("Nothing to fit.")
        rng = np.random.default_rng(6762668991392531563)
        data = stats.vonmises.rvs(rvs_shape, size=1000, loc=rvs_loc,
                                  random_state=rng)
        kwds = {'fscale': 1}
        if fix_loc:
            kwds['floc'] = rvs_loc
        if fix_shape:
            kwds['f0'] = rvs_shape
        _assert_less_or_close_loglike(stats.vonmises, data,
                                      stats.vonmises.nnlf, **kwds)
    @pytest.mark.parametrize('loc', [-0.5 * np.pi, 0, np.pi])
    @pytest.mark.parametrize('kappa_tol', [(1e-1, 5e-2), (1e2, 1e-2),
                                           (1e5, 1e-2)])
    def test_vonmises_fit_all(self, kappa_tol, loc):
        rng = np.random.default_rng(6762668991392531563)
        kappa, tol = kappa_tol
        data = stats.vonmises(loc=loc, kappa=kappa).rvs(100000,
                                                        random_state=rng)
        kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data)
        assert scale_fit == 1
        # compare fitted and true location as unit vectors so the check is
        # insensitive to 2*pi wrap-around
        loc_vec = np.array([np.cos(loc), np.sin(loc)])
        loc_fit_vec = np.array([np.cos(loc_fit), np.sin(loc_fit)])
        angle = np.arccos(loc_vec.dot(loc_fit_vec))
        assert_allclose(angle, 0, atol=tol, rtol=0)
        assert_allclose(kappa, kappa_fit, rtol=tol)
    def test_vonmises_fit_shape(self):
        rng = np.random.default_rng(6762668991392531563)
        loc = 0.25*np.pi
        kappa = 10
        data = stats.vonmises(loc=loc, kappa=kappa).rvs(100000, random_state=rng)
        kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data, floc=loc)
        assert loc_fit == loc
        assert scale_fit == 1
        assert_allclose(kappa, kappa_fit, rtol=1e-2)
    @pytest.mark.xslow
    @pytest.mark.parametrize('loc', [-0.5 * np.pi, -0.9 * np.pi])
    def test_vonmises_fit_bad_floc(self, loc):
        # with a badly misspecified fixed location the fitted kappa must
        # collapse to the smallest positive float
        data = [-0.92923506, -0.32498224, 0.13054989, -0.97252014, 2.79658071,
                -0.89110948, 1.22520295, 1.44398065, 2.49163859, 1.50315096,
                3.05437696, -2.73126329, -3.06272048, 1.64647173, 1.94509247,
                -1.14328023, 0.8499056, 2.36714682, -1.6823179, -0.88359996]
        data = np.asarray(data)
        kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data, floc=loc)
        assert kappa_fit == np.finfo(float).tiny
        _assert_less_or_close_loglike(stats.vonmises, data,
                                      stats.vonmises.nnlf, fscale=1, floc=loc)
    @pytest.mark.parametrize('sign', [-1, 1])
    def test_vonmises_fit_unwrapped_data(self, sign):
        # the fit must be invariant to shifting the sample by multiples of
        # 2*pi, and the fitted location must land in (-pi, pi)
        rng = np.random.default_rng(6762668991392531563)
        data = stats.vonmises(loc=sign*0.5*np.pi, kappa=10).rvs(100000,
                                                                random_state=rng)
        shifted_data = data + 4*np.pi
        kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data)
        kappa_fit_shifted, loc_fit_shifted, _ = stats.vonmises.fit(shifted_data)
        assert_allclose(loc_fit, loc_fit_shifted)
        assert_allclose(kappa_fit, kappa_fit_shifted)
        assert scale_fit == 1
        assert -np.pi < loc_fit < np.pi
def _assert_less_or_close_loglike(dist, data, func=None, **kwds):
"""
This utility function checks that the negative log-likelihood function
(or `func`) of the result computed using dist.fit() is less than or equal
to the result computed using the generic fit method. Because of
normal numerical imprecision, the "equality" check is made using
`np.allclose` with a relative tolerance of 1e-15.
"""
if func is None:
func = dist.nnlf
mle_analytical = dist.fit(data, **kwds)
numerical_opt = super(type(dist), dist).fit(data, **kwds)
ll_mle_analytical = func(mle_analytical, data)
ll_numerical_opt = func(numerical_opt, data)
assert (ll_mle_analytical <= ll_numerical_opt or
np.allclose(ll_mle_analytical, ll_numerical_opt, rtol=1e-15))
# Ideally we'd check that shapes are correctly fixed, too, but that is
# complicated by the many ways of fixing them (e.g. f0, fix_a, fa).
if 'floc' in kwds:
assert mle_analytical[-2] == kwds['floc']
if 'fscale' in kwds:
assert mle_analytical[-1] == kwds['fscale']
def assert_fit_warnings(dist):
param = ['floc', 'fscale']
if dist.shapes:
nshapes = len(dist.shapes.split(","))
param += ['f0', 'f1', 'f2'][:nshapes]
all_fixed = dict(zip(param, np.arange(len(param))))
data = [1, 2, 3]
with pytest.raises(RuntimeError,
match="All parameters fixed. There is nothing "
"to optimize."):
dist.fit(data, **all_fixed)
with pytest.raises(ValueError,
match="The data contains non-finite values"):
dist.fit([np.nan])
with pytest.raises(ValueError,
match="The data contains non-finite values"):
dist.fit([np.inf])
with pytest.raises(TypeError, match="Unknown keyword arguments:"):
dist.fit(data, extra_keyword=2)
with pytest.raises(TypeError, match="Too many positional arguments."):
dist.fit(data, *[1]*(len(param) - 1))
@pytest.mark.parametrize('dist',
                         ['alpha', 'betaprime',
                          'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
                          'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gibrat',
                          'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
    """gh-6235"""
    # At both endpoints of the support the pdf must be (numerically) zero
    # and the logpdf exactly -inf.
    shapes = dict(distcont)[dist]
    distribution = getattr(stats, dist)
    for endpoint in (distribution.a, distribution.b):
        assert_almost_equal(distribution.pdf(endpoint, *shapes), 0)
        assert_equal(distribution.logpdf(endpoint, *shapes), -np.inf)
class TestRandInt:
    """Tests for `stats.randint`, the discrete uniform distribution."""
    def setup_method(self):
        np.random.seed(1234)
    def test_rvs(self):
        # samples must fall in [low, high) and be integer typed
        sample = stats.randint.rvs(5, 30, size=100)
        assert np.all(sample >= 5) and np.all(sample < 30)
        assert len(sample) == 100
        sample = stats.randint.rvs(5, 30, size=(2, 50))
        assert np.shape(sample) == (2, 50)
        assert sample.dtype.char in typecodes['AllInteger']
        scalar = stats.randint.rvs(15, 46)
        assert 15 <= scalar < 46
        assert isinstance(scalar, np.ScalarType), repr(type(scalar))
        frozen_sample = stats.randint(15, 46).rvs(3)
        assert frozen_sample.dtype.char in typecodes['AllInteger']
    def test_pdf(self):
        # pmf is 1/(high-low) on the support and 0 elsewhere
        k = np.arange(36)
        expected = np.where((k >= 5) & (k < 30), 1.0/(30 - 5), 0)
        assert_array_almost_equal(stats.randint.pmf(k, 5, 30), expected)
    def test_cdf(self):
        # cdf is a staircase interpolating (k - low + 1)/(high - low)
        x = np.linspace(0, 36, 100)
        k = np.floor(x)
        expected = np.select([k >= 30, k >= 5],
                             [1.0, (k - 5.0 + 1)/(30 - 5.0)], 0)
        assert_array_almost_equal(stats.randint.cdf(x, 5, 30), expected,
                                  decimal=12)
class TestBinom:
    """Tests for `stats.binom`."""
    def setup_method(self):
        np.random.seed(1234)
    def test_rvs(self):
        vals = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.binom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # regression test for Ticket #1842
        # degenerate p (0 or 1) concentrates all mass at one point
        vals1 = stats.binom.pmf(100, 100, 1)
        vals2 = stats.binom.pmf(0, 100, 0)
        assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
        assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
    def test_entropy(self):
        # Basic entropy tests.
        b = stats.binom(2, 0.5)
        expected_p = np.array([0.25, 0.5, 0.25])
        expected_h = -sum(xlogy(expected_p, expected_p))
        h = b.entropy()
        assert_allclose(h, expected_h)
        # degenerate cases p=0 and p=1 are deterministic -> zero entropy
        b = stats.binom(2, 0.0)
        h = b.entropy()
        assert_equal(h, 0.0)
        b = stats.binom(2, 1.0)
        h = b.entropy()
        assert_equal(h, 0.0)
    def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
    def test_ppf_p1(self):
        # Check that gh-17388 is resolved: PPF == n when p = 1
        n = 4
        assert stats.binom.ppf(q=0.3, n=n, p=1.0) == n
    def test_pmf_poisson(self):
        # Check that gh-17146 is resolved: binom -> poisson
        # (huge n with tiny p must agree with the Poisson limit)
        n = 1541096362225563.0
        p = 1.0477878413173978e-18
        x = np.arange(3)
        res = stats.binom.pmf(x, n=n, p=p)
        ref = stats.poisson.pmf(x, n * p)
        assert_allclose(res, ref, atol=1e-16)
    def test_pmf_cdf(self):
        # Check that gh-17809 is resolved: binom.pmf(0) ~ binom.cdf(0)
        n = 25.0 * 10 ** 21
        p = 1.0 * 10 ** -21
        r = 0
        res = stats.binom.pmf(r, n, p)
        ref = stats.binom.cdf(r, n, p)
        assert_allclose(res, ref, atol=1e-16)
    def test_pmf_gh15101(self):
        # Check that gh-15101 is resolved (no divide warnings when p~1, n~oo)
        res = stats.binom.pmf(3, 2000, 0.999)
        assert_allclose(res, 0, atol=1e-16)
class TestArcsine:
    """Tests for `stats.arcsine`."""
    def test_endpoints(self):
        # Regression test for gh-13697: evaluating the pdf at the support
        # endpoints must return +inf without emitting a warning.
        density = stats.arcsine.pdf([0, 1])
        assert_equal(density, [np.inf, np.inf])
class TestBernoulli:
    """Tests for `stats.bernoulli`."""
    def setup_method(self):
        np.random.seed(1234)
    def test_rvs(self):
        # samples must be 0/1 valued with integer dtype
        sample = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert np.all(sample >= 0) and np.all(sample <= 1)
        assert np.shape(sample) == (2, 50)
        assert sample.dtype.char in typecodes['AllInteger']
        scalar = stats.bernoulli.rvs(0.75)
        assert isinstance(scalar, int)
        frozen_sample = stats.bernoulli(0.75).rvs(3)
        assert isinstance(frozen_sample, np.ndarray)
        assert frozen_sample.dtype.char in typecodes['AllInteger']
    def test_entropy(self):
        # Simple closed-form checks of the Bernoulli entropy.
        h = stats.bernoulli(0.25).entropy()
        assert_allclose(h, -0.25*np.log(0.25) - 0.75*np.log(0.75))
        # degenerate cases p=0 and p=1 have zero entropy
        assert_equal(stats.bernoulli(0.0).entropy(), 0.0)
        assert_equal(stats.bernoulli(1.0).entropy(), 0.0)
class TestBradford:
    """Tests for `stats.bradford`."""
    # gh-6216
    def test_cdf_ppf(self):
        # ppf must invert cdf even for tiny x far into the left tail
        c = 0.1
        x = np.logspace(-20, -4)
        roundtrip = stats.bradford.ppf(stats.bradford.cdf(x, c), c)
        assert_allclose(x, roundtrip)
class TestChi:
    """Tests for `stats.chi` against independently computed references."""
    # "Exact" value of chi.sf(10, 4), as computed by Wolfram Alpha with
    # 1 - CDF[ChiDistribution[4], 10]
    CHI_SF_10_4 = 9.83662422461598e-21
    # "Exact" value of chi.mean(df=1000) as computed by Wolfram Alpha with
    # Mean[ChiDistribution[1000]]
    CHI_MEAN_1000 = 31.614871896980
    def test_sf(self):
        s = stats.chi.sf(10, 4)
        assert_allclose(s, self.CHI_SF_10_4, rtol=1e-15)
    def test_isf(self):
        # isf must invert sf at the reference point
        x = stats.chi.isf(self.CHI_SF_10_4, 4)
        assert_allclose(x, 10, rtol=1e-15)
    def test_mean(self):
        x = stats.chi.mean(df=1000)
        assert_allclose(x, self.CHI_MEAN_1000, rtol=1e-12)
    # Entropy references values were computed with the following mpmath code
    # from mpmath import mp
    # mp.dps = 50
    # def chi_entropy_mpmath(df):
    #     df = mp.mpf(df)
    #     half_df = 0.5 * df
    #     entropy = mp.log(mp.gamma(half_df)) + 0.5 * \
    #         (df - mp.log(2) - (df - mp.one) * mp.digamma(half_df))
    #     return float(entropy)
    @pytest.mark.parametrize('df, ref',
                             [(1e-4, -9989.7316027504),
                              (1, 0.7257913526447274),
                              (1e3, 1.0721981095025448),
                              (1e10, 1.0723649429080335),
                              (1e100, 1.0723649429247002)])
    def test_entropy(self, df, ref):
        assert_allclose(stats.chi(df).entropy(), ref, rtol=1e-15)
class TestNBinom:
    """Tests for `stats.nbinom`."""
    def setup_method(self):
        np.random.seed(1234)
    def test_rvs(self):
        # samples must be non-negative with integer dtype
        sample = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert np.all(sample >= 0)
        assert np.shape(sample) == (2, 50)
        assert sample.dtype.char in typecodes['AllInteger']
        scalar = stats.nbinom.rvs(10, 0.75)
        assert isinstance(scalar, int)
        frozen_sample = stats.nbinom(10, 0.75).rvs(3)
        assert isinstance(frozen_sample, np.ndarray)
        assert frozen_sample.dtype.char in typecodes['AllInteger']
    def test_pmf(self):
        # regression test for ticket 1779: pmf and exp(logpmf) must agree
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
        assert_equal(stats.nbinom.logpmf(0, 1, 1), 0)
    def test_logcdf_gh16159(self):
        # check that gh16159 is resolved.
        k = [0, 5, 0, 5]
        vals = stats.nbinom.logcdf(k, n=4.8, p=0.45)
        assert_allclose(vals, np.log(stats.nbinom.cdf(k, n=4.8, p=0.45)))
class TestGenInvGauss:
    """Tests for `stats.geninvgauss`, covering its rvs sampling branches."""
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.slow
    def test_rvs_with_mode_shift(self):
        # ratio_unif w/ mode shift
        gig = stats.geninvgauss(2.3, 1.5)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_without_mode_shift(self):
        # ratio_unif w/o mode shift
        gig = stats.geninvgauss(0.9, 0.75)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_new_method(self):
        # new algorithm of Hoermann / Leydold
        gig = stats.geninvgauss(0.1, 0.2)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_p_zero(self):
        def my_ks_check(p, b):
            # KS-test a sample drawn with shape parameters (p, b)
            gig = stats.geninvgauss(p, b)
            rvs = gig.rvs(size=1500, random_state=1234)
            return stats.kstest(rvs, gig.cdf)[1] > 0.05
        # boundary cases when p = 0
        assert_equal(my_ks_check(0, 0.2), True)  # new algo
        assert_equal(my_ks_check(0, 0.9), True)  # ratio_unif w/o shift
        assert_equal(my_ks_check(0, 1.5), True)  # ratio_unif with shift
    def test_rvs_negative_p(self):
        # if p negative, return inverse
        assert_equal(
            stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
            1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
    def test_invgauss(self):
        # test that invgauss is special case
        ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
        assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
        # test pdf and cdf
        mu, x = 100, np.linspace(0.01, 1, 10)
        pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
        assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
        cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
        assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
    def test_pdf_R(self):
        # test against R package GIGrvg
        # x <- seq(0.01, 5, length.out = 10)
        # GIGrvg::dgig(x, 0.5, 1, 1)
        vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
                           2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
                           9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
                           3.602084467e-02])
        x = np.linspace(0.01, 5, 10)
        assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
    def test_pdf_zero(self):
        # pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
        assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
        # if x is large and p is moderate, make sure that pdf does not
        # overflow because of x**(p-1); exp(-b*x) forces pdf to zero
        assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
class TestGenHyperbolic:
    """Tests for `stats.genhyperbolic` against R/mpmath references and its
    special and limiting cases (Student t, Cauchy, Laplace, NIG)."""
    def setup_method(self):
        np.random.seed(1234)
    def test_pdf_r(self):
        # test against R package GeneralizedHyperbolic
        # x <- seq(-10, 10, length.out = 10)
        # GeneralizedHyperbolic::dghyp(
        #     x = x, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
        # )
        vals_R = np.array([
            2.94895678275316e-13, 1.75746848647696e-10, 9.48149804073045e-08,
            4.17862521692026e-05, 0.0103947630463822, 0.240864958986839,
            0.162833527161649, 0.0374609592899472, 0.00634894847327781,
            0.000941920705790324
        ])
        # scipy parametrizes with (p, a*delta, b*delta) relative to R's
        # (lambda, alpha, beta, delta, mu)
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(-10, 10, 10)
        assert_allclose(gh.pdf(x), vals_R, atol=0, rtol=1e-13)
    def test_cdf_r(self):
        # test against R package GeneralizedHyperbolic
        # q <- seq(-10, 10, length.out = 10)
        # GeneralizedHyperbolic::pghyp(
        #     q = q, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
        # )
        vals_R = np.array([
            1.01881590921421e-13, 6.13697274983578e-11, 3.37504977637992e-08,
            1.55258698166181e-05, 0.00447005453832497, 0.228935323956347,
            0.755759458895243, 0.953061062884484, 0.992598013917513,
            0.998942646586662
        ])
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(-10, 10, 10)
        assert_allclose(gh.cdf(x), vals_R, atol=0, rtol=1e-6)
    # The reference values were computed by implementing the PDF with mpmath
    # and integrating it with mp.quad. The values were computed with
    # mp.dps=250, and then again with mp.dps=400 to ensure the full 64 bit
    # precision was computed.
    @pytest.mark.parametrize(
        'x, p, a, b, loc, scale, ref',
        [(-15, 2, 3, 1.5, 0.5, 1.5, 4.770036428808252e-20),
         (-15, 10, 1.5, 0.25, 1, 5, 0.03282964575089294),
         (-15, 10, 1.5, 1.375, 0, 1, 3.3711159600215594e-23),
         (-15, 0.125, 1.5, 1.49995, 0, 1, 4.729401428898605e-23),
         (-1, 0.125, 1.5, 1.49995, 0, 1, 0.0003565725914786859),
         (5, -0.125, 1.5, 1.49995, 0, 1, 0.2600651974023352),
         (5, -0.125, 1000, 999, 0, 1, 5.923270556517253e-28),
         (20, -0.125, 1000, 999, 0, 1, 0.23452293711665634),
         (40, -0.125, 1000, 999, 0, 1, 0.9999648749561968),
         (60, -0.125, 1000, 999, 0, 1, 0.9999999999975475)]
    )
    def test_cdf_mpmath(self, x, p, a, b, loc, scale, ref):
        cdf = stats.genhyperbolic.cdf(x, p, a, b, loc=loc, scale=scale)
        assert_allclose(cdf, ref, rtol=5e-12)
    # The reference values were computed by implementing the PDF with mpmath
    # and integrating it with mp.quad. The values were computed with
    # mp.dps=250, and then again with mp.dps=400 to ensure the full 64 bit
    # precision was computed.
    @pytest.mark.parametrize(
        'x, p, a, b, loc, scale, ref',
        [(0, 1e-6, 12, -1, 0, 1, 0.38520358671350524),
         (-1, 3, 2.5, 2.375, 1, 3, 0.9999901774267577),
         (-20, 3, 2.5, 2.375, 1, 3, 1.0),
         (25, 2, 3, 1.5, 0.5, 1.5, 8.593419916523976e-10),
         (300, 10, 1.5, 0.25, 1, 5, 6.137415609872158e-24),
         (60, -0.125, 1000, 999, 0, 1, 2.4524915075944173e-12),
         (75, -0.125, 1000, 999, 0, 1, 2.9435194886214633e-18)]
    )
    def test_sf_mpmath(self, x, p, a, b, loc, scale, ref):
        sf = stats.genhyperbolic.sf(x, p, a, b, loc=loc, scale=scale)
        assert_allclose(sf, ref, rtol=5e-12)
    def test_moments_r(self):
        # test against R package GeneralizedHyperbolic
        # sapply(1:4,
        #        function(x) GeneralizedHyperbolic::ghypMom(
        #            order = x, lambda = 2, alpha = 2,
        #            beta = 1, delta = 1.5, mu = 0.5,
        #            momType = 'raw')
        #        )
        vals_R = [2.36848366948115, 8.4739346779246,
                  37.8870502710066, 205.76608511485]
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        vals_us = [
            stats.genhyperbolic(*args, loc=mu, scale=delta).moment(i)
            for i in range(1, 5)
        ]
        assert_allclose(vals_us, vals_R, atol=0, rtol=1e-13)
    def test_rvs(self):
        # Kolmogorov-Smirnov test to ensure alignment
        # of analytical and empirical cdfs
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        _, p = stats.kstest(gh.rvs(size=1500, random_state=1234), gh.cdf)
        assert_equal(p > 0.05, True)
    def test_pdf_t(self):
        # Test Against T-Student with 1 - 30 df
        df = np.linspace(1, 30, 10)
        # in principle alpha should be zero in practice for big lmbdas
        # alpha cannot be too small else pdf does not integrate
        alpha, beta = np.float_power(df, 2)*np.finfo(np.float32).eps, 0
        mu, delta = 0, np.sqrt(df)
        args = (-df/2, alpha, beta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.t.pdf(x, df),
            atol=0, rtol=1e-6
        )
    def test_pdf_cauchy(self):
        # Test Against Cauchy distribution
        # in principle alpha should be zero in practice for big lmbdas
        # alpha cannot be too small else pdf does not integrate
        lmbda, alpha, beta = -0.5, np.finfo(np.float32).eps, 0
        mu, delta = 0, 1
        args = (lmbda, alpha, beta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.cauchy.pdf(x),
            atol=0, rtol=1e-6
        )
    def test_pdf_laplace(self):
        # Test Against Laplace with location param [-10, 10]
        loc = np.linspace(-10, 10, 10)
        # in principle delta should be zero in practice for big loc delta
        # cannot be too small else pdf does not integrate
        delta = np.finfo(np.float32).eps
        lmbda, alpha, beta = 1, 1, 0
        args = (lmbda, alpha*delta, beta*delta)
        # ppf does not integrate for scale < 5e-4
        # therefore using simple linspace to define the support
        gh = stats.genhyperbolic(*args, loc=loc, scale=delta)
        x = np.linspace(-20, 20, 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.laplace.pdf(x, loc=loc, scale=1),
            atol=0, rtol=1e-11
        )
    def test_pdf_norminvgauss(self):
        # Test Against NIG with varying alpha/beta/delta/mu
        alpha, beta, delta, mu = (
            np.linspace(1, 20, 10),
            np.linspace(0, 19, 10)*np.float_power(-1, range(10)),
            np.linspace(1, 1, 10),
            np.linspace(-100, 100, 10)
        )
        lmbda = - 0.5
        args = (lmbda, alpha * delta, beta * delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.norminvgauss.pdf(
                x, a=alpha, b=beta, loc=mu, scale=delta),
            atol=0, rtol=1e-13
        )
class TestHypSecant:
# Reference values were computed with the mpmath expression
# float((2/mp.pi)*mp.atan(mp.exp(-x)))
# and mp.dps = 50.
@pytest.mark.parametrize('x, reference',
[(30, 5.957247804324683e-14),
(50, 1.2278802891647964e-22)])
def test_sf(self, x, reference):
sf = stats.hypsecant.sf(x)
assert_allclose(sf, reference, rtol=5e-15)
# Reference values were computed with the mpmath expression
# float(-mp.log(mp.tan((mp.pi/2)*p)))
# and mp.dps = 50.
@pytest.mark.parametrize('p, reference',
[(1e-6, 13.363927852673998),
(1e-12, 27.179438410639094)])
def test_isf(self, p, reference):
x = stats.hypsecant.isf(p)
assert_allclose(x, reference, rtol=5e-15)
class TestNormInvGauss:
    """Tests for `stats.norminvgauss`."""
    def setup_method(self):
        np.random.seed(1234)
    def test_cdf_R(self):
        # test pdf and cdf vals against R
        # require("GeneralizedHyperbolic")
        # x_test <- c(-7, -5, 0, 8, 15)
        # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
        # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
        r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
                          9.988650664e-01, 9.999848769e-01])
        x_test = np.array([-7, -5, 0, 8, 15])
        vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
        assert_allclose(vals_cdf, r_cdf, atol=1e-9)
    def test_pdf_R(self):
        # values from R as defined in test_cdf_R
        r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
                          7.450485342e-04, 8.917889931e-06])
        x_test = np.array([-7, -5, 0, 8, 15])
        vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
        assert_allclose(vals_pdf, r_pdf, atol=1e-9)
    @pytest.mark.parametrize('x, a, b, sf, rtol',
                             [(-1, 1, 0, 0.8759652211005315, 1e-13),
                              (25, 1, 0, 1.1318690184042579e-13, 1e-4),
                              (1, 5, -1.5, 0.002066711134653577, 1e-12),
                              (10, 5, -1.5, 2.308435233930669e-29, 1e-9)])
    def test_sf_isf_mpmath(self, x, a, b, sf, rtol):
        # Reference data generated with `reference_distributions.NormInvGauss`,
        # e.g. `NormInvGauss(alpha=1, beta=0).sf(-1)` with mp.dps = 50
        s = stats.norminvgauss.sf(x, a, b)
        assert_allclose(s, sf, rtol=rtol)
        i = stats.norminvgauss.isf(sf, a, b)
        assert_allclose(i, x, rtol=rtol)
    def test_sf_isf_mpmath_vectorized(self):
        x = [-1, 25]
        a = [1, 1]
        b = 0
        sf = [0.8759652211005315, 1.1318690184042579e-13]  # see previous test
        s = stats.norminvgauss.sf(x, a, b)
        assert_allclose(s, sf, rtol=1e-13, atol=1e-16)
        i = stats.norminvgauss.isf(sf, a, b)
        # Not perfect, but better than it was. See gh-13338.
        assert_allclose(i, x, rtol=1e-6)
    def test_gh8718(self):
        # Add test that gh-13338 resolved gh-8718 (sf/isf round trip)
        dst = stats.norminvgauss(1, 0)
        x = np.arange(0, 20, 2)
        sf = dst.sf(x)
        isf = dst.isf(sf)
        assert_allclose(isf, x)
    def test_stats(self):
        # closed-form mean/variance/skew/kurtosis expressed via gamma
        a, b = 1, 0.5
        gamma = np.sqrt(a**2 - b**2)
        v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
                   3.0 * (1 + 4 * b**2 / a**2) / gamma)
        assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
    def test_ppf(self):
        # ppf must invert cdf
        a, b = 1, 0.5
        x_test = np.array([0.001, 0.5, 0.999])
        vals = stats.norminvgauss.ppf(x_test, a, b)
        assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
class TestGeom:
    """Tests for `stats.geom`."""
    def setup_method(self):
        np.random.seed(1234)
    def test_rvs(self):
        vals = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.geom.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.geom(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_rvs_9313(self):
        # previously, RVS were converted to `np.int32` on some platforms,
        # causing overflow for moderately large integer output (gh-9313).
        # Check that this is resolved to the extent possible w/ `np.int64`.
        rng = np.random.default_rng(649496242618848)
        rvs = stats.geom.rvs(np.exp(-35), size=5, random_state=rng)
        assert rvs.dtype == np.int64
        assert np.all(rvs > np.iinfo(np.int32).max)
    def test_pmf(self):
        vals = stats.geom.pmf([1, 2, 3], 0.5)
        assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
    def test_logpmf(self):
        # regression test for ticket 1793: log(pmf) and logpmf must agree
        vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
        vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
        assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
        # regression test for gh-4028
        val = stats.geom.logpmf(1, 1)
        assert_equal(val, 0.0)
    def test_cdf_sf(self):
        vals = stats.geom.cdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.sf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, expected)
        assert_array_almost_equal(vals_sf, 1-expected)
    def test_logcdf_logsf(self):
        vals = stats.geom.logcdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, np.log(expected))
        assert_array_almost_equal(vals_sf, np.log1p(-expected))
    def test_ppf(self):
        vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
        expected = array([1.0, 2.0, 3.0])
        assert_array_almost_equal(vals, expected)
    def test_ppf_underflow(self):
        # this should not underflow
        assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
    def test_entropy_gh18226(self):
        # gh-18226 reported that `geom.entropy` produced a warning and
        # inaccurate output for small p. Check that this is resolved.
        h = stats.geom(0.0146).entropy()
        assert_allclose(h, 5.219397961962308, rtol=1e-15)
class TestPlanck:
    """Tests for the Planck (discrete exponential) distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_sf(self):
        # sf(k, lam) = exp(-lam*(k + 1)); compare against exact values.
        computed = stats.planck.sf([1, 2, 3], 5.)
        exact = array([4.5399929762484854e-05,
                       3.0590232050182579e-07,
                       2.0611536224385579e-09])
        assert_array_almost_equal(computed, exact)

    def test_logsf(self):
        # logsf stays finite where sf itself underflows:
        # logsf(k, lam) = -lam*(k + 1).
        computed = stats.planck.logsf([1000., 2000., 3000.], 1000.)
        exact = array([-1001000., -2001000., -3001000.])
        assert_array_almost_equal(computed, exact)
class TestGennorm:
    """Tests for the generalized normal distribution."""

    def test_laplace(self):
        # beta=1 reduces the generalized normal to the Laplace distribution.
        xs = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(xs, 1),
                            stats.laplace.pdf(xs))

    def test_norm(self):
        # beta=2 reduces it to a normal with scale 1/sqrt(2).
        xs = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(xs, 2),
                            stats.norm.pdf(xs, scale=2**-.5))

    def test_rvs(self):
        np.random.seed(0)
        # 0 < beta < 1: check draws against the frozen cdf.
        frozen = stats.gennorm(0.5)
        draws = frozen.rvs(size=1000)
        assert stats.kstest(draws, frozen.cdf).pvalue > 0.1
        # beta = 1: compare against Laplace samples.
        draws = stats.gennorm(1).rvs(size=1000)
        laplace_draws = stats.laplace.rvs(size=1000)
        assert stats.ks_2samp(draws, laplace_draws).pvalue > 0.1
        # beta = 2: compare against normal samples with scale 1/sqrt(2).
        draws = stats.gennorm(2).rvs(size=1000)
        normal_draws = stats.norm.rvs(scale=1/2**0.5, size=1000)
        assert stats.ks_2samp(draws, normal_draws).pvalue > 0.1

    def test_rvs_broadcasting(self):
        np.random.seed(0)
        frozen = stats.gennorm([[0.5, 1.], [2., 5.]])
        draws = frozen.rvs(size=[1000, 2, 2])
        # Each (i, j) slice must follow the matching scalar-beta law.
        for (i, j), beta in [((0, 0), 0.5), ((0, 1), 1.0),
                             ((1, 0), 2.0), ((1, 1), 5.0)]:
            assert stats.kstest(draws[:, i, j],
                                stats.gennorm(beta).cdf)[1] > 0.1
class TestGibrat:
    """Tests for the Gibrat distribution.

    The sf reference is 1 - ncdf(log(x)) per the mpmath snippet below.
    """
    # sfx is sf(x). The values were computed with mpmath:
    #
    # from mpmath import mp
    # mp.dps = 100
    # def gibrat_sf(x):
    #     return 1 - mp.ncdf(mp.log(x))
    #
    # E.g.
    #
    # >>> float(gibrat_sf(1.5))
    # 0.3425678305148459
    #
    @pytest.mark.parametrize('x, sfx', [(1.5, 0.3425678305148459),
                                        (5000, 8.173334352522493e-18)])
    def test_sf_isf(self, x, sfx):
        # sf matches the mpmath reference; isf inverts it to 14 digits.
        assert_allclose(stats.gibrat.sf(x), sfx, rtol=2e-14)
        assert_allclose(stats.gibrat.isf(sfx), x, rtol=2e-14)
class TestGompertz:
    """Tests for the Gompertz distribution."""
    def test_gompertz_accuracy(self):
        # Regression test for gh-4031: ppf(cdf(x)) round trip for tiny x.
        p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
        assert_allclose(p, 1e-100)
    # sfx is sf(x). The values were computed with mpmath:
    #
    # from mpmath import mp
    # mp.dps = 100
    # def gompertz_sf(x, c):
    #     return mp.exp(-c*mp.expm1(x))
    #
    # E.g.
    #
    # >>> float(gompertz_sf(1, 2.5))
    # 0.013626967146253437
    #
    @pytest.mark.parametrize('x, c, sfx', [(1, 2.5, 0.013626967146253437),
                                           (3, 2.5, 1.8973243273704087e-21),
                                           (0.05, 5, 0.7738668242570479),
                                           (2.25, 5, 3.707795833465481e-19)])
    def test_sf_isf(self, x, c, sfx):
        # sf matches the mpmath reference; isf inverts it to 14 digits.
        assert_allclose(stats.gompertz.sf(x, c), sfx, rtol=1e-14)
        assert_allclose(stats.gompertz.isf(sfx, c), x, rtol=1e-14)
    # reference values were computed with mpmath
    # from mpmath import mp
    # mp.dps = 100
    # def gompertz_entropy(c):
    #     c = mp.mpf(c)
    #     return float(mp.one - mp.log(c) - mp.exp(c)*mp.e1(c))
    @pytest.mark.parametrize('c, ref', [(1e-4, 1.5762523017634573),
                                        (1, 0.4036526376768059),
                                        (1000, -5.908754280976161),
                                        (1e10, -22.025850930040455)])
    def test_entropy(self, c, ref):
        assert_allclose(stats.gompertz.entropy(c), ref, rtol=1e-14)
class TestFoldNorm:
    """Tests for the folded normal distribution."""
    # reference values were computed with mpmath with 50 digits of precision
    # from mpmath import mp
    # mp.dps = 50
    # mp.mpf(0.5) * (mp.erf((x - c)/mp.sqrt(2)) + mp.erf((x + c)/mp.sqrt(2)))
    @pytest.mark.parametrize('x, c, ref', [(1e-4, 1e-8, 7.978845594730578e-05),
                                           (1e-4, 1e-4, 7.97884555483635e-05)])
    def test_cdf(self, x, c, ref):
        # cdf must stay accurate for x and c both close to zero.
        assert_allclose(stats.foldnorm.cdf(x, c), ref, rtol=1e-15)
class TestHalfNorm:
    """Tests for the half-normal distribution."""

    # sfx is sf(x). The values were computed with mpmath:
    #
    # from mpmath import mp
    # mp.dps = 100
    # def halfnorm_sf(x):
    #     return 2*(1 - mp.ncdf(x))
    #
    # E.g.
    #
    # >>> float(halfnorm_sf(1))
    # 0.3173105078629141
    #
    @pytest.mark.parametrize('x, sfx', [(1, 0.3173105078629141),
                                        (10, 1.523970604832105e-23)])
    def test_sf_isf(self, x, sfx):
        # sf matches the mpmath reference; isf inverts it to 14 digits.
        assert_allclose(stats.halfnorm.sf(x), sfx, rtol=1e-14)
        assert_allclose(stats.halfnorm.isf(sfx), x, rtol=1e-14)

    # reference values were computed via mpmath
    # from mpmath import mp
    # mp.dps = 100
    # def halfnorm_cdf_mpmath(x):
    #     x = mp.mpf(x)
    #     return float(mp.erf(x/mp.sqrt(2.)))
    @pytest.mark.parametrize('x, ref', [(1e-40, 7.978845608028653e-41),
                                        (1e-18, 7.978845608028654e-19),
                                        (8, 0.9999999999999988)])
    def test_cdf(self, x, ref):
        assert_allclose(stats.halfnorm.cdf(x), ref, rtol=1e-15)

    @pytest.mark.parametrize("rvs_loc", [1e-5, 1e10])
    @pytest.mark.parametrize("rvs_scale", [1e-2, 100, 1e8])
    @pytest.mark.parametrize('fix_loc', [True, False])
    @pytest.mark.parametrize('fix_scale', [True, False])
    def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale,
                                    fix_loc, fix_scale):
        # The analytical MLE in `halfnorm.fit` must achieve a
        # log-likelihood at least as good as the generic optimizer.
        rng = np.random.default_rng(6762668991392531563)
        data = stats.halfnorm.rvs(loc=rvs_loc, scale=rvs_scale, size=1000,
                                  random_state=rng)

        if fix_loc and fix_scale:
            error_msg = ("All parameters fixed. There is nothing to "
                         "optimize.")
            with pytest.raises(RuntimeError, match=error_msg):
                # BUG FIX: this previously called `stats.halflogistic.fit`
                # (copy-paste from TestHalfLogistic); the distribution
                # under test here is `halfnorm`.
                stats.halfnorm.fit(data, floc=rvs_loc, fscale=rvs_scale)
            return

        kwds = {}
        if fix_loc:
            kwds['floc'] = rvs_loc
        if fix_scale:
            kwds['fscale'] = rvs_scale

        _assert_less_or_close_loglike(stats.halfnorm, data, **kwds)

    def test_fit_error(self):
        # `floc` bigger than the minimal data point must raise.
        with pytest.raises(FitDataError):
            stats.halfnorm.fit([1, 2, 3], floc=2)
class TestHalfCauchy:
    """Tests for the half-Cauchy distribution."""

    @pytest.mark.parametrize("rvs_loc", [1e-5, 1e10])
    @pytest.mark.parametrize("rvs_scale", [1e-2, 1e8])
    @pytest.mark.parametrize('fix_loc', [True, False])
    @pytest.mark.parametrize('fix_scale', [True, False])
    def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale,
                                    fix_loc, fix_scale):
        # `halfcauchy.fit` must achieve a log-likelihood at least as good
        # as the generic numerical optimizer.
        rng = np.random.default_rng(6762668991392531563)
        # BUG FIX: the data was previously drawn from `stats.halfnorm`
        # (copy-paste from TestHalfNorm); the distribution under test
        # here is `halfcauchy`.
        data = stats.halfcauchy.rvs(loc=rvs_loc, scale=rvs_scale, size=1000,
                                    random_state=rng)

        if fix_loc and fix_scale:
            error_msg = ("All parameters fixed. There is nothing to "
                         "optimize.")
            with pytest.raises(RuntimeError, match=error_msg):
                stats.halfcauchy.fit(data, floc=rvs_loc, fscale=rvs_scale)
            return

        kwds = {}
        if fix_loc:
            kwds['floc'] = rvs_loc
        if fix_scale:
            kwds['fscale'] = rvs_scale

        _assert_less_or_close_loglike(stats.halfcauchy, data, **kwds)

    def test_fit_error(self):
        # `floc` bigger than the minimal data point must raise.
        with pytest.raises(FitDataError):
            stats.halfcauchy.fit([1, 2, 3], floc=2)
class TestHalfLogistic:
    """Tests for the half-logistic distribution."""
    # survival function reference values were computed with mpmath
    # from mpmath import mp
    # mp.dps = 50
    # def sf_mpmath(x):
    #     x = mp.mpf(x)
    #     return float(mp.mpf(2.)/(mp.exp(x) + mp.one))
    @pytest.mark.parametrize('x, ref', [(100, 7.440151952041672e-44),
                                        (200, 2.767793053473475e-87)])
    def test_sf(self, x, ref):
        assert_allclose(stats.halflogistic.sf(x), ref, rtol=1e-15)
    # inverse survival function reference values were computed with mpmath
    # from mpmath import mp
    # mp.dps = 200
    # def isf_mpmath(x):
    #     halfx = mp.mpf(x)/2
    #     return float(-mp.log(halfx/(mp.one - halfx)))
    @pytest.mark.parametrize('q, ref', [(7.440151952041672e-44, 100),
                                        (2.767793053473475e-87, 200),
                                        (1-1e-9, 1.999999943436137e-09),
                                        (1-1e-15, 1.9984014443252818e-15)])
    def test_isf(self, q, ref):
        assert_allclose(stats.halflogistic.isf(q), ref, rtol=1e-15)
    @pytest.mark.parametrize("rvs_loc", [1e-5, 1e10])
    @pytest.mark.parametrize("rvs_scale", [1e-2, 100, 1e8])
    @pytest.mark.parametrize('fix_loc', [True, False])
    @pytest.mark.parametrize('fix_scale', [True, False])
    def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale,
                                    fix_loc, fix_scale):
        # `halflogistic.fit` must do at least as well (in log-likelihood)
        # as the generic numerical optimizer.
        rng = np.random.default_rng(6762668991392531563)
        data = stats.halflogistic.rvs(loc=rvs_loc, scale=rvs_scale, size=1000,
                                      random_state=rng)
        kwds = {}
        if fix_loc and fix_scale:
            # With both parameters fixed there is nothing left to fit.
            error_msg = ("All parameters fixed. There is nothing to "
                         "optimize.")
            with pytest.raises(RuntimeError, match=error_msg):
                stats.halflogistic.fit(data, floc=rvs_loc, fscale=rvs_scale)
            return
        if fix_loc:
            kwds['floc'] = rvs_loc
        if fix_scale:
            kwds['fscale'] = rvs_scale
        _assert_less_or_close_loglike(stats.halflogistic, data, **kwds)
    def test_fit_bad_floc(self):
        # `floc` above the data minimum is invalid for this distribution.
        msg = r" Maximum likelihood estimation with 'halflogistic' requires"
        with assert_raises(FitDataError, match=msg):
            stats.halflogistic.fit([0, 2, 4], floc=1)
class TestHalfgennorm:
    """Tests for the half generalized normal distribution."""

    def test_expon(self):
        # beta=1 makes halfgennorm the standard exponential.
        xs = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(xs, 1),
                            stats.expon.pdf(xs))

    def test_halfnorm(self):
        # beta=2 makes it a half-normal with scale 1/sqrt(2).
        xs = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(xs, 2),
                            stats.halfnorm.pdf(xs, scale=2**-.5))

    def test_gennorm(self):
        # halfgennorm is twice the symmetric gennorm pdf on x >= 0.
        xs = [1, 2, 3]
        beta = .497324
        assert_almost_equal(stats.halfgennorm.pdf(xs, beta),
                            2*stats.gennorm.pdf(xs, beta))
class TestLaplaceasymmetric:
    """Tests for the asymmetric Laplace distribution."""

    def test_laplace(self):
        # kappa=1 reduces the asymmetric Laplace to the ordinary Laplace.
        xs = np.array([1, 2, 3])
        assert_allclose(stats.laplace_asymmetric.pdf(xs, 1),
                        stats.laplace.pdf(xs))

    def test_asymmetric_laplace_pdf(self):
        # pdf identity: pdf(x, kappa) == pdf(x*kappa**2, 1/kappa).
        xs = np.array([1, 2, 3])
        kappa = 2
        kapinv = 1/kappa
        assert_allclose(stats.laplace_asymmetric.pdf(xs, kappa),
                        stats.laplace_asymmetric.pdf(xs*(kappa**2), kapinv))

    def test_asymmetric_laplace_log_10_16(self):
        # Exact pdf/cdf/sf values at x = -log(16) and x = log(10) for
        # kappa=2, plus round trips through ppf and isf.
        xs = np.array([-np.log(16), np.log(10)])
        kappa = 2
        pdf_exact = np.array([1/10, 1/250])
        cdf_exact = np.array([1/5, 1 - 1/500])
        sf_exact = np.array([4/5, 1/500])
        computed = np.concatenate((
            stats.laplace_asymmetric.pdf(xs, kappa),
            stats.laplace_asymmetric.cdf(xs, kappa),
            stats.laplace_asymmetric.sf(xs, kappa),
            stats.laplace_asymmetric.ppf(cdf_exact, kappa),
            stats.laplace_asymmetric.isf(sf_exact, kappa)))
        expected = np.concatenate((pdf_exact, cdf_exact, sf_exact, xs, xs))
        assert_allclose(computed, expected)
class TestTruncnorm:
    """Tests for the truncated normal distribution, with emphasis on
    extreme-tail accuracy (gh-2477, gh-9403, gh-14753, gh-15110)."""
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.parametrize("a, b, ref",
                             [(0, 100, 0.7257913526447274),
                              (0.6, 0.7, -2.3027610681852573),
                              (1e-06, 2e-06, -13.815510557964274)])
    def test_entropy(self, a, b, ref):
        # All reference values were calculated with mpmath:
        # import numpy as np
        # from mpmath import mp
        # mp.dps = 50
        # def entropy_trun(a, b):
        #     a, b = mp.mpf(a), mp.mpf(b)
        #     Z = mp.ncdf(b) - mp.ncdf(a)
        #
        #     def pdf(x):
        #         return mp.npdf(x) / Z
        #
        #     res = -mp.quad(lambda t: pdf(t) * mp.log(pdf(t)), [a, b])
        #     return np.float64(res)
        assert_allclose(stats.truncnorm.entropy(a, b), ref, rtol=1e-10)
    @pytest.mark.parametrize("a, b, ref",
                             [(1e-11, 10000000000.0, 0.725791352640738),
                              (1e-100, 1e+100, 0.7257913526447274),
                              (-1e-100, 1e+100, 0.7257913526447274),
                              (-1e+100, 1e+100, 1.4189385332046727)])
    def test_extreme_entropy(self, a, b, ref):
        # The reference values were calculated with mpmath
        # import numpy as np
        # from mpmath import mp
        # mp.dps = 50
        # def trunc_norm_entropy(a, b):
        #     a, b = mp.mpf(a), mp.mpf(b)
        #     Z = mp.ncdf(b) - mp.ncdf(a)
        #     A = mp.log(mp.sqrt(2 * mp.pi * mp.e) * Z)
        #     B = (a * mp.npdf(a) - b * mp.npdf(b)) / (2 * Z)
        #     return np.float64(A + B)
        assert_allclose(stats.truncnorm.entropy(a, b), ref, rtol=1e-14)
    def test_ppf_ticket1131(self):
        # ppf must return nan outside [0, 1] and hit the bounds exactly.
        vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_isf_ticket1131(self):
        # isf mirrors ppf: nan outside [0, 1], bounds hit exactly.
        vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_gh_2477_small_values(self):
        # Check a case that worked in the original issue.
        low, high = -11, -10
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        # Check a case that failed in the original issue.
        low, high = 10, 11
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_2477_large_values(self):
        # Check a case that used to fail because of extreme tailness.
        low, high = 100, 101
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low <= x.min() <= x.max() <= high), str([low, high, x])
        # Check some additional extreme tails
        low, high = 1000, 1001
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        low, high = 10000, 10001
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        low, high = -10001, -10000
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_9403_nontail_values(self):
        # Moderate one-sided intervals; checks cdf/sf/pdf/ppf consistency
        # and the left/right symmetry of the two interval choices.
        for low, high in [[3, 4], [-4, -3]]:
            xvals = np.array([-np.inf, low, high, np.inf])
            xmid = (high+low)/2.0
            cdfs = stats.truncnorm.cdf(xvals, low, high)
            sfs = stats.truncnorm.sf(xvals, low, high)
            pdfs = stats.truncnorm.pdf(xvals, low, high)
            expected_cdfs = np.array([0, 0, 1, 1])
            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
            expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
            if low < 0:
                expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
            assert_almost_equal(cdfs, expected_cdfs)
            assert_almost_equal(sfs, expected_sfs)
            assert_almost_equal(pdfs, expected_pdfs)
            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
                                low + 0.5)
            pvals = np.array([0, 0.5, 1.0])
            ppfs = stats.truncnorm.ppf(pvals, low, high)
            expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
            assert_almost_equal(ppfs, expected_ppfs)
            if low < 0:
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    0.8475544278436675)
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    0.1524455721563326)
            else:
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    0.8475544278436675)
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    0.1524455721563326)
            pdf = stats.truncnorm.pdf(xmid, low, high)
            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
    def test_gh_9403_medium_tail_values(self):
        # Same checks as above but much further out in the tails.
        for low, high in [[39, 40], [-40, -39]]:
            xvals = np.array([-np.inf, low, high, np.inf])
            xmid = (high+low)/2.0
            cdfs = stats.truncnorm.cdf(xvals, low, high)
            sfs = stats.truncnorm.sf(xvals, low, high)
            pdfs = stats.truncnorm.pdf(xvals, low, high)
            expected_cdfs = np.array([0, 0, 1, 1])
            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
            expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
            if low < 0:
                expected_pdfs = np.array([0, 2.73349092e-16,
                                          3.90256074e+01, 0])
            assert_almost_equal(cdfs, expected_cdfs)
            assert_almost_equal(sfs, expected_sfs)
            assert_almost_equal(pdfs, expected_pdfs)
            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
                                low + 0.5)
            pvals = np.array([0, 0.5, 1.0])
            ppfs = stats.truncnorm.ppf(pvals, low, high)
            expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
            assert_almost_equal(ppfs, expected_ppfs)
            cdfs = stats.truncnorm.cdf(ppfs, low, high)
            assert_almost_equal(cdfs, pvals)
            if low < 0:
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    0.9999999970389126)
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    2.961048103554866e-09)
            else:
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    0.9999999970389126)
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    2.961048103554866e-09)
            pdf = stats.truncnorm.pdf(xmid, low, high)
            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
            xvals = np.linspace(low, high, 11)
            xvals2 = -xvals[::-1]
            assert_almost_equal(stats.truncnorm.cdf(xvals, low, high),
                                stats.truncnorm.sf(xvals2, -high, -low)[::-1])
            assert_almost_equal(stats.truncnorm.sf(xvals, low, high),
                                stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
            assert_almost_equal(stats.truncnorm.pdf(xvals, low, high),
                                stats.truncnorm.pdf(xvals2, -high, -low)[::-1])
    def test_cdf_tail_15110_14753(self):
        # Check accuracy issues reported in gh-14753 and gh-15110
        # Ground truth values calculated using Wolfram Alpha, e.g.
        # (CDF[NormalDistribution[0,1],83/10]-CDF[NormalDistribution[0,1],8])/
        #     (1 - CDF[NormalDistribution[0,1],8])
        assert_allclose(stats.truncnorm(13., 15.).cdf(14.),
                        0.9999987259565643)
        assert_allclose(stats.truncnorm(8, np.inf).cdf(8.3),
                        0.9163220907327540)
    # Test data for the truncnorm stats() method.
    # The data in each row is:
    #   a, b, mean, variance, skewness, excess kurtosis. Generated using
    # https://gist.github.com/WarrenWeckesser/636b537ee889679227d53543d333a720
    _truncnorm_stats_data = [
        [-30, 30,
         0.0, 1.0, 0.0, 0.0],
        [-10, 10,
         0.0, 1.0, 0.0, -1.4927521335810455e-19],
        [-3, 3,
         0.0, 0.9733369246625415, 0.0, -0.17111443639774404],
        [-2, 2,
         0.0, 0.7737413035499232, 0.0, -0.6344632828703505],
        [0, np.inf,
         0.7978845608028654,
         0.3633802276324187,
         0.995271746431156,
         0.8691773036059741],
        [-np.inf, 0,
         -0.7978845608028654,
         0.3633802276324187,
         -0.995271746431156,
         0.8691773036059741],
        [-1, 3,
         0.282786110727154,
         0.6161417353578293,
         0.5393018494027877,
         -0.20582065135274694],
        [-3, 1,
         -0.282786110727154,
         0.6161417353578293,
         -0.5393018494027877,
         -0.20582065135274694],
        [-10, -9,
         -9.108456288012409,
         0.011448805821636248,
         -1.8985607290949496,
         5.0733461105025075],
    ]
    _truncnorm_stats_data = np.array(_truncnorm_stats_data)
    @pytest.mark.parametrize("case", _truncnorm_stats_data)
    def test_moments(self, case):
        a, b, m0, v0, s0, k0 = case
        m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
        assert_allclose([m, v, s, k], [m0, v0, s0, k0], atol=1e-17)
    def test_9902_moments(self):
        m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
        assert_almost_equal(m, 0.79788456)
        assert_almost_equal(v, 0.36338023)
    def test_gh_1489_trac_962_rvs(self):
        # Check the original example.
        low, high = 10, 15
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_11299_rvs(self):
        # Arose from investigating gh-11299
        # Test multiple shape parameters simultaneously.
        low = [-10, 10, -np.inf, -5, -np.inf, -np.inf, -45, -45, 40, -10, 40]
        high = [-5, 11, 5, np.inf, 40, -40, 40, -40, 45, np.inf, np.inf]
        x = stats.truncnorm.rvs(low, high, size=(5, len(low)))
        assert np.shape(x) == (5, len(low))
        assert_(np.all(low <= x.min(axis=0)))
        assert_(np.all(x.max(axis=0) <= high))
    def test_rvs_Generator(self):
        # check that rvs can use a Generator
        if hasattr(np.random, "default_rng"):
            stats.truncnorm.rvs(-10, -5, size=5,
                                random_state=np.random.default_rng())
    def test_logcdf_gh17064(self):
        # regression test for gh-17064 - avoid roundoff error for logcdfs ~0
        a = np.array([-np.inf, -np.inf, -8, -np.inf, 10])
        b = np.array([np.inf, np.inf, 8, 10, np.inf])
        x = np.array([10, 7.5, 7.5, 9, 20])
        expected = [-7.619853024160525e-24, -3.190891672910947e-14,
                    -3.128682067168231e-14, -1.1285122074235991e-19,
                    -3.61374964828753e-66]
        assert_allclose(stats.truncnorm(a, b).logcdf(x), expected)
        assert_allclose(stats.truncnorm(-b, -a).logsf(-x), expected)
    def test_moments_gh18634(self):
        # gh-18634 reported that moments 5 and higher didn't work; check that
        # this is resolved
        res = stats.truncnorm(-2, 3).moment(5)
        # From Mathematica:
        # Moment[TruncatedDistribution[{-2, 3}, NormalDistribution[]], 5]
        ref = 1.645309620208361
        assert_allclose(res, ref)
class TestGenLogistic:
    """Tests for the generalized logistic distribution."""
    # Expected values computed with mpmath with 50 digits of precision.
    @pytest.mark.parametrize('x, expected', [(-1000, -1499.5945348918917),
                                             (-125, -187.09453489189184),
                                             (0, -1.3274028432916989),
                                             (100, -99.59453489189184),
                                             (1000, -999.5945348918918)])
    def test_logpdf(self, x, expected):
        c = 1.5
        logp = stats.genlogistic.logpdf(x, c)
        assert_allclose(logp, expected, rtol=1e-13)
    # Expected values computed with mpmath with 50 digits of precision
    # from mpmath import mp
    # mp.dps = 50
    # def entropy_mp(c):
    #     c = mp.mpf(c)
    #     return float(-mp.log(c)+mp.one+mp.digamma(c + mp.one) + mp.euler)
    @pytest.mark.parametrize('c, ref', [(1e-100, 231.25850929940458),
                                        (1e-4, 10.21050485336338),
                                        (1e8, 1.577215669901533),
                                        (1e100, 1.5772156649015328)])
    def test_entropy(self, c, ref):
        assert_allclose(stats.genlogistic.entropy(c), ref, rtol=5e-15)
    # Expected values computed with mpmath with 50 digits of precision
    # from mpmath import mp
    # mp.dps = 1000
    #
    # def genlogistic_cdf_mp(x, c):
    #     x = mp.mpf(x)
    #     c = mp.mpf(c)
    #     return (mp.one + mp.exp(-x)) ** (-c)
    #
    # def genlogistic_sf_mp(x, c):
    #     return mp.one - genlogistic_cdf_mp(x, c)
    #
    # x, c, ref = 100, 0.02, -7.440151952041672e-46
    # print(float(mp.log(genlogistic_cdf_mp(x, c))))
    # ppf/isf reference values generated by passing in `ref` (`q` is produced)
    @pytest.mark.parametrize('x, c, ref', [(200, 10, 1.3838965267367375e-86),
                                           (500, 20, 1.424915281348257e-216)])
    def test_sf(self, x, c, ref):
        assert_allclose(stats.genlogistic.sf(x, c), ref, rtol=1e-14)
    @pytest.mark.parametrize('q, c, ref', [(0.01, 200, 9.898441467379765),
                                           (0.001, 2, 7.600152115573173)])
    def test_isf(self, q, c, ref):
        assert_allclose(stats.genlogistic.isf(q, c), ref, rtol=5e-16)
    @pytest.mark.parametrize('q, c, ref', [(0.5, 200, 5.6630969187064615),
                                           (0.99, 20, 7.595630231412436)])
    def test_ppf(self, q, c, ref):
        assert_allclose(stats.genlogistic.ppf(q, c), ref, rtol=5e-16)
    @pytest.mark.parametrize('x, c, ref', [(100, 0.02, -7.440151952041672e-46),
                                           (50, 20, -3.857499695927835e-21)])
    def test_logcdf(self, x, c, ref):
        assert_allclose(stats.genlogistic.logcdf(x, c), ref, rtol=1e-15)
class TestHypergeom:
    """Tests for the hypergeometric distribution, including precision at
    large population sizes (gh-1218, gh-4982, gh-8692, gh-18506, gh-18511).
    """
    def setup_method(self):
        np.random.seed(1234)
    def test_rvs(self):
        # Draws respect the support [0, N] and requested shape/dtype.
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
               for eaten in fruits_eaten]
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)
    def test_entropy(self):
        # Simple tests of entropy.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)
    def test_logsf(self):
        # Test logsf for very large numbers. See issue #4982
        # Results compare with those from R (v3.2.0):
        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
        # -2239.771
        k = 1e4
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2239.771   # From R
        assert_almost_equal(result, expected, decimal=3)
        k = 1
        M = 1600
        n = 600
        N = 300
        result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2.566567e-68   # From R
        assert_almost_equal(result, expected, decimal=15)
    def test_logcdf(self):
        # Test logcdf for very large numbers. See issue #8692
        # Results compare with those from R (v3.3.2):
        # phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
        # -5273.335
        k = 1
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -5273.335   # From R
        assert_almost_equal(result, expected, decimal=3)
        # Same example as in issue #8692
        k = 40
        M = 1600
        n = 50
        N = 300
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -7.565148879229e-23    # From R
        assert_almost_equal(result, expected, decimal=15)
        k = 125
        M = 1600
        n = 250
        N = 500
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -4.242688e-12    # From R
        assert_almost_equal(result, expected, decimal=15)
        # test broadcasting robustness based on reviewer
        # concerns in PR 9603; using an array version of
        # the example from issue #8692
        k = np.array([40, 40, 40])
        M = 1600
        n = 50
        N = 300
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = np.full(3, -7.565148879229e-23)  # filled from R result
        assert_almost_equal(result, expected, decimal=15)
    def test_mean_gh18511(self):
        # gh-18511 reported that the `mean` was incorrect for large arguments;
        # check that this is resolved
        M = 390_000
        n = 370_000
        N = 12_000
        hm = stats.hypergeom.mean(M, n, N)
        rm = n / M * N
        assert_allclose(hm, rm)
    def test_sf_gh18506(self):
        # gh-18506 reported that `sf` was incorrect for large population;
        # check that this is resolved
        n = 10
        N = 10**5
        i = np.arange(5, 15)
        population_size = 10.**i
        p = stats.hypergeom.sf(n - 1, population_size, N, n)
        assert np.all(p > 0)
        assert np.all(np.diff(p) < 0)
class TestLoggamma:
    """Tests for the log-gamma distribution."""

    # Expected cdf values were computed with mpmath. For given x and c,
    #     x = mpmath.mpf(x)
    #     c = mpmath.mpf(c)
    #     cdf = mpmath.gammainc(c, 0, mpmath.exp(x), regularized=True)
    @pytest.mark.parametrize('x, c, cdf',
                             [(1, 2, 0.7546378854206702),
                              (-1, 14, 6.768116452566383e-18),
                              (-745.1, 0.001, 0.4749605142005238),
                              (-800, 0.001, 0.44958802911019136),
                              (-725, 0.1, 3.4301205868273265e-32),
                              (-740, 0.75, 1.0074360436599631e-241)])
    def test_cdf_ppf(self, x, c, cdf):
        # cdf matches the mpmath reference; ppf inverts it.
        p = stats.loggamma.cdf(x, c)
        assert_allclose(p, cdf, rtol=1e-13)
        y = stats.loggamma.ppf(cdf, c)
        assert_allclose(y, x, rtol=1e-13)

    # Expected sf values were computed with mpmath. For given x and c,
    #     x = mpmath.mpf(x)
    #     c = mpmath.mpf(c)
    #     sf = mpmath.gammainc(c, mpmath.exp(x), mpmath.inf,
    #                          regularized=True)
    @pytest.mark.parametrize('x, c, sf',
                             [(4, 1.5, 1.6341528919488565e-23),
                              (6, 100, 8.23836829202024e-74),
                              (-800, 0.001, 0.5504119708898086),
                              (-743, 0.0025, 0.8437131370024089)])
    def test_sf_isf(self, x, c, sf):
        # sf matches the mpmath reference; isf inverts it.
        s = stats.loggamma.sf(x, c)
        assert_allclose(s, sf, rtol=1e-13)
        y = stats.loggamma.isf(sf, c)
        assert_allclose(y, x, rtol=1e-13)

    def test_logpdf(self):
        # Test logpdf with x=-500, c=2. ln(gamma(2)) = 0, and
        # exp(-500) ~= 7e-218, which is far smaller than the ULP
        # of c*x=-1000, so logpdf(-500, 2) = c*x - exp(x) - ln(gamma(2))
        # should give -1000.0.
        lp = stats.loggamma.logpdf(-500, 2)
        assert_allclose(lp, -1000.0, rtol=1e-14)

    def test_stats(self):
        # The following precomputed values are from the table in section 2.2
        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        # Chan (thesis, McMaster University, 1993).
        table = np.array([
            # c, mean, var, skew, exc. kurt.
            0.5, -1.9635, 4.9348, -1.5351, 4.0000,
            1.0, -0.5772, 1.6449, -1.1395, 2.4000,
            12.0, 2.4427, 0.0869, -0.2946, 0.1735,
        ]).reshape(-1, 5)
        for c, mean, var, skew, kurt in table:
            # BUG FIX: the moments string must be 'mvsk' so the returned
            # order matches [mean, var, skew, kurt]; the previous 'msvk'
            # yielded (mean, skew, var, kurt) and the comparison failed.
            computed = stats.loggamma.stats(c, moments='mvsk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)

    @pytest.mark.parametrize('c', [0.1, 0.001])
    def test_rvs(self, c):
        # Regression test for gh-11094.
        x = stats.loggamma.rvs(c, size=100000)
        # Before gh-11094 was fixed, the case with c=0.001 would
        # generate many -inf values.
        assert np.isfinite(x).all()
        # Crude statistical test. About half the values should be
        # less than the median and half greater than the median.
        med = stats.loggamma.median(c)
        btest = stats.binomtest(np.count_nonzero(x < med), len(x))
        ci = btest.proportion_ci(confidence_level=0.999)
        assert ci.low < 0.5 < ci.high

    @pytest.mark.parametrize("c, ref",
                             [(1e-8, 19.420680753952364),
                              (1, 1.5772156649015328),
                              (1e4, -3.186214986116763),
                              (1e10, -10.093986931748889),
                              (1e100, -113.71031611649761)])
    def test_entropy(self, c, ref):
        # Reference values were calculated with mpmath
        # from mpmath import mp
        # mp.dps = 500
        # def loggamma_entropy_mpmath(c):
        #     c = mp.mpf(c)
        #     return float(mp.log(mp.gamma(c)) + c * (mp.one - mp.digamma(c)))
        assert_allclose(stats.loggamma.entropy(c), ref, rtol=1e-14)
class TestJohnsonsu:
    """Tail tests for the Johnson SU distribution."""
    # reference values were computed via mpmath
    # from mpmath import mp
    # mp.dps = 50
    # def johnsonsu_sf(x, a, b):
    #     x = mp.mpf(x)
    #     a = mp.mpf(a)
    #     b = mp.mpf(b)
    #     return float(mp.ncdf(-(a + b * mp.log(x + mp.sqrt(x*x + 1)))))
    # Order is x, a, b, sf, isf tol
    # (Can't expect full precision when the ISF input is very nearly 1)
    cases = [(-500, 1, 1, 0.9999999982660072, 1e-8),
             (2000, 1, 1, 7.426351000595343e-21, 5e-14),
             (100000, 1, 1, 4.046923979269977e-40, 5e-14)]
    @pytest.mark.parametrize("case", cases)
    def test_sf_isf(self, case):
        # sf matches the mpmath reference; isf inverts it within `tol`.
        x, a, b, sf, tol = case
        assert_allclose(stats.johnsonsu.sf(x, a, b), sf, rtol=5e-14)
        assert_allclose(stats.johnsonsu.isf(sf, a, b), x, rtol=tol)
class TestJohnsonb:
    """Tail tests for the Johnson SB distribution (`stats.johnsonsb`)."""
    # reference values were computed via mpmath
    # from mpmath import mp
    # mp.dps = 50
    # def johnsonb_sf(x, a, b):
    #     x = mp.mpf(x)
    #     a = mp.mpf(a)
    #     b = mp.mpf(b)
    #     return float(mp.ncdf(-(a + b * mp.log(x/(mp.one - x)))))
    # Order is x, a, b, sf, isf atol
    # (Can't expect full precision when the ISF input is very nearly 1)
    cases = [(1e-4, 1, 1, 0.9999999999999999, 1e-7),
             (0.9999, 1, 1, 8.921114313932308e-25, 5e-14),
             (0.999999, 1, 1, 5.815197487181902e-50, 5e-14)]
    @pytest.mark.parametrize("case", cases)
    def test_sf_isf(self, case):
        # sf matches the mpmath reference; isf inverts it within `tol`.
        x, a, b, sf, tol = case
        assert_allclose(stats.johnsonsb.sf(x, a, b), sf, rtol=5e-14)
        assert_allclose(stats.johnsonsb.isf(sf, a, b), x, atol=tol)
class TestLogistic:
    """Tests for the logistic distribution."""
    # gh-6226
    def test_cdf_ppf(self):
        # ppf inverts cdf across a wide range.
        x = np.linspace(-20, 20)
        y = stats.logistic.cdf(x)
        xx = stats.logistic.ppf(y)
        assert_allclose(x, xx)
    def test_sf_isf(self):
        # isf inverts sf across a wide range.
        x = np.linspace(-20, 20)
        y = stats.logistic.sf(x)
        xx = stats.logistic.isf(y)
        assert_allclose(x, xx)
    def test_extreme_values(self):
        # p is chosen so that 1 - (1 - p) == p in double precision
        p = 9.992007221626409e-16
        desired = 34.53957599234088
        assert_allclose(stats.logistic.ppf(1 - p), desired)
        assert_allclose(stats.logistic.isf(p), desired)
    def test_logpdf_basic(self):
        logp = stats.logistic.logpdf([-15, 0, 10])
        # Expected values computed with mpmath with 50 digits of precision.
        expected = [-15.000000611804547,
                    -1.3862943611198906,
                    -10.000090797798434]
        assert_allclose(logp, expected, rtol=1e-13)
    def test_logpdf_extreme_values(self):
        logp = stats.logistic.logpdf([800, -800])
        # For such large arguments, logpdf(x) = -abs(x) when computed
        # with 64 bit floating point.
        assert_equal(logp, [-800, -800])
    @pytest.mark.parametrize("loc_rvs,scale_rvs", [(0.4484955, 0.10216821),
                                                   (0.62918191, 0.74367064)])
    def test_fit(self, loc_rvs, scale_rvs):
        data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)
        # test that result of fit method is the same as optimization
        # (`func` encodes the MLE score equations for loc a and scale b)
        def func(input, data):
            a, b = input
            n = len(data)
            x1 = np.sum(np.exp((data - a) / b) /
                        (1 + np.exp((data - a) / b))) - n / 2
            x2 = np.sum(((data - a) / b) *
                        ((np.exp((data - a) / b) - 1) /
                         (np.exp((data - a) / b) + 1))) - n
            return x1, x2
        expected_solution = root(func, stats.logistic._fitstart(data), args=(
            data,)).x
        fit_method = stats.logistic.fit(data)
        # other than computational variances, the fit method and the solution
        # to this system of equations are equal
        assert_allclose(fit_method, expected_solution, atol=1e-30)
    def test_fit_comp_optimizer(self):
        # `logistic.fit` must be at least as good as the generic optimizer.
        data = stats.logistic.rvs(size=100, loc=0.5, scale=2)
        _assert_less_or_close_loglike(stats.logistic, data)
        _assert_less_or_close_loglike(stats.logistic, data, floc=1)
        _assert_less_or_close_loglike(stats.logistic, data, fscale=1)
    @pytest.mark.parametrize('testlogcdf', [True, False])
    def test_logcdfsf_tails(self, testlogcdf):
        # Test either logcdf or logsf. By symmetry, we can use the same
        # expected values for both by switching the sign of x for logsf.
        x = np.array([-10000, -800, 17, 50, 500])
        if testlogcdf:
            y = stats.logistic.logcdf(x)
        else:
            y = stats.logistic.logsf(-x)
        # The expected values were computed with mpmath.
        expected = [-10000.0, -800.0, -4.139937633089748e-08,
                    -1.9287498479639178e-22, -7.124576406741286e-218]
        assert_allclose(y, expected, rtol=2e-15)
    def test_fit_gh_18176(self):
        # logistic.fit returned `scale < 0` for this data. Check that this has
        # been fixed.
        data = np.array([-459, 37, 43, 45, 45, 48, 54, 55, 58]
                        + [59] * 3 + [61] * 9)
        # If scale were negative, NLLF would be infinite, so this would fail
        _assert_less_or_close_loglike(stats.logistic, data)
class TestLogser:
    """Tests for the log-series (logser) discrete distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        """Variates are integers >= 1 and honor size/frozen-dist calls."""
        sample = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(sample >= 1))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.logser.rvs(0.75)
        assert_(isinstance(scalar, int))
        frozen_sample = stats.logser(0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pmf_small_p(self):
        """pmf stays accurate for tiny p.

        The expected value was computed using mpmath:
        >>> import mpmath
        >>> mpmath.mp.dps = 64
        >>> k = 4
        >>> p = mpmath.mpf('1e-20')
        >>> float(-(p**k)/k/mpmath.log(1-p))
        2.5e-61
        It is also clear from noticing that for very small p,
        log(1-p) is approximately -p, and the formula becomes
        p**(k-1) / k.
        """
        assert_allclose(stats.logser.pmf(4, 1e-20), 2.5e-61)

    def test_mean_small_p(self):
        """mean stays accurate for tiny p.

        The expected mean was computed using mpmath:
        >>> import mpmath
        >>> mpmath.dps = 60
        >>> p = mpmath.mpf('1e-8')
        >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
        1.000000005
        """
        assert_allclose(stats.logser.mean(1e-8), 1.000000005)
class TestGumbel_r_l:
    """Fit tests shared by the right- and left-skewed Gumbel distributions."""

    @pytest.fixture(scope='function')
    def rng(self):
        # Fresh, seeded generator per test for reproducible samples.
        return np.random.default_rng(1234)

    @pytest.mark.parametrize("dist", [stats.gumbel_r, stats.gumbel_l])
    @pytest.mark.parametrize("loc_rvs", [-1, 0, 1])
    @pytest.mark.parametrize("scale_rvs", [.1, 1, 5])
    @pytest.mark.parametrize('fix_loc, fix_scale',
                             ([True, False], [False, True]))
    def test_fit_comp_optimizer(self, dist, loc_rvs, scale_rvs,
                                fix_loc, fix_scale, rng):
        """gumbel_{r,l}.fit must not yield a worse log-likelihood than the
        generic superclass optimizer, with one of loc/scale held fixed."""
        data = dist.rvs(size=100, loc=loc_rvs, scale=scale_rvs,
                        random_state=rng)
        kwds = dict()
        # the fixed location and scales are arbitrarily modified to not be
        # close to the true value.
        if fix_loc:
            kwds['floc'] = loc_rvs * 2
        if fix_scale:
            kwds['fscale'] = scale_rvs * 2
        # test that the gumbel_* fit method is better than super method
        _assert_less_or_close_loglike(dist, data, **kwds)

    @pytest.mark.parametrize("dist, sgn", [(stats.gumbel_r, 1),
                                           (stats.gumbel_l, -1)])
    def test_fit(self, dist, sgn):
        """Near-degenerate data: the fitted scale is tiny but must stay
        positive and match the high-precision reference."""
        z = sgn*np.array([3, 3, 3, 3, 3, 3, 3, 3.00000001])
        loc, scale = dist.fit(z)
        # The expected values were computed with mpmath with 60 digits
        # of precision.
        assert_allclose(loc, sgn*3.0000000001667906)
        assert_allclose(scale, 1.2495222465145514e-09, rtol=1e-6)
class TestPareto:
    """Tests for the Pareto distribution: moments near the existence
    thresholds, tail probabilities, and the analytical MLE fit."""

    def test_stats(self):
        """stats() around moment-existence thresholds (n-th moment needs
        b > n), checking inf/nan transitions without RuntimeWarnings."""
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            # b <= 1: no finite moments at all.
            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # 1 < b <= 2: mean exists, variance does not.
            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # 2 < b <= 3: mean and variance exist, skewness does not.
            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # 3 < b <= 4: skewness exists, kurtosis does not.
            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)

            # b > 4: all four moments exist.
            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))

    def test_sf(self):
        """sf follows the closed form (scale/x)**b deep in the tail."""
        x = 1e9
        b = 2
        scale = 1.5
        p = stats.pareto.sf(x, b, loc=0, scale=scale)
        expected = (scale/x)**b   # 2.25e-18
        assert_allclose(p, expected)

    @pytest.fixture(scope='function')
    def rng(self):
        # Fresh, seeded generator per test for reproducible samples.
        return np.random.default_rng(1234)

    @pytest.mark.filterwarnings("ignore:invalid value encountered in "
                                "double_scalars")
    @pytest.mark.parametrize("rvs_shape", [1, 2])
    @pytest.mark.parametrize("rvs_loc", [0, 2])
    @pytest.mark.parametrize("rvs_scale", [1, 5])
    def test_fit(self, rvs_shape, rvs_loc, rvs_scale, rng):
        """Analytical MLE formulas and fixed-parameter aliases of fit()."""
        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
                                loc=rvs_loc, random_state=rng)

        # shape can still be fixed with multiple names
        shape_mle_analytical1 = stats.pareto.fit(data, floc=0, f0=1.04)[0]
        shape_mle_analytical2 = stats.pareto.fit(data, floc=0, fix_b=1.04)[0]
        shape_mle_analytical3 = stats.pareto.fit(data, floc=0, fb=1.04)[0]
        assert (shape_mle_analytical1 == shape_mle_analytical2 ==
                shape_mle_analytical3 == 1.04)

        # data can be shifted with changes to `loc`
        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
                                loc=(rvs_loc + 2), random_state=rng)
        shape_mle_a, loc_mle_a, scale_mle_a = stats.pareto.fit(data, floc=2)
        # With loc fixed, the MLE of scale is min(data) - loc ...
        assert_equal(scale_mle_a + 2, data.min())

        # ... and the MLE of the shape has the standard closed form
        # n / sum(log(x_i / x_min)).
        data_shift = data - 2
        ndata = data_shift.shape[0]
        assert_equal(shape_mle_a,
                     ndata / np.sum(np.log(data_shift/data_shift.min())))
        assert_equal(loc_mle_a, 2)

    @pytest.mark.parametrize("rvs_shape", [.1, 2])
    @pytest.mark.parametrize("rvs_loc", [0, 2])
    @pytest.mark.parametrize("rvs_scale", [1, 5])
    @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale',
                             [p for p in product([True, False], repeat=3)
                              if False in p])
    @np.errstate(invalid="ignore")
    def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale,
                                    fix_shape, fix_loc, fix_scale, rng):
        """pareto.fit must match or beat the generic optimizer for every
        combination of fixed parameters (except all three fixed)."""
        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
                                loc=rvs_loc, random_state=rng)

        kwds = {}
        if fix_shape:
            kwds['f0'] = rvs_shape
        if fix_loc:
            kwds['floc'] = rvs_loc
        if fix_scale:
            kwds['fscale'] = rvs_scale

        _assert_less_or_close_loglike(stats.pareto, data, **kwds)

    @np.errstate(invalid="ignore")
    def test_fit_known_bad_seed(self):
        """Tests a known seed and set of parameters that would produce a
        result that would violate the support of Pareto if the fit method
        did not check the constraint `fscale + floc < min(data)`."""
        shape, location, scale = 1, 0, 1
        data = stats.pareto.rvs(shape, location, scale, size=100,
                                random_state=np.random.default_rng(2535619))
        _assert_less_or_close_loglike(stats.pareto, data)

    def test_fit_warnings(self):
        """Invalid fixed parameters must raise FitDataError."""
        assert_fit_warnings(stats.pareto)
        # `floc` that causes invalid negative data
        assert_raises(FitDataError, stats.pareto.fit, [1, 2, 3], floc=2)
        # `floc` and `fscale` combination causes invalid data
        assert_raises(FitDataError, stats.pareto.fit, [5, 2, 3], floc=1,
                      fscale=3)

    def test_negative_data(self, rng):
        """All-negative (shifted) data must fit without runtime warnings."""
        data = stats.pareto.rvs(loc=-130, b=1, size=100, random_state=rng)
        assert_array_less(data, 0)
        # The purpose of this test is to make sure that no runtime warnings are
        # raised for all negative data, not the output of the fit method. Other
        # methods test the output but have to silence warnings from the super
        # method.
        _ = stats.pareto.fit(data)
class TestGenpareto:
    """Tests for the generalized Pareto distribution (genpareto)."""

    def test_ab(self):
        """Support endpoints depend on the sign of the shape parameter c."""
        # c >= 0: a, b = [0, inf]
        for c in [1., 0.]:
            c = np.asarray(c)
            a, b = stats.genpareto._get_support(c)
            assert_equal(a, 0.)
            assert_(np.isposinf(b))

        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        a, b = stats.genpareto._get_support(c)
        assert_allclose([a, b], [0., 0.5])

    def test_c0(self):
        """With c=0, genpareto reduces to the exponential distribution."""
        rv = stats.genpareto(c=0.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.expon.pdf(x))
        assert_allclose(rv.cdf(x), stats.expon.cdf(x))
        assert_allclose(rv.sf(x), stats.expon.sf(x))

        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.expon.ppf(q))

    def test_cm1(self):
        """With c=-1, genpareto reduces to the uniform distr on [0, 1]."""
        rv = stats.genpareto(c=-1.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
        assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
        assert_allclose(rv.sf(x), stats.uniform.sf(x))

        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.uniform.ppf(q))

        # logpdf(1., c=-1) should be zero
        assert_allclose(rv.logpdf(1), 0)

    def test_x_inf(self):
        """x=inf is handled gracefully for positive, zero and negative c."""
        rv = stats.genpareto(c=0.1)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))

        rv = stats.genpareto(c=0.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))

        rv = stats.genpareto(c=-1.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))

    def test_c_continuity(self):
        """pdf and cdf are continuous in c at the special values c=0, -1."""
        x = np.linspace(0, 10, 30)
        for c in [0, -1]:
            pdf0 = stats.genpareto.pdf(x, c)
            for dc in [1e-14, -1e-14]:
                pdfc = stats.genpareto.pdf(x, c + dc)
                assert_allclose(pdf0, pdfc, atol=1e-12)

            cdf0 = stats.genpareto.cdf(x, c)
            # BUG FIX: this loop previously used [1e-14, 1e-14], so the
            # approach from below (c - 1e-14) was never exercised; use both
            # signs as in the pdf/ppf/isf continuity tests in this class.
            for dc in [1e-14, -1e-14]:
                cdfc = stats.genpareto.cdf(x, c + dc)
                assert_allclose(cdf0, cdfc, atol=1e-12)

    def test_c_continuity_ppf(self):
        """ppf is continuous in c at the special values c=0, -1."""
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            ppf0 = stats.genpareto.ppf(q, c)
            for dc in [1e-14, -1e-14]:
                ppfc = stats.genpareto.ppf(q, c + dc)
                assert_allclose(ppf0, ppfc, atol=1e-12)

    def test_c_continuity_isf(self):
        """isf is continuous in c at the special values c=0, -1."""
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            isf0 = stats.genpareto.isf(q, c)
            for dc in [1e-14, -1e-14]:
                isfc = stats.genpareto.isf(q, c + dc)
                assert_allclose(isf0, isfc, atol=1e-12)

    def test_cdf_ppf_roundtrip(self):
        """cdf(ppf(q)) == q to machine precision for tiny |c|.

        this should pass with machine precision. hat tip @pbrod
        """
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [1e-8, -1e-18, 1e-15, -1e-15]:
            assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
                            q, atol=1e-15)

    def test_logsf(self):
        """logsf stays accurate far in the tail."""
        logp = stats.genpareto.logsf(1e10, .01, 0, 1)
        assert_allclose(logp, -1842.0680753952365)

    # Values in 'expected_stats' are
    # [mean, variance, skewness, excess kurtosis].
    @pytest.mark.parametrize(
        'c, expected_stats',
        [(0, [1, 1, 2, 6]),
         (1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
         (1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
         (-1, [1/2, 1/12, 0, -6/5])])
    def test_stats(self, c, expected_stats):
        """stats() matches closed-form moments for representative c."""
        result = stats.genpareto.stats(c, moments='mvsk')
        assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)

    def test_var(self):
        """Regression test for gh-11168 (variance accuracy for tiny c)."""
        v = stats.genpareto.var(1e-8)
        assert_allclose(v, 1.000000040000001, rtol=1e-13)
class TestPearson3:
    """Tests for the Pearson type III distribution (skew-parameterized)."""

    def setup_method(self):
        # Deterministic legacy RNG state for the rvs-based checks below.
        np.random.seed(1234)

    def test_rvs(self):
        """rvs returns floats honoring size/frozen-distribution calls."""
        vals = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllFloat'])
        val = stats.pearson3.rvs(0.5)
        assert_(isinstance(val, float))
        val = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllFloat'])
        assert_(len(val) == 3)

    def test_pdf(self):
        """Spot-check pdf for scalar skew, array skew, and array x."""
        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        vals = stats.pearson3.pdf(-3, 0.1)
        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
                                        0.39885918, 0.23413173]), atol=1e-6)

    def test_cdf(self):
        """Spot-check cdf for scalar skew, array skew, and array x."""
        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        vals = stats.pearson3.cdf(-3, 0.1)
        assert_allclose(vals, [0.00082256], atol=1e-6)
        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                               5.06649130e-01, 8.41442111e-01], atol=1e-6)

    def test_negative_cdf_bug_11186(self):
        """incorrect CDFs for negative skews in gh-11186; fixed in gh-12640.

        Also check vectorization w/ negative, zero, and positive skews by
        comparing cdf against numerical integration of the pdf.
        """
        skews = [-3, -1, 0, 0.5]
        x_eval = 0.5
        neg_inf = -30  # avoid RuntimeWarning caused by np.log(0)
        cdfs = stats.pearson3.cdf(x_eval, skews)
        int_pdfs = [quad(stats.pearson3(skew).pdf, neg_inf, x_eval)[0]
                    for skew in skews]
        assert_allclose(cdfs, int_pdfs)

    def test_return_array_bug_11746(self):
        """pearson3.moment was returning size 0 or 1 array instead of float.

        The first moment is equal to the loc, which defaults to zero.
        """
        moment = stats.pearson3.moment(1, 2)
        assert_equal(moment, 0)
        assert isinstance(moment, np.number)

        moment = stats.pearson3.moment(1, 0.000001)
        assert_equal(moment, 0)
        assert isinstance(moment, np.number)

    def test_ppf_bug_17050(self):
        """incorrect PPF for negative skews were reported in gh-17050.

        Check that this is fixed (even in the array case), and that negating
        the skew flips the distribution about the origin.
        """
        skews = [-3, -1, 0, 0.5]
        x_eval = 0.5
        res = stats.pearson3.ppf(stats.pearson3.cdf(x_eval, skews), skews)
        assert_allclose(res, x_eval)

        # Negation of the skew flips the distribution about the origin, so
        # the following should hold
        skew = np.array([[-0.5], [1.5]])
        x = np.linspace(-2, 2)
        assert_allclose(stats.pearson3.pdf(x, skew),
                        stats.pearson3.pdf(-x, -skew))
        assert_allclose(stats.pearson3.cdf(x, skew),
                        stats.pearson3.sf(-x, -skew))
        assert_allclose(stats.pearson3.ppf(x, skew),
                        -stats.pearson3.isf(x, -skew))

    def test_sf(self):
        """sf matches high-precision references in the far tail.

        reference values were computed via the reference distribution, e.g.
        mp.dps = 50; Pearson3(skew=skew).sf(x). Check positive, negative,
        and zero skew due to branching.
        """
        skew = [0.1, 0.5, 1.0, -0.1]
        x = [5.0, 10.0, 50.0, 8.0]
        ref = [1.64721926440872e-06, 8.271911573556123e-11,
               1.3149506021756343e-40, 2.763057937820296e-21]
        assert_allclose(stats.pearson3.sf(x, skew), ref, rtol=2e-14)
        assert_allclose(stats.pearson3.sf(x, 0), stats.norm.sf(x), rtol=2e-14)
class TestKappa4:
    """kappa4 reduces to several named distributions for special (h, k)."""

    def test_cdf_genpareto(self):
        # h = 1 and k != 0 is generalized Pareto
        pts = [0.0, 0.1, 0.2, 0.5]
        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0, 1.9]:
            # shape parameter is opposite what is expected
            assert_allclose(stats.kappa4.cdf(pts, 1.0, k),
                            stats.genpareto.cdf(pts, -k))

    def test_cdf_genextreme(self):
        # h = 0 and k != 0 is generalized extreme value
        pts = np.linspace(-5, 5, 10)
        shapes = np.linspace(-3, 3, 10)
        assert_allclose(stats.kappa4.cdf(pts, 0.0, shapes),
                        stats.genextreme.cdf(pts, shapes))

    def test_cdf_expon(self):
        # h = 1 and k = 0 is exponential
        pts = np.linspace(0, 10, 10)
        assert_allclose(stats.kappa4.cdf(pts, 1.0, 0.0), stats.expon.cdf(pts))

    def test_cdf_gumbel_r(self):
        # h = 0 and k = 0 is gumbel_r
        pts = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(pts, 0.0, 0.0),
                        stats.gumbel_r.cdf(pts))

    def test_cdf_logistic(self):
        # h = -1 and k = 0 is logistic
        pts = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(pts, -1.0, 0.0),
                        stats.logistic.cdf(pts))

    def test_cdf_uniform(self):
        # h = 1 and k = 1 is uniform
        pts = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(pts, 1.0, 1.0),
                        stats.uniform.cdf(pts))

    def test_integers_ctor(self):
        # regression test for gh-7416: _argcheck fails for integer h and k
        # in numpy 1.12
        stats.kappa4(1, 2)
class TestPoisson:
    """Tests for the Poisson discrete distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_pmf_basic(self):
        # Basic case: with mu = log(2), P(X=0) = 1/2.
        mu = np.log(2)
        computed = stats.poisson.pmf([0, 1, 2], mu)
        assert_allclose(computed, [0.5, mu/2, mu**2/4])

    def test_mu0(self):
        # Edge case: mu=0 concentrates all mass at k=0.
        assert_array_equal(stats.poisson.pmf([0, 1, 2], 0), [1, 0, 0])
        assert_equal(stats.poisson.interval(0.95, 0), (0, 0))

    def test_rvs(self):
        sample = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.poisson.rvs(0.5)
        assert_(isinstance(scalar, int))
        frozen = stats.poisson(0.5).rvs(3)
        assert_(isinstance(frozen, numpy.ndarray))
        assert_(frozen.dtype.char in typecodes['AllInteger'])

    def test_stats(self):
        # Scalar mu: mean = var = mu, skew = 1/sqrt(mu), kurt = 1/mu.
        mu = 16.0
        assert_allclose(stats.poisson.stats(mu, moments='mvsk'),
                        [mu, mu, np.sqrt(1.0/mu), 1.0/mu])

        # Array mu, including the mu=0 edge case (infinite skew/kurtosis).
        mu = np.array([0.0, 1.0, 2.0])
        expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
        assert_allclose(stats.poisson.stats(mu, moments='mvsk'), expected)
class TestKSTwo:
    """Tests for the two-sided Kolmogorov-Smirnov statistic distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_cdf(self):
        """cdf matches exact closed-form values at special points."""
        for n in [1, 2, 3, 10, 100, 1000]:
            # Test x-values:
            #  0, 1/2n, where the cdf should be 0
            #  1/n, where the cdf should be n!/n^n
            #  0.5, where the cdf should match ksone.cdf
            # 1-1/n, where cdf = 1-2/n^n
            # 1, where cdf == 1
            # (E.g. Exact values given by Eqn 1 in Simard / L'Ecuyer)
            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
            v1 = (1.0/n)**n
            lg = scipy.special.gammaln(n+1)
            elg = (np.exp(lg) if v1 != 0 else 0)
            expected = np.array([0, 0, v1 * elg,
                                 1 - 2*stats.ksone.sf(0.5, n),
                                 max(1 - 2*v1, 0.0),
                                 1.0])
            vals_cdf = stats.kstwo.cdf(x, n)
            assert_allclose(vals_cdf, expected)

    def test_sf(self):
        """sf matches exact closed-form values at special points."""
        # BUG FIX: an initial `x = np.linspace(0, 1, 11)` was dead code --
        # it was unconditionally overwritten inside the loop -- so it has
        # been removed.
        for n in [1, 2, 3, 10, 100, 1000]:
            # Same x values as in test_cdf, and use sf = 1 - cdf
            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
            v1 = (1.0/n)**n
            lg = scipy.special.gammaln(n+1)
            elg = (np.exp(lg) if v1 != 0 else 0)
            expected = np.array([1.0, 1.0,
                                 1 - v1 * elg,
                                 2*stats.ksone.sf(0.5, n),
                                 min(2*v1, 1.0), 0])
            vals_sf = stats.kstwo.sf(x, n)
            assert_allclose(vals_sf, expected)

    def test_cdf_sqrtn(self):
        """cdf(a/sqrt(n), n) is increasing in n (-> kstwobign(a))."""
        # For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity
        # cdf(a/sqrt(n), n) is an increasing function of n (and a)
        # Check that the function is indeed increasing (allowing for some
        # small floating point and algorithm differences.)
        x = np.linspace(0, 2, 11)[1:]
        ns = [50, 100, 200, 400, 1000, 2000]
        for _x in x:
            xn = _x / np.sqrt(ns)
            probs = stats.kstwo.cdf(xn, ns)
            diffs = np.diff(probs)
            assert_array_less(diffs, 1e-8)

    def test_cdf_sf(self):
        """cdf and sf are complementary on a grid of x."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            vals_cdf = stats.kstwo.cdf(x, n)
            vals_sf = stats.kstwo.sf(x, n)
            assert_array_almost_equal(vals_cdf, 1 - vals_sf)

    def test_cdf_sf_sqrtn(self):
        """cdf and sf are complementary on the 1/sqrt(n)-scaled grid."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x / np.sqrt(n)
            vals_cdf = stats.kstwo.cdf(xn, n)
            vals_sf = stats.kstwo.sf(xn, n)
            assert_array_almost_equal(vals_cdf, 1 - vals_sf)

    def test_ppf_of_cdf(self):
        """ppf inverts cdf within the support."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x[x > 0.5/n]
            vals_cdf = stats.kstwo.cdf(xn, n)
            # CDFs close to 1 are better dealt with using the SF
            cond = (0 < vals_cdf) & (vals_cdf < 0.99)
            vals = stats.kstwo.ppf(vals_cdf, n)
            assert_allclose(vals[cond], xn[cond], rtol=1e-4)

    def test_isf_of_sf(self):
        """sf inverts isf within the support."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x[x > 0.5/n]
            vals_isf = stats.kstwo.isf(xn, n)
            cond = (0 < vals_isf) & (vals_isf < 1.0)
            vals = stats.kstwo.sf(vals_isf, n)
            assert_allclose(vals[cond], xn[cond], rtol=1e-4)

    def test_ppf_of_cdf_sqrtn(self):
        """ppf inverts cdf on the 1/sqrt(n)-scaled grid."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = (x / np.sqrt(n))[x > 0.5/n]
            vals_cdf = stats.kstwo.cdf(xn, n)
            cond = (0 < vals_cdf) & (vals_cdf < 1.0)
            vals = stats.kstwo.ppf(vals_cdf, n)
            assert_allclose(vals[cond], xn[cond])

    def test_isf_of_sf_sqrtn(self):
        """isf inverts sf on the 1/sqrt(n)-scaled grid."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = (x / np.sqrt(n))[x > 0.5/n]
            vals_sf = stats.kstwo.sf(xn, n)
            # SFs close to 1 are better dealt with using the CDF
            cond = (0 < vals_sf) & (vals_sf < 0.95)
            vals = stats.kstwo.isf(vals_sf, n)
            assert_allclose(vals[cond], xn[cond])

    def test_ppf(self):
        """cdf(ppf(p)) round-trips for a grid of probabilities."""
        probs = np.linspace(0, 1, 11)[1:]
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = stats.kstwo.ppf(probs, n)
            vals_cdf = stats.kstwo.cdf(xn, n)
            assert_allclose(vals_cdf, probs)

    def test_simard_lecuyer_table1(self):
        """cdf values near the mean against an independent implementation.

        Compute the cdf for values near the mean of the distribution.
        The mean u ~ log(2)*sqrt(pi/(2n))
        Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
        This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
        "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
        Except that the values below are not from the published table, but
        were generated using an independent SageMath implementation of
        Durbin's algorithm (with the exponentiation and scaling of
        Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
        Some of the values in the published table have relative
        errors greater than 1e-4.
        """
        ns = [10, 50, 100, 200, 500, 1000]
        ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
        expected = np.array([
            [1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01,
             9.97685592e-01, 9.99999942e-01],
            [2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01,
             9.96177701e-01, 9.99998662e-01],
            [1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01,
             9.95866877e-01, 9.99998240e-01],
            [4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01,
             9.95661824e-01, 9.99997964e-01],
            [2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01,
             9.95491207e-01, 9.99997750e-01],
            [1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01,
             9.95409545e-01, 9.99997657e-01]
        ])
        for idx, n in enumerate(ns):
            x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
            vals_cdf = stats.kstwo.cdf(x, n)
            assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
class TestZipf:
    """Tests for the Zipf (zeta) discrete distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        sample = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(sample >= 1))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.zipf.rvs(1.5)
        assert_(isinstance(scalar, int))
        frozen = stats.zipf(1.5).rvs(3)
        assert_(isinstance(frozen, numpy.ndarray))
        assert_(frozen.dtype.char in typecodes['AllInteger'])

    def test_moments(self):
        # n-th moment is finite iff a > n + 1
        mean, var = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(mean))
        assert_equal(var, np.inf)

        skew, kurt = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([skew, kurt]).all())
class TestDLaplace:
    """Tests for the discrete Laplace distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        sample = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.dlaplace.rvs(1.5)
        assert_(isinstance(scalar, int))
        frozen = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(frozen, numpy.ndarray))
        assert_(frozen.dtype.char in typecodes['AllInteger'])
        assert_(stats.dlaplace.rvs(0.8) is not None)

    def test_stats(self):
        # compare the explicit formulas w/ direct summation using pmf
        dist = stats.dlaplace(1.)
        m, v, s, k = dist.stats('mvsk')

        support = np.arange(-37, 38)
        probs = dist.pmf(support)
        m2 = np.sum(probs * support**2)
        m4 = np.sum(probs * support**4)
        assert_equal((m, s), (0, 0))
        assert_allclose((v, k), (m2, m4 / m2**2 - 3.), atol=1e-14, rtol=1e-8)

    def test_stats2(self):
        # closed-form moments at a = log(2)
        dist = stats.dlaplace(np.log(2.))
        m, v, s, k = dist.stats('mvsk')
        assert_equal((m, s), (0., 0.))
        assert_allclose((v, k), (4., 3.25))
class TestInvgauss:
    """Tests for the inverse Gaussian distribution: analytical fit,
    tail probabilities, and entropy."""

    def setup_method(self):
        np.random.seed(1234)

    @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
                             [(2, 0, 1), (4.635, 4.362, 6.303)])
    def test_fit(self, rvs_mu, rvs_loc, rvs_scale):
        """With `floc` fixed, fit() must use the analytical MLE formulas."""
        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
                                  loc=rvs_loc, scale=rvs_scale)
        # Analytical MLEs are calculated with formula when `floc` is fixed
        mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc)

        data = data - rvs_loc
        mu_temp = np.mean(data)
        # Closed-form MLEs: scale = n / sum(1/x - 1/xbar), mu = xbar/scale.
        scale_mle = len(data) / (np.sum(data**(-1) - mu_temp**(-1)))
        mu_mle = mu_temp/scale_mle

        # `mu` and `scale` match analytical formula
        assert_allclose(mu_mle, mu, atol=1e-15, rtol=1e-15)
        assert_allclose(scale_mle, scale, atol=1e-15, rtol=1e-15)
        assert_equal(loc, rvs_loc)
        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
                                  loc=rvs_loc, scale=rvs_scale)
        # fixed parameters are returned
        mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc - 1,
                                            fscale=rvs_scale + 1)
        assert_equal(rvs_scale + 1, scale)
        assert_equal(rvs_loc - 1, loc)

        # shape can still be fixed with multiple names
        shape_mle1 = stats.invgauss.fit(data, fmu=1.04)[0]
        shape_mle2 = stats.invgauss.fit(data, fix_mu=1.04)[0]
        shape_mle3 = stats.invgauss.fit(data, f0=1.04)[0]
        assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04

    @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
                             [(2, 0, 1), (6.311, 3.225, 4.520)])
    def test_fit_MLE_comp_optimizer(self, rvs_mu, rvs_loc, rvs_scale):
        """Analytical fit (fixed floc) must match or beat the generic
        superclass optimizer; without floc it must defer to it."""
        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
                                  loc=rvs_loc, scale=rvs_scale)

        super_fit = super(type(stats.invgauss), stats.invgauss).fit
        # fitting without `floc` uses superclass fit method
        super_fitted = super_fit(data)
        invgauss_fit = stats.invgauss.fit(data)
        assert_equal(super_fitted, invgauss_fit)

        # fitting with `fmu` uses superclass fit method
        super_fitted = super_fit(data, floc=0, fmu=2)
        invgauss_fit = stats.invgauss.fit(data, floc=0, fmu=2)
        assert_equal(super_fitted, invgauss_fit)

        # fixed `floc` uses analytical formula and provides better fit than
        # super method
        _assert_less_or_close_loglike(stats.invgauss, data, floc=rvs_loc)

        # fixed `floc` not resulting in invalid data < 0 uses analytical
        # formulas and provides a better fit than the super method
        assert np.all((data - (rvs_loc - 1)) > 0)
        _assert_less_or_close_loglike(stats.invgauss, data, floc=rvs_loc - 1)

        # fixed `floc` to an arbitrary number, 0, still provides a better fit
        # than the super method
        _assert_less_or_close_loglike(stats.invgauss, data, floc=0)

        # fixed `fscale` to an arbitrary number still provides a better fit
        # than the super method
        _assert_less_or_close_loglike(stats.invgauss, data, floc=rvs_loc,
                                      fscale=np.random.rand(1)[0])

    def test_fit_raise_errors(self):
        """Invalid fixed parameters must raise FitDataError."""
        assert_fit_warnings(stats.invgauss)
        # FitDataError is raised when negative invalid data
        with pytest.raises(FitDataError):
            stats.invgauss.fit([1, 2, 3], floc=2)

    def test_cdf_sf(self):
        """Regression tests for gh-13614.

        Ground truth from R's statmod library (pinvgauss), e.g.
        library(statmod)
        options(digits=15)
        mu = c(4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
               3.02332573e-03, 1.46755891e-03)
        print(pinvgauss(5, mu, 1))
        """
        # make sure a finite value is returned when mu is very small. see
        # GH-13614
        mu = [4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
              3.02332573e-03, 1.46755891e-03]
        expected = [1, 1, 1, 1, 1]
        actual = stats.invgauss.cdf(0.4, mu=mu)
        assert_equal(expected, actual)

        # test if the function can distinguish small left/right tail
        # probabilities from zero.
        cdf_actual = stats.invgauss.cdf(0.001, mu=1.05)
        assert_allclose(cdf_actual, 4.65246506892667e-219)
        sf_actual = stats.invgauss.sf(110, mu=1.05)
        assert_allclose(sf_actual, 4.12851625944048e-25)

        # test if x does not cause numerical issues when mu is very small
        # and x is close to mu in value.

        # slightly smaller than mu
        actual = stats.invgauss.cdf(0.00009, 0.0001)
        assert_allclose(actual, 2.9458022894924e-26)

        # slightly bigger than mu
        actual = stats.invgauss.cdf(0.000102, 0.0001)
        assert_allclose(actual, 0.976445540507925)

    def test_logcdf_logsf(self):
        """Regression tests for improvements made in gh-13616.

        Ground truth from R's statmod library (pinvgauss), e.g.
        library(statmod)
        options(digits=15)
        print(pinvgauss(0.001, 1.05, 1, log.p=TRUE, lower.tail=FALSE))
        """
        # test if logcdf and logsf can compute values too small to
        # be represented on the unlogged scale. See: gh-13616
        logcdf = stats.invgauss.logcdf(0.0001, mu=1.05)
        assert_allclose(logcdf, -5003.87872590367)
        logcdf = stats.invgauss.logcdf(110, 1.05)
        assert_allclose(logcdf, -4.12851625944087e-25)
        logsf = stats.invgauss.logsf(0.001, mu=1.05)
        assert_allclose(logsf, -4.65246506892676e-219)
        logsf = stats.invgauss.logsf(110, 1.05)
        assert_allclose(logsf, -56.1467092416426)

    # Reference values computed with mpmath:
    # from mpmath import mp
    # mp.dps = 100
    # mu = mp.mpf(1e-2)
    # ref = (1/2 * mp.log(2 * mp.pi * mp.e * mu**3)
    #        - 3/2* mp.exp(2/mu) * mp.e1(2/mu))
    @pytest.mark.parametrize("mu, ref", [(2e-8, -25.172361826883957),
                                         (1e-3, -8.943444010642972),
                                         (1e-2, -5.4962796152622335),
                                         (1e8, 3.3244822568873476),
                                         (1e100, 3.32448280139689)])
    def test_entropy(self, mu, ref):
        """entropy matches mpmath references over extreme mu."""
        assert_allclose(stats.invgauss.entropy(mu), ref, rtol=5e-14)
class TestLaplace:
    """Tests for the Laplace distribution: analytical MLE fit and
    extreme-tail cdf/sf/isf accuracy."""

    @pytest.mark.parametrize("rvs_loc", [-5, 0, 1, 2])
    @pytest.mark.parametrize("rvs_scale", [1, 2, 3, 10])
    def test_fit(self, rvs_loc, rvs_scale):
        """fit() must return the analytical MLEs (median / mean absolute
        deviation), respect fixed parameters, and reject bad input."""
        # tests that various inputs follow expected behavior
        # for a variety of `loc` and `scale`.
        data = stats.laplace.rvs(size=100, loc=rvs_loc, scale=rvs_scale)

        # MLE estimates are given by
        loc_mle = np.median(data)
        scale_mle = np.sum(np.abs(data - loc_mle)) / len(data)

        # standard outputs should match analytical MLE formulas
        loc, scale = stats.laplace.fit(data)
        assert_allclose(loc, loc_mle, atol=1e-15, rtol=1e-15)
        assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)

        # fixed parameter should use analytical formula for other
        loc, scale = stats.laplace.fit(data, floc=loc_mle)
        assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
        loc, scale = stats.laplace.fit(data, fscale=scale_mle)
        assert_allclose(loc, loc_mle)

        # test with non-mle fixed parameter
        # create scale with non-median loc
        loc = rvs_loc * 2
        scale_mle = np.sum(np.abs(data - loc)) / len(data)

        # fixed loc to non median, scale should match
        # scale calculation with modified loc
        loc, scale = stats.laplace.fit(data, floc=loc)
        assert_equal(scale_mle, scale)

        # fixed scale created with non median loc,
        # loc output should still be the data median.
        loc, scale = stats.laplace.fit(data, fscale=scale_mle)
        assert_equal(loc_mle, loc)

        # error raised when both `floc` and `fscale` are fixed
        assert_raises(RuntimeError, stats.laplace.fit, data, floc=loc_mle,
                      fscale=scale_mle)

        # error is raised with non-finite values
        assert_raises(ValueError, stats.laplace.fit, [np.nan])
        assert_raises(ValueError, stats.laplace.fit, [np.inf])

    @pytest.mark.parametrize("rvs_loc,rvs_scale", [(-5, 10),
                                                   (10, 5),
                                                   (0.5, 0.2)])
    def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale):
        """Analytical MLEs must not give a worse negative log-likelihood
        than the generic superclass optimizer."""
        data = stats.laplace.rvs(size=1000, loc=rvs_loc, scale=rvs_scale)

        # the log-likelihood function for laplace is given by
        def ll(loc, scale, data):
            return -1 * (- (len(data)) * np.log(2*scale) -
                         (1/scale)*np.sum(np.abs(data - loc)))

        # test that the objective function result of the analytical MLEs is
        # less than or equal to that of the numerically optimized estimate
        loc, scale = stats.laplace.fit(data)
        loc_opt, scale_opt = super(type(stats.laplace),
                                   stats.laplace).fit(data)
        ll_mle = ll(loc, scale, data)
        ll_opt = ll(loc_opt, scale_opt, data)
        assert ll_mle < ll_opt or np.allclose(ll_mle, ll_opt,
                                              atol=1e-15, rtol=1e-15)

    def test_fit_simple_non_random_data(self):
        """Hand-checkable fixed-parameter fits on a tiny data set."""
        data = np.array([1.0, 1.0, 3.0, 5.0, 8.0, 14.0])
        # with `floc` fixed to 6, scale should be 4.
        loc, scale = stats.laplace.fit(data, floc=6)
        assert_allclose(scale, 4, atol=1e-15, rtol=1e-15)
        # with `fscale` fixed to 6, loc should be 4.
        loc, scale = stats.laplace.fit(data, fscale=6)
        assert_allclose(loc, 4, atol=1e-15, rtol=1e-15)

    def test_sf_cdf_extremes(self):
        """cdf/sf saturate cleanly to 0/1 without warnings at huge |x|."""
        # These calculations should not generate warnings.
        x = 1000
        p0 = stats.laplace.cdf(-x)
        # The exact value is smaller than can be represented with
        # 64 bit floating point, so the expected result is 0.
        assert p0 == 0.0
        # The closest 64 bit floating point representation of the
        # exact value is 1.0.
        p1 = stats.laplace.cdf(x)
        assert p1 == 1.0

        p0 = stats.laplace.sf(x)
        # The exact value is smaller than can be represented with
        # 64 bit floating point, so the expected result is 0.
        assert p0 == 0.0
        # The closest 64 bit floating point representation of the
        # exact value is 1.0.
        p1 = stats.laplace.sf(-x)
        assert p1 == 1.0

    def test_sf(self):
        """sf follows the closed form exp(-x)/2 in the upper tail."""
        x = 200
        p = stats.laplace.sf(x)
        assert_allclose(p, np.exp(-x)/2, rtol=1e-13)

    def test_isf(self):
        """isf follows the closed form -log(2p) for tiny p."""
        p = 1e-25
        x = stats.laplace.isf(p)
        assert_allclose(x, -np.log(2*p), rtol=1e-13)
class TestLogLaplace:
def test_sf(self):
# reference values were computed via the reference distribution, e.g.
# mp.dps = 100; LogLaplace(c=c).sf(x).
c = np.array([2.0, 3.0, 5.0])
x = np.array([1e-5, 1e10, 1e15])
ref = [0.99999999995, 5e-31, 5e-76]
assert_allclose(stats.loglaplace.sf(x, c), ref, rtol=1e-15)
def test_isf(self):
# reference values were computed via the reference distribution, e.g.
# mp.dps = 100; LogLaplace(c=c).isf(q).
c = 3.25
q = [0.8, 0.1, 1e-10, 1e-20, 1e-40]
ref = [0.7543222539245642, 1.6408455124660906, 964.4916294395846,
1151387.578354072, 1640845512466.0906]
assert_allclose(stats.loglaplace.isf(q, c), ref, rtol=1e-14)
class TestPowerlaw:
    """Tests for the power-function distribution ``stats.powerlaw``."""

    # In the following data, `sf` was computed with mpmath.
    @pytest.mark.parametrize('x, a, sf',
                             [(0.25, 2.0, 0.9375),
                              (0.99609375, 1/256, 1.528855235208108e-05)])
    def test_sf(self, x, a, sf):
        assert_allclose(stats.powerlaw.sf(x, a), sf, rtol=1e-15)

    @pytest.fixture(scope='function')
    def rng(self):
        # Fresh seeded generator per test for reproducibility.
        return np.random.default_rng(1234)

    @pytest.mark.parametrize("rvs_shape", [.1, .5, .75, 1, 2])
    @pytest.mark.parametrize("rvs_loc", [-1, 0, 1])
    @pytest.mark.parametrize("rvs_scale", [.1, 1, 5])
    @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale',
                             [p for p in product([True, False], repeat=3)
                              if False in p])
    def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale,
                                    fix_shape, fix_loc, fix_scale, rng):
        # Analytical MLE must fit no worse than the generic numerical
        # optimizer, for every combination of fixed parameters.
        data = stats.powerlaw.rvs(size=250, a=rvs_shape, loc=rvs_loc,
                                  scale=rvs_scale, random_state=rng)

        kwds = dict()
        if fix_shape:
            kwds['f0'] = rvs_shape
        if fix_loc:
            # A fixed loc must lie strictly below the sample minimum.
            kwds['floc'] = np.nextafter(data.min(), -np.inf)
        if fix_scale:
            kwds['fscale'] = rvs_scale

        _assert_less_or_close_loglike(stats.powerlaw, data, **kwds)

    def test_problem_case(self):
        # An observed problem with the test method indicated that some fixed
        # scale values could cause bad results, this is now corrected.
        a = 2.50002862645130604506
        location = 0.0
        scale = 35.249023299873095

        data = stats.powerlaw.rvs(a=a, loc=location, scale=scale, size=100,
                                  random_state=np.random.default_rng(5))

        kwds = {'fscale': np.ptp(data) * 2}

        _assert_less_or_close_loglike(stats.powerlaw, data, **kwds)

    def test_fit_warnings(self):
        assert_fit_warnings(stats.powerlaw)
        # test for error when `fscale + floc <= np.max(data)` is not satisfied
        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
        with assert_raises(FitDataError, match=msg):
            stats.powerlaw.fit([1, 2, 4], floc=0, fscale=3)

        # test for error when `data - floc >= 0` is not satisfied
        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
        with assert_raises(FitDataError, match=msg):
            stats.powerlaw.fit([1, 2, 4], floc=2)

        # test for fixed location not less than `min(data)`.
        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
        with assert_raises(FitDataError, match=msg):
            stats.powerlaw.fit([1, 2, 4], floc=1)

        # test for when fixed scale is less than or equal to range of data
        msg = r"Negative or zero `fscale` is outside"
        with assert_raises(ValueError, match=msg):
            stats.powerlaw.fit([1, 2, 4], fscale=-3)

        # test for when fixed scale is less than or equal to range of data
        msg = r"`fscale` must be greater than the range of data."
        with assert_raises(ValueError, match=msg):
            stats.powerlaw.fit([1, 2, 4], fscale=3)

    def test_minimum_data_zero_gh17801(self):
        # gh-17801 reported an overflow error when the minimum value of the
        # data is zero. Check that this problem is resolved.
        data = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6]
        dist = stats.powerlaw
        with np.errstate(over='ignore'):
            _assert_less_or_close_loglike(dist, data)
class TestPowerLogNorm:
    """Tests for the power log-normal distribution against mpmath."""

    # reference values were computed via mpmath
    # from mpmath import mp
    # mp.dps = 80
    # def powerlognorm_sf_mp(x, c, s):
    #     x = mp.mpf(x)
    #     c = mp.mpf(c)
    #     s = mp.mpf(s)
    #     return mp.ncdf(-mp.log(x) / s)**c
    #
    # def powerlognormal_cdf_mp(x, c, s):
    #     return mp.one - powerlognorm_sf_mp(x, c, s)
    #
    # x, c, s = 100, 20, 1
    # print(float(powerlognorm_sf_mp(x, c, s)))

    @pytest.mark.parametrize("x, c, s, ref",
                             [(100, 20, 1, 1.9057100820561928e-114),
                              (1e-3, 20, 1, 0.9999999999507617),
                              (1e-3, 0.02, 1, 0.9999999999999508),
                              (1e22, 0.02, 1, 6.50744044621611e-12)])
    def test_sf(self, x, c, s, ref):
        assert_allclose(stats.powerlognorm.sf(x, c, s), ref, rtol=1e-13)

    # reference values were computed via mpmath using the survival
    # function above (passing in `ref` and getting `q`).
    @pytest.mark.parametrize("q, c, s, ref",
                             [(0.9999999587870905, 0.02, 1, 0.01),
                              (6.690376686108851e-233, 20, 1, 1000)])
    def test_isf(self, q, c, s, ref):
        assert_allclose(stats.powerlognorm.isf(q, c, s), ref, rtol=5e-11)

    @pytest.mark.parametrize("x, c, s, ref",
                             [(1e25, 0.02, 1, 0.9999999999999963),
                              (1e-6, 0.02, 1, 2.054921078040843e-45),
                              (1e-6, 200, 1, 2.0549210780408428e-41),
                              (0.3, 200, 1, 0.9999999999713368)])
    def test_cdf(self, x, c, s, ref):
        assert_allclose(stats.powerlognorm.cdf(x, c, s), ref, rtol=3e-14)

    # reference values were computed via mpmath
    # from mpmath import mp
    # mp.dps = 50
    # def powerlognorm_pdf_mpmath(x, c, s):
    #     x = mp.mpf(x)
    #     c = mp.mpf(c)
    #     s = mp.mpf(s)
    #     res = (c/(x * s) * mp.npdf(mp.log(x)/s) *
    #            mp.ncdf(-mp.log(x)/s)**(c - mp.one))
    #     return float(res)

    @pytest.mark.parametrize("x, c, s, ref",
                             [(1e22, 0.02, 1, 6.5954987852335016e-34),
                              (1e20, 1e-3, 1, 1.588073750563988e-22),
                              (1e40, 1e-3, 1, 1.3179391812506349e-43)])
    def test_pdf(self, x, c, s, ref):
        assert_allclose(stats.powerlognorm.pdf(x, c, s), ref, rtol=3e-12)
class TestPowerNorm:
    """Tests for the power normal distribution against mpmath references."""

    # survival function references were computed with mpmath via
    # from mpmath import mp
    # x = mp.mpf(x)
    # c = mp.mpf(c)
    # float(mp.ncdf(-x)**c)
    @pytest.mark.parametrize("x, c, ref",
                             [(9, 1, 1.1285884059538405e-19),
                              (20, 2, 7.582445786569958e-178),
                              (100, 0.02, 3.330957891903866e-44),
                              (200, 0.01, 1.3004759092324774e-87)])
    def test_sf(self, x, c, ref):
        assert_allclose(stats.powernorm.sf(x, c), ref, rtol=1e-13)

    # inverse survival function references were computed with mpmath via
    # from mpmath import mp
    # def isf_mp(q, c):
    #     q = mp.mpf(q)
    #     c = mp.mpf(c)
    #     arg = q**(mp.one / c)
    #     return float(-mp.sqrt(2) * mp.erfinv(mp.mpf(2.) * arg - mp.one))
    @pytest.mark.parametrize("q, c, ref",
                             [(1e-5, 20, -0.15690800666514138),
                              (0.99999, 100, -5.19933666203545),
                              (0.9999, 0.02, -2.576676052143387),
                              (5e-2, 0.02, 17.089518110222244),
                              (1e-18, 2, 5.9978070150076865),
                              (1e-50, 5, 6.361340902404057)])
    def test_isf(self, q, c, ref):
        assert_allclose(stats.powernorm.isf(q, c), ref, rtol=5e-12)

    # CDF reference values were computed with mpmath via
    # from mpmath import mp
    # def cdf_mp(x, c):
    #     x = mp.mpf(x)
    #     c = mp.mpf(c)
    #     return float(mp.one - mp.ncdf(-x)**c)
    @pytest.mark.parametrize("x, c, ref",
                             [(-12, 9, 1.598833900869911e-32),
                              (2, 9, 0.9999999999999983),
                              (-20, 9, 2.4782617067456103e-88),
                              (-5, 0.02, 5.733032242841443e-09),
                              (-20, 0.02, 5.507248237212467e-91)])
    def test_cdf(self, x, c, ref):
        assert_allclose(stats.powernorm.cdf(x, c), ref, rtol=5e-14)
class TestInvGamma:
    """Tests for the inverse gamma distribution."""

    def test_invgamma_inf_gh_1866(self):
        # invgamma's moments are only finite for a>n
        # specific numbers checked w/ boost 1.54
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
            expected = [0.05461496450, 0.0001723162534, 1.020362676,
                        2.055616582]
            assert_allclose(mvsk, expected)

            a = [1.1, 3.1, 5.6]
            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
            expected = ([10., 0.476190476, 0.2173913043],       # mmm
                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
                        [np.nan, 41.95235392, 2.919025532],     # sss
                        [np.nan, np.nan, 24.51923076])          # kkk
            for x, y in zip(mvsk, expected):
                assert_almost_equal(x, y)

    def test_cdf_ppf(self):
        # gh-6245
        x = np.logspace(-2.6, 0)
        y = stats.invgamma.cdf(x, 1)
        xx = stats.invgamma.ppf(y, 1)
        assert_allclose(x, xx)

    def test_sf_isf(self):
        # gh-6245
        if sys.maxsize > 2**32:
            x = np.logspace(2, 100)
        else:
            # invgamma roundtrip on 32-bit systems has relative accuracy
            # ~1e-15 until x=1e+15, and becomes inf above x=1e+18
            x = np.logspace(2, 18)

        y = stats.invgamma.sf(x, 1)
        xx = stats.invgamma.isf(y, 1)
        assert_allclose(x, xx, rtol=1.0)

    @pytest.mark.parametrize("a, ref",
                             [(100000000.0, -26.21208257605721),
                              (1e+100, -343.9688254159022)])
    def test_large_entropy(self, a, ref):
        # The reference values were calculated with mpmath:
        # from mpmath import mp
        # mp.dps = 500
        # def invgamma_entropy(a):
        #     a = mp.mpf(a)
        #     h = a + mp.loggamma(a) - (mp.one + a) * mp.digamma(a)
        #     return float(h)
        assert_allclose(stats.invgamma.entropy(a), ref, rtol=1e-15)
class TestF:
    """Tests for the F distribution."""

    def test_endpoints(self):
        # Compute the pdf at the left endpoint dst.a.
        # (A redundant pre-loop that computed `ans` and immediately
        # discarded it has been removed; the comprehension below is the
        # actual computation under test.)
        data = [[stats.f, (2, 1), 1.0]]
        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [_correct_ for _f, _args, _correct_ in data]
        assert_array_almost_equal(ans, correct)

    def test_f_moments(self):
        # n-th moment of F distributions is only finite for n < dfd / 2
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))

    def test_moments_warnings(self):
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')

    def test_stats_broadcast(self):
        # Check the broadcasting of the moments against the closed forms.
        dfn = np.array([[3], [11]])
        dfd = np.array([11, 12])
        m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
        m2 = [dfd / (dfd - 2)]*2
        assert_allclose(m, m2)
        v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
        assert_allclose(v, v2)
        s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
              ((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
        assert_allclose(s, s2)
        k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
                      (dfd - 4) * (dfd - 2)**2)
        k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
        k2 = k2num / k2den
        assert_allclose(k, k2)
class TestStudentT:
    """Tests for Student's t distribution."""

    def test_rvgeneric_std(self):
        # Regression test for #1191
        assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])

    def test_moments_t(self):
        # regression test for #8786
        assert_equal(stats.t.stats(df=1, moments='mvsk'),
                     (np.inf, np.nan, np.nan, np.nan))
        assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
                     (0.0, np.inf, np.nan, np.nan))
        assert_equal(stats.t.stats(df=2, moments='mvsk'),
                     (0.0, np.inf, np.nan, np.nan))
        assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
                     (0.0, 2.01/(2.01-2.0), np.nan, np.inf))
        assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
        assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
        assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
        assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))

    def test_t_entropy(self):
        df = [1, 2, 25, 100]
        # Expected values were computed with mpmath.
        expected = [2.5310242469692907, 1.9602792291600821,
                    1.459327578078393, 1.4289633653182439]
        assert_allclose(stats.t.entropy(df), expected, rtol=1e-13)

    @pytest.mark.parametrize("v, ref",
                             [(100, 1.4289633653182439),
                              (1e+100, 1.4189385332046727)])
    def test_t_extreme_entropy(self, v, ref):
        # Reference values were calculated with mpmath:
        # from mpmath import mp
        # mp.dps = 500
        #
        # def t_entropy(v):
        #     v = mp.mpf(v)
        #     C = (v + mp.one) / 2
        #     A = C * (mp.digamma(C) - mp.digamma(v / 2))
        #     B = 0.5 * mp.log(v) + mp.log(mp.beta(v / 2, mp.one / 2))
        #     h = A + B
        #     return float(h)
        assert_allclose(stats.t.entropy(v), ref, rtol=1e-14)

    @pytest.mark.parametrize("methname", ["pdf", "logpdf", "cdf",
                                          "ppf", "sf", "isf"])
    @pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1],
                                            [[0, 1, 0], [1, 1, 1]],
                                            [[1, 0], [0, 1]],
                                            [[0], [1]]])
    def test_t_inf_df(self, methname, df_infmask):
        # Entries with df=inf must agree with the normal distribution;
        # the remaining entries must be unaffected by the inf entries.
        np.random.seed(0)
        df_infmask = np.asarray(df_infmask, dtype=bool)
        df = np.random.uniform(0, 10, size=df_infmask.shape)
        x = np.random.randn(*df_infmask.shape)
        df[df_infmask] = np.inf
        t_dist = stats.t(df=df, loc=3, scale=1)
        t_dist_ref = stats.t(df=df[~df_infmask], loc=3, scale=1)
        norm_dist = stats.norm(loc=3, scale=1)
        t_meth = getattr(t_dist, methname)
        t_meth_ref = getattr(t_dist_ref, methname)
        norm_meth = getattr(norm_dist, methname)
        res = t_meth(x)
        assert_equal(res[df_infmask], norm_meth(x[df_infmask]))
        assert_equal(res[~df_infmask], t_meth_ref(x[~df_infmask]))

    @pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1],
                                            [[0, 1, 0], [1, 1, 1]],
                                            [[1, 0], [0, 1]],
                                            [[0], [1]]])
    def test_t_inf_df_stats_entropy(self, df_infmask):
        # stats and entropy with df=inf must match the normal distribution.
        np.random.seed(0)
        df_infmask = np.asarray(df_infmask, dtype=bool)
        df = np.random.uniform(0, 10, size=df_infmask.shape)
        df[df_infmask] = np.inf
        res = stats.t.stats(df=df, loc=3, scale=1, moments='mvsk')
        res_ex_inf = stats.norm.stats(loc=3, scale=1, moments='mvsk')
        res_ex_noinf = stats.t.stats(df=df[~df_infmask], loc=3, scale=1,
                                     moments='mvsk')
        for i in range(4):
            assert_equal(res[i][df_infmask], res_ex_inf[i])
            assert_equal(res[i][~df_infmask], res_ex_noinf[i])

        res = stats.t.entropy(df=df, loc=3, scale=1)
        res_ex_inf = stats.norm.entropy(loc=3, scale=1)
        res_ex_noinf = stats.t.entropy(df=df[~df_infmask], loc=3, scale=1)
        assert_equal(res[df_infmask], res_ex_inf)
        assert_equal(res[~df_infmask], res_ex_noinf)

    def test_logpdf_pdf(self):
        # reference values were computed via the reference distribution, e.g.
        # mp.dps = 500; StudentT(df=df).logpdf(x), StudentT(df=df).pdf(x)
        x = [1, 1e3, 10, 1]
        df = [1e100, 1e50, 1e20, 1]
        logpdf_ref = [-1.4189385332046727, -500000.9189385332,
                      -50.918938533204674, -1.8378770664093456]
        pdf_ref = [0.24197072451914334, 0,
                   7.69459862670642e-23, 0.15915494309189535]
        assert_allclose(stats.t.logpdf(x, df), logpdf_ref, rtol=1e-15)
        assert_allclose(stats.t.pdf(x, df), pdf_ref, rtol=1e-14)
class TestRvDiscrete:
    """Tests for discrete distributions built via ``rv_discrete(values=...)``."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))

        # Empirical frequencies should be close to the given probabilities.
        for s, p in zip(states, probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)

        x = r.rvs()
        assert np.issubdtype(type(x), np.integer)

    def test_entropy(self):
        # Basic tests of entropy.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)

        # A degenerate (single-outcome) distribution has zero entropy.
        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0)

    def test_pmf(self):
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))

        x = [[1., 4.],
             [3., 2]]
        assert_allclose(rv.pmf(x),
                        [[0.5, 0.2],
                         [0., 0.3]], atol=1e-14)

    def test_cdf(self):
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))

        x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
        expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
        assert_allclose(rv.cdf(x_values), expected, atol=1e-14)

        # also check scalar arguments
        assert_allclose([rv.cdf(xx) for xx in x_values],
                        expected, atol=1e-14)

    def test_ppf(self):
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))

        q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
        expected = [1, 1, 2, 2, 4, 4]
        assert_allclose(rv.ppf(q_values), expected, atol=1e-14)

        # also check scalar arguments
        assert_allclose([rv.ppf(q) for q in q_values],
                        expected, atol=1e-14)

    def test_cdf_ppf_next(self):
        # copied and special cased from test_discrete_basic
        vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
        rv = stats.rv_discrete(values=vals)

        assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
                           rv.xk[1:])

    def test_multidimension(self):
        xk = np.arange(12).reshape((3, 4))
        pk = np.array([[0.1, 0.1, 0.15, 0.05],
                       [0.1, 0.1, 0.05, 0.05],
                       [0.1, 0.1, 0.05, 0.05]])
        rv = stats.rv_discrete(values=(xk, pk))

        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)

    def test_bad_input(self):
        # Mismatched lengths should raise.
        xk = [1, 2, 3]
        pk = [0.5, 0.5]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

        # Probabilities that don't sum to one should raise.
        pk = [1, 2, 3]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

        # Negative probabilities should raise.
        xk = [1, 2, 3]
        pk = [0.5, 1.2, -0.7]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

        xk = [1, 2, 3, 4, 5]
        pk = [0.3, 0.3, 0.3, 0.3, -0.2]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

        # Repeated support points should raise.
        xk = [1, 1]
        pk = [0.5, 0.5]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

    def test_shape_rv_sample(self):
        # tests added for gh-9565

        # mismatch of 2d inputs
        xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

        # same number of elements, but shapes not compatible
        xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

        # same shapes => no error
        xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
        assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)

    def test_expect1(self):
        xk = [1, 2, 4, 6, 7, 11]
        pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
        rv = stats.rv_discrete(values=(xk, pk))

        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)

    def test_expect2(self):
        # rv_sample should override _expect. Bug report from
        # https://stackoverflow.com/questions/63199792
        y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,
             1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0,
             1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0,
             2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0,
             3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0,
             4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0]

        py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0,
              0.004399999999999999, 0.6862, 0.0, 0.0, 0.0,
              0.00019999999999997797, 0.0006000000000000449,
              0.024499999999999966, 0.006400000000000072,
              0.0043999999999999595, 0.019499999999999962,
              0.03770000000000007, 0.01759999999999995, 0.015199999999999991,
              0.018100000000000005, 0.04500000000000004, 0.0025999999999999357,
              0.0, 0.0041000000000001036, 0.005999999999999894,
              0.0042000000000000925, 0.0050000000000000044,
              0.0041999999999999815, 0.0004999999999999449,
              0.009199999999999986, 0.008200000000000096,
              0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128,
              0.0006000000000000449, 0.02510000000000001, 0.0,
              0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0,
              0.008199999999999985, 0.005600000000000049, 0.0]

        rv = stats.rv_discrete(values=(y, py))

        # check the mean
        assert_allclose(rv.expect(), rv.mean(), atol=1e-14)
        assert_allclose(rv.expect(),
                        sum(v * w for v, w in zip(y, py)), atol=1e-14)

        # also check the second moment
        assert_allclose(rv.expect(lambda x: x**2),
                        sum(v**2 * w for v, w in zip(y, py)), atol=1e-14)
class TestSkewCauchy:
    """Tests for the skewed Cauchy distribution."""

    def test_cauchy(self):
        # With a=0 the skewed Cauchy reduces to the ordinary Cauchy.
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewcauchy.pdf(x, a=0),
                                  stats.cauchy.pdf(x))
        assert_array_almost_equal(stats.skewcauchy.cdf(x, a=0),
                                  stats.cauchy.cdf(x))
        assert_array_almost_equal(stats.skewcauchy.ppf(x, a=0),
                                  stats.cauchy.ppf(x))

    def test_skewcauchy_R(self):
        # Reference values generated with R's `sgt` package:
        # options(digits=16)
        # library(sgt)
        # # lmbda, x contain the values generated for a, x below
        # lmbda <- c(0.0976270078546495, 0.430378732744839, 0.2055267521432877,
        #            0.0897663659937937, -0.15269040132219, 0.2917882261333122,
        #            -0.12482557747462, 0.7835460015641595, 0.9273255210020589,
        #            -0.2331169623484446)
        # x <- c(2.917250380826646, 0.2889491975290444, 0.6804456109393229,
        #        4.25596638292661, -4.289639418021131, -4.1287070029845925,
        #        -4.797816025596743, 3.32619845547938, 2.7815675094985046,
        #        3.700121482468191)
        # pdf = dsgt(x, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE,
        #            var.adj = sqrt(2))
        # cdf = psgt(x, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE,
        #            var.adj = sqrt(2))
        # qsgt(cdf, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE,
        #      var.adj = sqrt(2))

        np.random.seed(0)
        a = np.random.rand(10) * 2 - 1
        x = np.random.rand(10) * 10 - 5
        pdf = [0.039473975217333909, 0.305829714049903223, 0.24140158118994162,
               0.019585772402693054, 0.021436553695989482, 0.00909817103867518,
               0.01658423410016873, 0.071083288030394126, 0.103250045941454524,
               0.013110230778426242]
        cdf = [0.87426677718213752, 0.37556468910780882, 0.59442096496538066,
               0.91304659850890202, 0.09631964100300605, 0.03829624330921733,
               0.08245240578402535, 0.72057062945510386, 0.62826415852515449,
               0.95011308463898292]
        assert_allclose(stats.skewcauchy.pdf(x, a), pdf)
        assert_allclose(stats.skewcauchy.cdf(x, a), cdf)
        assert_allclose(stats.skewcauchy.ppf(cdf, a), x)
# Test data for TestSkewNorm.test_noncentral_moments()
# The expected noncentral moments were computed by Wolfram Alpha.
# In Wolfram Alpha, enter
#    SkewNormalDistribution[0, 1, a] moment
# with `a` replaced by the desired shape parameter.  In the results, there
# should be a table of the first four moments.  Click on "More" to get more
# moments.  The expected moments start with the first moment (order = 1).
# Each entry is (shape parameter a, [moment of order 1, order 2, ...]).
_skewnorm_noncentral_moments = [
    (2, [2*np.sqrt(2/(5*np.pi)),
         1,
         22/5*np.sqrt(2/(5*np.pi)),
         3,
         446/25*np.sqrt(2/(5*np.pi)),
         15,
         2682/25*np.sqrt(2/(5*np.pi)),
         105,
         107322/125*np.sqrt(2/(5*np.pi))]),
    (0.1, [np.sqrt(2/(101*np.pi)),
           1,
           302/101*np.sqrt(2/(101*np.pi)),
           3,
           (152008*np.sqrt(2/(101*np.pi)))/10201,
           15,
           (107116848*np.sqrt(2/(101*np.pi)))/1030301,
           105,
           (97050413184*np.sqrt(2/(101*np.pi)))/104060401]),
    (-3, [-3/np.sqrt(5*np.pi),
          1,
          -63/(10*np.sqrt(5*np.pi)),
          3,
          -2529/(100*np.sqrt(5*np.pi)),
          15,
          -30357/(200*np.sqrt(5*np.pi)),
          105,
          -2428623/(2000*np.sqrt(5*np.pi)),
          945,
          -242862867/(20000*np.sqrt(5*np.pi)),
          10395,
          -29143550277/(200000*np.sqrt(5*np.pi)),
          135135]),
]
class TestSkewNorm:
    """Tests for the skew-normal distribution."""

    def setup_method(self):
        self.rng = check_random_state(1234)

    def test_normal(self):
        # When the skewness is 0 the distribution is normal
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
                                  stats.norm.pdf(x))

    def test_rvs(self):
        shape = (3, 4, 5)
        x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
        assert_equal(shape, x.shape)

        x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
        assert_equal(shape, x.shape)

    def test_moments(self):
        # Sample moments of a large sample should match the theoretical ones.
        X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
                               random_state=self.rng)
        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
        computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
        assert_array_almost_equal(computed, expected, decimal=2)

        X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
                               random_state=self.rng)
        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
        computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
        assert_array_almost_equal(computed, expected, decimal=2)

    def test_cdf_large_x(self):
        # Regression test for gh-7746.
        # The x values are large enough that the closest 64 bit floating
        # point representation of the exact CDF is 1.0.
        p = stats.skewnorm.cdf([10, 20, 30], -1)
        assert_allclose(p, np.ones(3), rtol=1e-14)
        p = stats.skewnorm.cdf(25, 2.5)
        assert_allclose(p, 1.0, rtol=1e-14)

    def test_cdf_sf_small_values(self):
        # Triples are [x, a, cdf(x, a)].  These values were computed
        # using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
        cdfvals = [
            [-8, 1, 3.870035046664392611e-31],
            [-4, 2, 8.1298399188811398e-21],
            [-2, 5, 1.55326826787106273e-26],
            [-9, -1, 2.257176811907681295e-19],
            [-10, -4, 1.523970604832105213e-23],
        ]
        for x, a, cdfval in cdfvals:
            p = stats.skewnorm.cdf(x, a)
            assert_allclose(p, cdfval, rtol=1e-8)
            # For the skew normal distribution, sf(-x, -a) = cdf(x, a).
            p = stats.skewnorm.sf(-x, -a)
            assert_allclose(p, cdfval, rtol=1e-8)

    @pytest.mark.parametrize('a, moments', _skewnorm_noncentral_moments)
    def test_noncentral_moments(self, a, moments):
        for order, expected in enumerate(moments, start=1):
            mom = stats.skewnorm.moment(order, a)
            assert_allclose(mom, expected, rtol=1e-14)

    def test_fit(self):
        rng = np.random.default_rng(4609813989115202851)
        a, loc, scale = -2, 3.5, 0.5  # arbitrary, valid parameters
        dist = stats.skewnorm(a, loc, scale)
        rvs = dist.rvs(size=100, random_state=rng)

        # test that MLE still honors guesses and fixed parameters
        a2, loc2, scale2 = stats.skewnorm.fit(rvs, -1.5, floc=3)
        a3, loc3, scale3 = stats.skewnorm.fit(rvs, -1.6, floc=3)
        assert loc2 == loc3 == 3  # fixed parameter is respected
        assert a2 != a3  # different guess -> (slightly) different outcome
        # quality of fit is tested elsewhere

        # test that MoM honors fixed parameters, accepts (but ignores) guesses
        a4, loc4, scale4 = stats.skewnorm.fit(rvs, 3, fscale=3, method='mm')
        assert scale4 == 3
        # because scale was fixed, only the mean and skewness will be matched
        dist4 = stats.skewnorm(a4, loc4, scale4)
        res = dist4.stats(moments='ms')
        ref = np.mean(rvs), stats.skew(rvs)
        assert_allclose(res, ref)

        # Test behavior when skew of data is beyond maximum of skewnorm
        rvs2 = stats.pareto.rvs(1, size=100, random_state=rng)

        # MLE still works
        res = stats.skewnorm.fit(rvs2)
        assert np.all(np.isfinite(res))

        # MoM fits variance and skewness
        a5, loc5, scale5 = stats.skewnorm.fit(rvs2, method='mm')
        assert np.isinf(a5)
        # distribution infrastructure doesn't allow infinite shape parameters
        # into _stats; it just bypasses it and produces NaNs. Calculate
        # moments manually.
        m, v = np.mean(rvs2), np.var(rvs2)
        assert_allclose(m, loc5 + scale5 * np.sqrt(2/np.pi))
        assert_allclose(v, scale5**2 * (1 - 2 / np.pi))

        # test that MLE and MoM behave as expected under sign changes
        a6p, loc6p, scale6p = stats.skewnorm.fit(rvs, method='mle')
        a6m, loc6m, scale6m = stats.skewnorm.fit(-rvs, method='mle')
        assert_allclose([a6m, loc6m, scale6m], [-a6p, -loc6p, scale6p])
        a7p, loc7p, scale7p = stats.skewnorm.fit(rvs, method='mm')
        a7m, loc7m, scale7m = stats.skewnorm.fit(-rvs, method='mm')
        assert_allclose([a7m, loc7m, scale7m], [-a7p, -loc7p, scale7p])
class TestExpon:
    """Tests for the exponential distribution."""

    def test_zero(self):
        # The pdf at the left endpoint equals 1.
        assert_equal(stats.expon.pdf(0), 1)

    def test_tail(self):  # Regression test for ticket 807
        # cdf stays accurate for tiny x; isf/sf round-trip deep in the tail.
        assert_equal(stats.expon.cdf(1e-18), 1e-18)
        assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)

    def test_nan_raises_error(self):
        # see gh-issue 10300
        bad = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        assert_raises(ValueError, stats.expon.fit, bad)

    def test_inf_raises_error(self):
        # see gh-issue 10300
        bad = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        assert_raises(ValueError, stats.expon.fit, bad)
class TestNorm:
    """Tests for the normal distribution."""

    def test_nan_raises_error(self):
        # see gh-issue 10300
        bad = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        assert_raises(ValueError, stats.norm.fit, bad)

    def test_inf_raises_error(self):
        # see gh-issue 10300
        bad = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        assert_raises(ValueError, stats.norm.fit, bad)

    def test_bad_keyword_arg(self):
        sample = [1, 2, 3]
        assert_raises(TypeError, stats.norm.fit, sample, plate="shrimp")

    @pytest.mark.parametrize('loc', [0, 1])
    def test_delta_cdf(self, loc):
        # The expected value is computed with mpmath:
        # >>> import mpmath
        # >>> mpmath.mp.dps = 60
        # >>> float(mpmath.ncdf(12) - mpmath.ncdf(11))
        # 1.910641809677555e-28
        expected = 1.910641809677555e-28
        delta = stats.norm._delta_cdf(11+loc, 12+loc, loc=loc)
        assert_allclose(delta, expected, rtol=1e-13)
        # The result must also hold reflected about the mean.
        delta = stats.norm._delta_cdf(-(12+loc), -(11+loc), loc=-loc)
        assert_allclose(delta, expected, rtol=1e-13)
class TestUniform:
    """gh-10300"""

    def test_nan_raises_error(self):
        # see gh-issue 10300
        bad = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        assert_raises(ValueError, stats.uniform.fit, bad)

    def test_inf_raises_error(self):
        # see gh-issue 10300
        bad = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        assert_raises(ValueError, stats.uniform.fit, bad)
class TestExponNorm:
    """Tests for the exponentially modified normal distribution."""

    def test_moments(self):
        # Some moment test cases based on non-loc/scaled formula
        def get_moms(lam, sig, mu):
            # See wikipedia for these formulae
            #  where it is listed as an exponentially modified gaussian
            opK2 = 1.0 + 1 / (lam*sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]

        mu, sig, lam = 0, 1, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -3, 2, 0.1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = 0, 3, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -5, 11, 3.5
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))

    def test_nan_raises_error(self):
        # see gh-issue 10300
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1)

    def test_inf_raises_error(self):
        # see gh-issue 10300
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1)

    def test_extremes_x(self):
        # Test for extreme values against overflows
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)

    # Expected values for the PDF were computed with mpmath, with
    # the following function, and with mpmath.mp.dps = 50.
    #
    #   def exponnorm_stdpdf(x, K):
    #       x = mpmath.mpf(x)
    #       K = mpmath.mpf(K)
    #       t1 = mpmath.exp(1/(2*K**2) - x/K)
    #       erfcarg = -(x - 1/K)/mpmath.sqrt(2)
    #       t2 = mpmath.erfc(erfcarg)
    #       return t1 * t2 / (2*K)
    #
    @pytest.mark.parametrize('x, K, expected',
                             [(20, 0.01, 6.90010764753618e-88),
                              (1, 0.01, 0.24438994313247364),
                              (-1, 0.01, 0.23955149623472075),
                              (-20, 0.01, 4.6004708690125477e-88),
                              (10, 1, 7.48518298877006e-05),
                              (10, 10000, 9.990005048283775e-05)])
    def test_std_pdf(self, x, K, expected):
        assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=5e-12)

    # Expected values for the CDF were computed with mpmath using
    # the following function and with mpmath.mp.dps = 60:
    #
    #   def mp_exponnorm_cdf(x, K, loc=0, scale=1):
    #       x = mpmath.mpf(x)
    #       K = mpmath.mpf(K)
    #       loc = mpmath.mpf(loc)
    #       scale = mpmath.mpf(scale)
    #       z = (x - loc)/scale
    #       return (mpmath.ncdf(z)
    #               - mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
    #
    @pytest.mark.parametrize('x, K, scale, expected',
                             [[0, 0.01, 1, 0.4960109760186432],
                              [-5, 0.005, 1, 2.7939945412195734e-07],
                              [-1e4, 0.01, 100, 0.0],
                              [-1e4, 0.01, 1000, 6.920401854427357e-24],
                              [5, 0.001, 1, 0.9999997118542392]])
    def test_cdf_small_K(self, x, K, scale, expected):
        p = stats.exponnorm.cdf(x, K, scale=scale)
        if expected == 0.0:
            assert p == 0.0
        else:
            assert_allclose(p, expected, rtol=1e-13)

    # Expected values for the SF were computed with mpmath using
    # the following function and with mpmath.mp.dps = 60:
    #
    #   def mp_exponnorm_sf(x, K, loc=0, scale=1):
    #       x = mpmath.mpf(x)
    #       K = mpmath.mpf(K)
    #       loc = mpmath.mpf(loc)
    #       scale = mpmath.mpf(scale)
    #       z = (x - loc)/scale
    #       return (mpmath.ncdf(-z)
    #               + mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
    #
    @pytest.mark.parametrize('x, K, scale, expected',
                             [[10, 0.01, 1, 8.474702916146657e-24],
                              [2, 0.005, 1, 0.02302280664231312],
                              [5, 0.005, 0.5, 8.024820681931086e-24],
                              [10, 0.005, 0.5, 3.0603340062892486e-89],
                              [20, 0.005, 0.5, 0.0],
                              [-3, 0.001, 1, 0.9986545205566117]])
    def test_sf_small_K(self, x, K, scale, expected):
        p = stats.exponnorm.sf(x, K, scale=scale)
        if expected == 0.0:
            assert p == 0.0
        else:
            assert_allclose(p, expected, rtol=5e-13)
class TestGenExpon:
    """Tests for the generalized exponential distribution."""

    def test_pdf_unity_area(self):
        from scipy.integrate import simpson
        # PDF should integrate to one
        p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_almost_equal(simpson(p, dx=0.01), 1, 1)

    def test_cdf_bounds(self):
        # CDF should always be positive
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))

    # The values of p in the following data were computed with mpmath.
    # E.g. the script
    #     from mpmath import mp
    #     mp.dps = 80
    #     x = mp.mpf('15.0')
    #     a = mp.mpf('1.0')
    #     b = mp.mpf('2.0')
    #     c = mp.mpf('1.5')
    #     print(float(mp.exp((-a-b)*x + (b/c)*-mp.expm1(-c*x))))
    # prints
    #     1.0859444834514553e-19
    @pytest.mark.parametrize('x, p, a, b, c',
                             [(15, 1.0859444834514553e-19, 1, 2, 1.5),
                              (0.25, 0.7609068232534623, 0.5, 2, 3),
                              (0.25, 0.09026661397565876, 9.5, 2, 0.5),
                              (0.01, 0.9753038265071597, 2.5, 0.25, 0.5),
                              (3.25, 0.0001962824553094492, 2.5, 0.25, 0.5),
                              (0.125, 0.9508674287164001, 0.25, 5, 0.5)])
    def test_sf_isf(self, x, p, a, b, c):
        sf = stats.genexpon.sf(x, a, b, c)
        assert_allclose(sf, p, rtol=1e-14)
        isf = stats.genexpon.isf(p, a, b, c)
        assert_allclose(isf, x, rtol=1e-14)

    # The values of p in the following data were computed with mpmath.
    @pytest.mark.parametrize('x, p, a, b, c',
                             [(0.25, 0.2390931767465377, 0.5, 2, 3),
                              (0.25, 0.9097333860243412, 9.5, 2, 0.5),
                              (0.01, 0.0246961734928403, 2.5, 0.25, 0.5),
                              (3.25, 0.9998037175446906, 2.5, 0.25, 0.5),
                              (0.125, 0.04913257128359998, 0.25, 5, 0.5)])
    def test_cdf_ppf(self, x, p, a, b, c):
        cdf = stats.genexpon.cdf(x, a, b, c)
        assert_allclose(cdf, p, rtol=1e-14)
        ppf = stats.genexpon.ppf(p, a, b, c)
        assert_allclose(ppf, x, rtol=1e-14)
class TestTruncexpon:
def test_sf_isf(self):
# reference values were computed via the reference distribution, e.g.
# mp.dps = 50; TruncExpon(b=b).sf(x)
b = [20, 100]
x = [19.999999, 99.999999]
ref = [2.0611546593828472e-15, 3.7200778266671455e-50]
assert_allclose(stats.truncexpon.sf(x, b), ref, rtol=1e-10)
assert_allclose(stats.truncexpon.isf(ref, b), x, rtol=1e-12)
class TestExponpow:
    def test_tail(self):
        """Left-tail accuracy and sf/isf round trip for exponpow."""
        # Near zero cdf(x, b) behaves like x**b, so cdf(1e-10, 2) ~ 1e-20.
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        # isf should invert sf.
        roundtrip = stats.exponpow.isf(stats.exponpow.sf(5, .8), .8)
        assert_almost_equal(roundtrip, 5)
class TestSkellam:
    """Tests for the Skellam distribution (difference of two Poissons)."""

    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        # pmf values from R's skellam package for k = -10..14
        skpmfR = numpy.array(
            [4.2254582961926893e-005, 1.1404838449648488e-004,
             2.8979625801752660e-004, 6.9177078182101231e-004,
             1.5480716105844708e-003, 3.2412274963433889e-003,
             6.3373707175123292e-003, 1.1552351566696643e-002,
             1.9606152375042644e-002, 3.0947164083410337e-002,
             4.5401737566767360e-002, 6.1894328166820688e-002,
             7.8424609500170578e-002, 9.2418812533573133e-002,
             1.0139793148019728e-001, 1.0371927988298846e-001,
             9.9076583077406091e-002, 8.8546660073089561e-002,
             7.4187842052486810e-002, 5.8392772862200251e-002,
             4.3268692953013159e-002, 3.0248159818374226e-002,
             1.9991434305603021e-002, 1.2516877303301180e-002,
             7.4389876226229707e-003])
        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)

    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        # cdf values from R for the same grid as test_pmf
        skcdfR = numpy.array(
            [6.4061475386192104e-005, 1.7810985988267694e-004,
             4.6790611790020336e-004, 1.1596768997212152e-003,
             2.7077485103056847e-003, 5.9489760066490718e-003,
             1.2286346724161398e-002, 2.3838698290858034e-002,
             4.3444850665900668e-002, 7.4392014749310995e-002,
             1.1979375231607835e-001, 1.8168808048289900e-001,
             2.6011268998306952e-001, 3.5253150251664261e-001,
             4.5392943399683988e-001, 5.5764871387982828e-001,
             6.5672529695723436e-001, 7.4527195703032389e-001,
             8.1945979908281064e-001, 8.7785257194501087e-001,
             9.2112126489802404e-001, 9.5136942471639818e-001,
             9.7136085902200120e-001, 9.8387773632530240e-001,
             9.9131672394792536e-001])
        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)

    def test_extreme_mu2(self):
        # check that crash reported by gh-17916 large mu2 is resolved
        x, mu1, mu2 = 0, 1, 4820232647677555.0
        # With mu2 huge, essentially all mass is far below 0.
        assert_allclose(stats.skellam.pmf(x, mu1, mu2), 0, atol=1e-16)
        assert_allclose(stats.skellam.cdf(x, mu1, mu2), 1, atol=1e-16)
class TestLognorm:
    """Tests for the lognormal distribution `stats.lognorm`."""

    def test_pdf(self):
        # Regression test for Ticket #1471: avoid nan with 0/0 situation
        # Also make sure there are no warnings at x=0, cf gh-5202
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
            assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])

    def test_logcdf(self):
        # Regression test for gh-5940: sf et al would underflow too early
        # lognorm.sf(x, s) must agree with norm.sf(log(x)/s) even when the
        # probability is far into the tail.
        x2, mu, sigma = 201.68, 195, 0.149
        assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
                        stats.norm.sf(np.log(x2-mu)/sigma))
        assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
                        stats.norm.logsf(np.log(x2-mu)/sigma))

    @pytest.fixture(scope='function')
    def rng(self):
        # Fresh seeded generator per test for reproducible fit data.
        return np.random.default_rng(1234)

    @pytest.mark.parametrize("rvs_shape", [.1, 2])
    @pytest.mark.parametrize("rvs_loc", [-2, 0, 2])
    @pytest.mark.parametrize("rvs_scale", [.2, 1, 5])
    @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale',
                             [e for e in product((False, True), repeat=3)
                              if False in e])
    @np.errstate(invalid="ignore")
    def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale,
                                    fix_shape, fix_loc, fix_scale, rng):
        # lognorm.fit should achieve a log-likelihood at least as good as
        # the generic numerical optimizer for every combination of fixed
        # parameters (the all-fixed case is excluded by the filter above).
        data = stats.lognorm.rvs(size=100, s=rvs_shape, scale=rvs_scale,
                                 loc=rvs_loc, random_state=rng)
        kwds = {}
        if fix_shape:
            kwds['f0'] = rvs_shape
        if fix_loc:
            kwds['floc'] = rvs_loc
        if fix_scale:
            kwds['fscale'] = rvs_scale
        _assert_less_or_close_loglike(stats.lognorm, data, **kwds)

    def test_isf(self):
        # reference values were computed via the reference distribution, e.g.
        # mp.dps = 100;
        # LogNormal(s=s).isf(q=0.1, guess=0)
        # LogNormal(s=s).isf(q=2e-10, guess=100)
        s = 0.954
        q = [0.1, 2e-10, 5e-20, 6e-40]
        ref = [3.3960065375794937, 390.07632793595974, 5830.5020828128445,
               287872.84087457904]
        assert_allclose(stats.lognorm.isf(q, s), ref, rtol=1e-14)
class TestBeta:
    """Tests for the beta distribution, including the Boost-backed paths."""

    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan with 0*log(0) situation
        logpdf = stats.beta.logpdf(0, 1, 0.5)
        assert_almost_equal(logpdf, -0.69314718056)
        logpdf = stats.beta.logpdf(0, 0.5, 1)
        assert_almost_equal(logpdf, np.inf)

    def test_logpdf_ticket_1866(self):
        # pdf and exp(logpdf) should agree for large shape parameters.
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.beta(alpha, beta)
        assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))

    def test_fit_bad_keyword_args(self):
        # Unknown keyword arguments to fit() must raise TypeError.
        x = [0.1, 0.5, 0.6]
        assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
                      plate="shrimp")

    def test_fit_duplicated_fixed_parameter(self):
        # At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
        # More than one raises a ValueError.
        x = [0.1, 0.5, 0.6]
        assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)

    @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
    def test_issue_12635(self):
        # Confirm that Boost's beta distribution resolves gh-12635.
        # Check against R:
        # options(digits=16)
        # p = 0.9999999999997369
        # a = 75.0
        # b = 66334470.0
        # print(qbeta(p, a, b))
        p, a, b = 0.9999999999997369, 75.0, 66334470.0
        assert_allclose(stats.beta.ppf(p, a, b), 2.343620802982393e-06)

    @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
    def test_issue_12794(self):
        # Confirm that Boost's beta distribution resolves gh-12794.
        # Check against R.
        # options(digits=16)
        # p = 1e-11
        # count_list = c(10,100,1000)
        # print(qbeta(1-p, count_list + 1, 100000 - count_list))
        inv_R = np.array([0.0004944464889611935,
                          0.0018360586912635726,
                          0.0122663919942518351])
        count_list = np.array([10, 100, 1000])
        p = 1e-11
        inv = stats.beta.isf(p, count_list + 1, 100000 - count_list)
        assert_allclose(inv, inv_R)
        # sf must round-trip the isf results back to p.
        res = stats.beta.sf(inv, count_list + 1, 100000 - count_list)
        assert_allclose(res, p)

    @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
    def test_issue_12796(self):
        # Confirm that Boost's beta distribution succeeds in the case
        # of gh-12796
        alpha_2 = 5e-6
        count_ = np.arange(1, 20)
        nobs = 100000
        q, a, b = 1 - alpha_2, count_ + 1, nobs - count_
        inv = stats.beta.ppf(q, a, b)
        res = stats.beta.cdf(inv, a, b)
        assert_allclose(res, 1 - alpha_2)

    def test_endpoints(self):
        # Confirm that boost's beta distribution returns inf at x=1
        # when b<1
        a, b = 1, 0.5
        assert_equal(stats.beta.pdf(1, a, b), np.inf)

        # Confirm that boost's beta distribution returns inf at x=0
        # when a<1
        a, b = 0.2, 3
        assert_equal(stats.beta.pdf(0, a, b), np.inf)

        # Confirm that boost's beta distribution returns 5 at x=0
        # when a=1, b=5
        a, b = 1, 5
        assert_equal(stats.beta.pdf(0, a, b), 5)
        # subnormal x should hit the same endpoint value
        assert_equal(stats.beta.pdf(1e-310, a, b), 5)

        # Confirm that boost's beta distribution returns 5 at x=1
        # when a=5, b=1
        a, b = 5, 1
        assert_equal(stats.beta.pdf(1, a, b), 5)
        assert_equal(stats.beta.pdf(1-1e-310, a, b), 5)

    @pytest.mark.xfail(IS_PYPY, reason="Does not convert boost warning")
    def test_boost_eval_issue_14606(self):
        q, a, b = 0.995, 1.0e11, 1.0e13
        with pytest.warns(RuntimeWarning):
            stats.beta.ppf(q, a, b)

    @pytest.mark.parametrize('method', [stats.beta.ppf, stats.beta.isf])
    @pytest.mark.parametrize('a, b', [(1e-310, 12.5), (12.5, 1e-310)])
    def test_beta_ppf_with_subnormal_a_b(self, method, a, b):
        # Regression test for gh-17444: beta.ppf(p, a, b) and beta.isf(p, a, b)
        # would result in a segmentation fault if either a or b was subnormal.
        p = 0.9
        # Depending on the version of Boost that we have vendored and
        # our setting of the Boost double promotion policy, the call
        # `stats.beta.ppf(p, a, b)` might raise an OverflowError or
        # return a value.  We'll accept either behavior (and not care about
        # the value), because our goal here is to verify that the call does
        # not trigger a segmentation fault.
        try:
            method(p, a, b)
        except OverflowError:
            # The OverflowError exception occurs with Boost 1.80 or earlier
            # when Boost's double promotion policy is false; see
            #   https://github.com/boostorg/math/issues/882
            # and
            #   https://github.com/boostorg/math/pull/883
            # Once we have vendored the fixed version of Boost, we can drop
            # this try-except wrapper and just call the function.
            pass

    # entropy accuracy was confirmed using the following mpmath function
    # from mpmath import mp
    # mp.dps = 50
    # def beta_entropy_mpmath(a, b):
    #     a = mp.mpf(a)
    #     b = mp.mpf(b)
    #     entropy = mp.log(mp.beta(a, b)) - (a - 1) * mp.digamma(a) -\
    #               (b - 1) * mp.digamma(b) + (a + b -2) * mp.digamma(a + b)
    #     return float(entropy)
    @pytest.mark.parametrize('a, b, ref',
                             [(0.5, 0.5, -0.24156447527049044),
                              (0.001, 1, -992.0922447210179),
                              (1, 10000, -8.210440371976183),
                              (100000, 100000, -5.377247470132859)])
    def test_entropy(self, a, b, ref):
        assert_allclose(stats.beta(a, b).entropy(), ref)

    @pytest.mark.parametrize(
        "a, b, ref, tol",
        [
            (1, 10, -1.4025850929940458, 1e-14),
            (10, 20, -1.0567887388936708, 1e-13),
            (4e6, 4e6+20, -7.221686009678741, 1e-9),
            (5e6, 5e6+10, -7.333257022834638, 1e-8),
            (1e10, 1e10+20, -11.133707703130474, 1e-11),
            (1e50, 1e50+20, -57.185409562486385, 1e-15),
        ]
    )
    def test_extreme_entropy(self, a, b, ref, tol):
        # Reference values were calculated with mpmath:
        # from mpmath import mp
        # mp.dps = 500
        #
        # def beta_entropy_mpmath(a, b):
        #     a = mp.mpf(a)
        #     b = mp.mpf(b)
        #     entropy = (
        #         mp.log(mp.beta(a, b)) - (a - 1) * mp.digamma(a)
        #         - (b - 1) * mp.digamma(b) + (a + b - 2) * mp.digamma(a + b)
        #     )
        #     return float(entropy)
        assert_allclose(stats.beta(a, b).entropy(), ref, rtol=tol)
class TestBetaPrime:
    """Tests for the beta prime distribution `stats.betaprime`."""

    # the test values are used in test_cdf_gh_17631 / test_ppf_gh_17631
    # They are computed with mpmath. Example:
    # from mpmath import mp
    # mp.dps = 50
    # a, b = mp.mpf(0.05), mp.mpf(0.1)
    # x = mp.mpf(1e22)
    # float(mp.betainc(a, b, 0.0, x/(1+x), regularized=True))
    # note: we use the values computed by the cdf to test whether
    # ppf(cdf(x)) == x (up to a small tolerance)
    # since the ppf can be very sensitive to small variations of the input,
    # it can be required to generate the test case for the ppf separately,
    # see self.test_ppf
    # tuples are (x, a, b, cdf-reference)
    cdf_vals = [
        (1e22, 100.0, 0.05, 0.8973027435427167),
        (1e10, 100.0, 0.05, 0.5911548582766262),
        (1e8, 0.05, 0.1, 0.9467768090820048),
        (1e8, 100.0, 0.05, 0.4852944858726726),
        (1e-10, 0.05, 0.1, 0.21238845427095),
        (1e-10, 1.5, 1.5, 1.697652726007973e-15),
        (1e-10, 0.05, 100.0, 0.40884514172337383),
        (1e-22, 0.05, 0.1, 0.053349567649287326),
        (1e-22, 1.5, 1.5, 1.6976527263135503e-33),
        (1e-22, 0.05, 100.0, 0.10269725645728331),
        (1e-100, 0.05, 0.1, 6.7163126421919795e-06),
        (1e-100, 1.5, 1.5, 1.6976527263135503e-150),
        (1e-100, 0.05, 100.0, 1.2928818587561651e-05),
    ]

    def test_logpdf(self):
        # logpdf should stay finite for large shape parameters.
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.betaprime(alpha, beta)
        assert_(np.isfinite(b.logpdf(x)).all())
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))

    def test_cdf(self):
        # regression test for gh-4030: Implementation of
        # scipy.stats.betaprime.cdf()
        x = stats.betaprime.cdf(0, 0.2, 0.3)
        assert_equal(x, 0.0)

        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())

        # check the new cdf implementation vs generic one:
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)

    # The expected values for test_ppf() were computed with mpmath, e.g.
    #
    #   from mpmath import mp
    #   mp.dps = 125
    #   p = 0.01
    #   a, b = 1.25, 2.5
    #   x = mp.findroot(lambda t: mp.betainc(a, b, x1=0, x2=t/(1+t),
    #                                        regularized=True) - p,
    #                   x0=(0.01, 0.011), method='secant')
    #   print(float(x))
    #
    # prints
    #
    #   0.01080162700956614
    #
    @pytest.mark.parametrize(
        'p, a, b, expected',
        [(0.010, 1.25, 2.5, 0.01080162700956614),
         (1e-12, 1.25, 2.5, 1.0610141996279122e-10),
         (1e-18, 1.25, 2.5, 1.6815941817974941e-15),
         (1e-17, 0.25, 7.0, 1.0179194531881782e-69),
         (0.375, 0.25, 7.0, 0.002036820346115211),
         (0.9978811466052919, 0.05, 0.1, 1.0000000000001218e22),]
    )
    def test_ppf(self, p, a, b, expected):
        x = stats.betaprime.ppf(p, a, b)
        assert_allclose(x, expected, rtol=1e-14)

    @pytest.mark.parametrize('x, a, b, p', cdf_vals)
    def test_ppf_gh_17631(self, x, a, b, p):
        # ppf must invert the cdf reference values in cdf_vals.
        assert_allclose(stats.betaprime.ppf(p, a, b), x, rtol=1e-14)

    @pytest.mark.parametrize(
        'x, a, b, expected',
        cdf_vals + [
            (1e10, 1.5, 1.5, 0.9999999999999983),
            (1e10, 0.05, 0.1, 0.9664184367890859),
            (1e22, 0.05, 0.1, 0.9978811466052919),
        ])
    def test_cdf_gh_17631(self, x, a, b, expected):
        assert_allclose(stats.betaprime.cdf(x, a, b), expected, rtol=1e-14)

    @pytest.mark.parametrize(
        'x, a, b, expected',
        [(1e50, 0.05, 0.1, 0.9999966641709545),
         (1e50, 100.0, 0.05, 0.995925162631006)])
    def test_cdf_extreme_tails(self, x, a, b, expected):
        # for even more extreme values, we only get a few correct digits
        # results are still < 1
        y = stats.betaprime.cdf(x, a, b)
        assert y < 1.0
        assert_allclose(y, expected, rtol=2e-5)

    def test_sf(self):
        # reference values were computed via the reference distribution,
        # e.g.
        # mp.dps = 50
        # a, b = 5, 3
        # x = 1e10
        # BetaPrime(a=a, b=b).sf(x); returns 3.4999999979e-29
        a = [5, 4, 2, 0.05, 0.05, 0.05, 0.05, 100.0, 100.0, 0.05, 0.05,
             0.05, 1.5, 1.5]
        b = [3, 2, 1, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05, 100.0, 100.0,
             100.0, 1.5, 1.5]
        x = [1e10, 1e20, 1e30, 1e22, 1e-10, 1e-22, 1e-100, 1e22, 1e10,
             1e-10, 1e-22, 1e-100, 1e10, 1e-10]
        ref = [3.4999999979e-29, 9.999999999994357e-40, 1.9999999999999998e-30,
               0.0021188533947081017, 0.78761154572905, 0.9466504323507127,
               0.9999932836873578, 0.10269725645728331, 0.40884514172337383,
               0.5911548582766262, 0.8973027435427167, 0.9999870711814124,
               1.6976527260079727e-15, 0.9999999999999983]

        sf_values = stats.betaprime.sf(x, a, b)
        assert_allclose(sf_values, ref, rtol=1e-12)

    def test_fit_stats_gh18274(self):
        # gh-18274 reported spurious warning emitted when fitting `betaprime`
        # to data. Some of these were emitted by stats, too. Check that the
        # warnings are no longer emitted.
        stats.betaprime.fit([0.1, 0.25, 0.3, 1.2, 1.6], floc=0, fscale=1)
        stats.betaprime(a=1, b=1).stats('mvsk')

    def test_moment_gh18634(self):
        # Testing for gh-18634 revealed that `betaprime` raised a
        # NotImplementedError for higher moments. Check that this is
        # resolved. Parameters are arbitrary but lie on either side of the
        # moment order (5) to test both branches of `_lazywhere`. Reference
        # values produced with Mathematica, e.g.
        # `Moment[BetaPrimeDistribution[2,7],5]`
        ref = [np.inf, 0.867096912929055]
        res = stats.betaprime(2, [4.2, 7.1]).moment(5)
        assert_allclose(res, ref)
class TestGamma:
    """Tests for the gamma distribution `stats.gamma`."""

    def test_pdf(self):
        # a few test cases to compare with R
        pdf = stats.gamma.pdf(90, 394, scale=1./5)
        assert_almost_equal(pdf, 0.002312341)

        pdf = stats.gamma.pdf(3, 10, scale=1./5)
        assert_almost_equal(pdf, 0.1620358)

    def test_logpdf(self):
        # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
        # situation
        logpdf = stats.gamma.logpdf(0, 1)
        assert_almost_equal(logpdf, 0)

    def test_fit_bad_keyword_args(self):
        # Unknown keyword arguments to fit() must raise TypeError.
        x = [0.1, 0.5, 0.6]
        assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")

    def test_isf(self):
        # Test cases for when the probability is very small. See gh-13664.
        # The expected values can be checked with mpmath. With mpmath,
        # the survival function sf(x, k) can be computed as
        #
        #     mpmath.gammainc(k, x, mpmath.inf, regularized=True)
        #
        # Here we have:
        #
        # >>> mpmath.mp.dps = 60
        # >>> float(mpmath.gammainc(1, 39.14394658089878, mpmath.inf,
        # ...                       regularized=True))
        # 9.99999999999999e-18
        # >>> float(mpmath.gammainc(100, 330.6557590436547, mpmath.inf,
        #                           regularized=True))
        # 1.000000000000028e-50
        #
        assert np.isclose(stats.gamma.isf(1e-17, 1),
                          39.14394658089878, atol=1e-14)

        assert np.isclose(stats.gamma.isf(1e-50, 100),
                          330.6557590436547, atol=1e-13)

    @pytest.mark.parametrize('scale', [1.0, 5.0])
    def test_delta_cdf(self, scale):
        # _delta_cdf(x1, x2, ...) computes cdf(x2) - cdf(x1) accurately
        # even when both cdf values are extremely close to 1.
        # Expected value computed with mpmath:
        #
        # >>> import mpmath
        # >>> mpmath.mp.dps = 150
        # >>> cdf1 = mpmath.gammainc(3, 0, 245, regularized=True)
        # >>> cdf2 = mpmath.gammainc(3, 0, 250, regularized=True)
        # >>> float(cdf2 - cdf1)
        # 1.1902609356171962e-102
        #
        delta = stats.gamma._delta_cdf(scale*245, scale*250, 3, scale=scale)
        assert_allclose(delta, 1.1902609356171962e-102, rtol=1e-13)

    @pytest.mark.parametrize('a, ref, rtol',
                             [(1e-4, -9990.366610819761, 1e-15),
                              (2, 1.5772156649015328, 1e-15),
                              (100, 3.7181819485047463, 1e-13),
                              (1e4, 6.024075385026086, 1e-15),
                              (1e18, 22.142204370151084, 1e-15),
                              (1e100, 116.54819318290696, 1e-15)])
    def test_entropy(self, a, ref, rtol):
        # expected value computed with mpmath:
        # from mpmath import mp
        # mp.dps = 500
        # def gamma_entropy_reference(x):
        #     x = mp.mpf(x)
        #     return float(mp.digamma(x) * (mp.one - x) + x + mp.loggamma(x))
        assert_allclose(stats.gamma.entropy(a), ref, rtol=rtol)
class TestDgamma:
    """Tests for the double gamma distribution `stats.dgamma`."""

    def test_pdf(self):
        # dgamma.pdf(x, a) should equal gamma.pdf(|x|, a)/2 (symmetric halves).
        rng = np.random.default_rng(3791303244302340058)
        size = 10  # number of points to check
        x = rng.normal(scale=10, size=size)
        a = rng.uniform(high=10, size=size)
        res = stats.dgamma.pdf(x, a)
        ref = stats.gamma.pdf(np.abs(x), a) / 2
        assert_allclose(res, ref)

        dist = stats.dgamma(a)
        # There was an intermittent failure with assert_equal on Linux - 32 bit
        assert_allclose(dist.pdf(x), res, rtol=5e-16)

    # mpmath was used to compute the expected values.
    # For x < 0, cdf(x, a) is mp.gammainc(a, -x, mp.inf, regularized=True)/2
    # For x > 0, cdf(x, a) is (1 + mp.gammainc(a, 0, x, regularized=True))/2
    # E.g.
    #     from mpmath import mp
    #     mp.dps = 50
    #     print(float(mp.gammainc(1, 20, mp.inf, regularized=True)/2))
    # prints
    #     1.030576811219279e-09
    @pytest.mark.parametrize('x, a, expected',
                             [(-20, 1, 1.030576811219279e-09),
                              (-40, 1, 2.1241771276457944e-18),
                              (-50, 5, 2.7248509914602648e-17),
                              (-25, 0.125, 5.333071920958156e-14),
                              (5, 1, 0.9966310265004573)])
    def test_cdf_ppf_sf_isf_tail(self, x, a, expected):
        # Check all four functions and their round trips in the tails;
        # by symmetry, sf(-x, a) == cdf(x, a).
        cdf = stats.dgamma.cdf(x, a)
        assert_allclose(cdf, expected, rtol=5e-15)
        ppf = stats.dgamma.ppf(expected, a)
        assert_allclose(ppf, x, rtol=5e-15)
        sf = stats.dgamma.sf(-x, a)
        assert_allclose(sf, expected, rtol=5e-15)
        isf = stats.dgamma.isf(expected, a)
        assert_allclose(isf, -x, rtol=5e-15)

    @pytest.mark.parametrize("a, ref",
                             [(1.5, 2.0541199559354117),
                              (1.3, 1.9357296377121247),
                              (1.1, 1.7856502333412134)])
    def test_entropy(self, a, ref):
        # The reference values were calculated with mpmath:
        # def entropy_dgamma(a):
        #     def pdf(x):
        #         A = mp.one / (mp.mpf(2.) * mp.gamma(a))
        #         B = mp.fabs(x) ** (a - mp.one)
        #         C = mp.exp(-mp.fabs(x))
        #         h = A * B * C
        #         return h
        #
        #     return -mp.quad(lambda t: pdf(t) * mp.log(pdf(t)),
        #                     [-mp.inf, mp.inf])
        assert_allclose(stats.dgamma.entropy(a), ref, rtol=1e-14)

    @pytest.mark.parametrize("a, ref",
                             [(1e-100, -1e+100),
                              (1e-10, -9999999975.858217),
                              (1e-5, -99987.37111657023),
                              (1e4, 6.717222565586032),
                              (1000000000000000.0, 19.38147391121996),
                              (1e+100, 117.2413403634669)])
    def test_entropy_entreme_values(self, a, ref):
        # The reference values were calculated with mpmath:
        # from mpmath import mp
        # mp.dps = 500
        # def second_dgamma(a):
        #     a = mp.mpf(a)
        #     x_1 = a + mp.log(2) + mp.loggamma(a)
        #     x_2 = (mp.one - a) * mp.digamma(a)
        #     h = x_1 + x_2
        #     return h
        assert_allclose(stats.dgamma.entropy(a), ref, rtol=1e-10)

    def test_entropy_array_input(self):
        # Vectorized entropy must agree elementwise with scalar calls.
        x = np.array([1, 5, 1e20, 1e-5])
        y = stats.dgamma.entropy(x)
        for i in range(len(y)):
            assert y[i] == stats.dgamma.entropy(x[i])
class TestChi2:
    """Tests for the chi-squared distribution `stats.chi2`."""

    # regression tests after precision improvements, ticket:1041, not verified
    def test_precision(self):
        assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
                            decimal=14)
        assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
                            decimal=14)

    def test_ppf(self):
        # Expected values computed with mpmath.
        df = 4.8
        x = stats.chi2.ppf(2e-47, df)
        assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
        x = stats.chi2.ppf(0.5, df)
        assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)

        df = 13
        x = stats.chi2.ppf(2e-77, df)
        assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
        x = stats.chi2.ppf(0.1, df)
        assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)

    # Entropy references values were computed with the following mpmath code
    # from mpmath import mp
    # mp.dps = 50
    # def chisq_entropy_mpmath(df):
    #     df = mp.mpf(df)
    #     half_df = 0.5 * df
    #     entropy = (half_df + mp.log(2) + mp.log(mp.gamma(half_df)) +
    #                (mp.one - half_df) * mp.digamma(half_df))
    #     return float(entropy)
    @pytest.mark.parametrize('df, ref',
                             [(1e-4, -19988.980448690163),
                              (1, 0.7837571104739337),
                              (100, 4.061397128938114),
                              (251, 4.525577254045129),
                              (1e15, 19.034900320939986)])
    def test_entropy(self, df, ref):
        assert_allclose(stats.chi2(df).entropy(), ref, rtol=1e-13)
class TestGumbelL:
    """Tests for the left-skewed Gumbel distribution `stats.gumbel_l`."""

    # gh-6228
    def test_cdf_ppf(self):
        # ppf should invert cdf far into the left tail.
        x = np.linspace(-100, -4)
        y = stats.gumbel_l.cdf(x)
        xx = stats.gumbel_l.ppf(y)
        assert_allclose(x, xx)

    def test_logcdf_logsf(self):
        # exp(logcdf) and -expm1(logsf) both equal the cdf; they must agree.
        x = np.linspace(-100, -4)
        y = stats.gumbel_l.logcdf(x)
        z = stats.gumbel_l.logsf(x)
        u = np.exp(y)
        v = -special.expm1(z)
        assert_allclose(u, v)

    def test_sf_isf(self):
        # isf should invert sf over a wide range.
        x = np.linspace(-20, 5)
        y = stats.gumbel_l.sf(x)
        xx = stats.gumbel_l.isf(y)
        assert_allclose(x, xx)

    @pytest.mark.parametrize('loc', [-1, 1])
    def test_fit_fixed_param(self, loc):
        # ensure fixed location is correctly reflected from `gumbel_r.fit`
        # (NOTE(review): presumably gumbel_l.fit delegates to gumbel_r.fit
        # on negated data, hence the gumbel_r reference — confirm).
        # See comments at end of gh-12737.
        data = stats.gumbel_l.rvs(size=100, loc=loc)
        fitted_loc, _ = stats.gumbel_l.fit(data, floc=loc)
        assert_equal(fitted_loc, loc)
class TestGumbelR:
    def test_sf(self):
        """Right-tail accuracy of gumbel_r.sf.

        Expected value computed with mpmath:
        >>> import mpmath
        >>> mpmath.mp.dps = 40
        >>> float(mpmath.mp.one - mpmath.exp(-mpmath.exp(-50)))
        1.9287498479639178e-22
        """
        expected = 1.9287498479639178e-22
        assert_allclose(stats.gumbel_r.sf(50), expected, rtol=1e-14)

    def test_isf(self):
        """isf accuracy for a tiny upper-tail probability.

        Expected value computed with mpmath:
        >>> import mpmath
        >>> mpmath.mp.dps = 40
        >>> float(-mpmath.log(-mpmath.log(mpmath.mp.one - 1e-17)))
        39.14394658089878
        """
        expected = 39.14394658089878
        assert_allclose(stats.gumbel_r.isf(1e-17), expected, rtol=1e-14)
class TestLevyStable:
    @pytest.fixture(autouse=True)
    def reset_levy_stable_params(self):
        """Setup default parameters for levy_stable generator"""
        # Tests mutate these module-level settings; restore defaults before
        # every test so ordering does not matter.
        stats.levy_stable.parameterization = "S1"
        stats.levy_stable.cdf_default_method = "piecewise"
        stats.levy_stable.pdf_default_method = "piecewise"
        stats.levy_stable.quad_eps = stats._levy_stable._QUAD_EPS
    @pytest.fixture
    def nolan_pdf_sample_data(self):
        """Sample data points for pdf computed with Nolan's stablec

        See - http://fs2.american.edu/jpnolan/www/stable/stable.html

        There's a known limitation of Nolan's executable for alpha < 0.2.

        The data table loaded below is generated from Nolan's stablec
        with the following parameter space:

            alpha = 0.1, 0.2, ..., 2.0
            beta = -1.0, -0.9, ..., 1.0
            p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,

        and the equivalent for the right tail

        Typically inputs for stablec:

            stablec.exe <<
            1 # pdf
            1 # Nolan S equivalent to S0 in scipy
            .25,2,.25 # alpha
            -1,-1,0 # beta
            -10,10,1 # x
            1,0 # gamma, delta
            2 # output file
        """
        data = np.load(
            Path(__file__).parent /
            'data/levy_stable/stable-Z1-pdf-sample-data.npy'
        )
        # columns: x value, reference pdf, shape params, source percentile
        data = np.rec.fromarrays(data.T, names='x,p,alpha,beta,pct')
        return data
    @pytest.fixture
    def nolan_cdf_sample_data(self):
        """Sample data points for cdf computed with Nolan's stablec

        See - http://fs2.american.edu/jpnolan/www/stable/stable.html

        There's a known limitation of Nolan's executable for alpha < 0.2.

        The data table loaded below is generated from Nolan's stablec
        with the following parameter space:

            alpha = 0.1, 0.2, ..., 2.0
            beta = -1.0, -0.9, ..., 1.0
            p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,

        and the equivalent for the right tail

        Ideally, Nolan's output for CDF values should match the percentile
        from where they have been sampled from. Even more so as we extract
        percentile x positions from stablec too. However, we note at places
        Nolan's stablec will produce absolute errors in order of 1e-5. We
        compare against his calculations here. In future, once we less
        reliant on Nolan's paper we might switch to comparing directly at
        percentiles (those x values being produced from some alternative
        means).

        Typically inputs for stablec:

            stablec.exe <<
            2 # cdf
            1 # Nolan S equivalent to S0 in scipy
            .25,2,.25 # alpha
            -1,-1,0 # beta
            -10,10,1 # x
            1,0 # gamma, delta
            2 # output file
        """
        data = np.load(
            Path(__file__).parent /
            'data/levy_stable/stable-Z1-cdf-sample-data.npy'
        )
        # columns: x value, reference cdf, shape params, source percentile
        data = np.rec.fromarrays(data.T, names='x,p,alpha,beta,pct')
        return data
    @pytest.fixture
    def nolan_loc_scale_sample_data(self):
        """Sample data where loc, scale are different from 0, 1

        Data extracted in similar way to pdf/cdf above using
        Nolan's stablec but set to an arbitrary location scale of
        (2, 3) for various important parameters alpha, beta and for
        parameterisations S0 and S1.
        """
        data = np.load(
            Path(__file__).parent /
            'data/levy_stable/stable-loc-scale-sample-data.npy'
        )
        return data
    @pytest.mark.parametrize(
        "sample_size", [
            pytest.param(50), pytest.param(1500, marks=pytest.mark.slow)
        ]
    )
    @pytest.mark.parametrize("parameterization", ["S0", "S1"])
    @pytest.mark.parametrize(
        "alpha,beta", [(1.0, 0), (1.0, -0.5), (1.5, 0), (1.9, 0.5)]
    )
    @pytest.mark.parametrize("gamma,delta", [(1, 0), (3, 2)])
    def test_rvs(
            self,
            parameterization,
            alpha,
            beta,
            gamma,
            delta,
            sample_size,
    ):
        # Kolmogorov-Smirnov check that rvs samples are consistent with
        # the distribution's own cdf, in both parameterizations.
        stats.levy_stable.parameterization = parameterization
        ls = stats.levy_stable(
            alpha=alpha, beta=beta, scale=gamma, loc=delta
        )
        _, p = stats.kstest(
            ls.rvs(size=sample_size, random_state=1234), ls.cdf
        )
        assert p > 0.05
    @pytest.mark.slow
    @pytest.mark.parametrize('beta', [0.5, 1])
    def test_rvs_alpha1(self, beta):
        """Additional test cases for rvs for alpha equal to 1."""
        # alpha == 1 takes a distinct code path in the sampler.
        np.random.seed(987654321)
        alpha = 1.0
        loc = 0.5
        scale = 1.5
        x = stats.levy_stable.rvs(alpha, beta, loc=loc, scale=scale,
                                  size=5000)
        stat, p = stats.kstest(x, 'levy_stable',
                               args=(alpha, beta, loc, scale))
        assert p > 0.01
    def test_fit(self):
        # construct data to have percentiles that match
        # example in McCulloch 1986.
        x = [
            -.05413, -.05413, 0., 0., 0., 0., .00533, .00533, .00533, .00533,
            .00533, .03354, .03354, .03354, .03354, .03354, .05309, .05309,
            .05309, .05309, .05309
        ]
        alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
        assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
        assert_almost_equal(beta1, -.22, 2)
        assert_almost_equal(scale1, 0.01717, 4)
        assert_almost_equal(
            loc1, 0.00233, 2
        )  # to 2 dps due to rounding error in McCulloch86

        # cover alpha=2 scenario
        x2 = x + [.05309, .05309, .05309, .05309, .05309]
        alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
        assert_equal(alpha2, 2)
        assert_equal(beta2, -1)
        assert_almost_equal(scale2, .02503, 4)
        assert_almost_equal(loc2, .03354, 4)
@pytest.mark.xfail(reason="Unknown problem with fitstart.")
@pytest.mark.parametrize(
"alpha,beta,delta,gamma",
[
(1.5, 0.4, 2, 3),
(1.0, 0.4, 2, 3),
]
)
@pytest.mark.parametrize(
"parametrization", ["S0", "S1"]
)
def test_fit_rvs(self, alpha, beta, delta, gamma, parametrization):
"""Test that fit agrees with rvs for each parametrization."""
stats.levy_stable.parametrization = parametrization
data = stats.levy_stable.rvs(
alpha, beta, loc=delta, scale=gamma, size=10000, random_state=1234
)
fit = stats.levy_stable._fitstart(data)
alpha_obs, beta_obs, delta_obs, gamma_obs = fit
assert_allclose(
[alpha, beta, delta, gamma],
[alpha_obs, beta_obs, delta_obs, gamma_obs],
rtol=0.01,
)
def test_fit_beta_flip(self):
# Confirm that sign of beta affects loc, not alpha or scale.
x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100])
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x)
assert_equal(beta1, 1)
assert loc1 != 0
assert_almost_equal(alpha2, alpha1)
assert_almost_equal(beta2, -beta1)
assert_almost_equal(loc2, -loc1)
assert_almost_equal(scale2, scale1)
def test_fit_delta_shift(self):
# Confirm that loc slides up and down if data shifts.
SHIFT = 1
x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100])
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(-x)
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x + SHIFT)
assert_almost_equal(alpha2, alpha1)
assert_almost_equal(beta2, beta1)
assert_almost_equal(loc2, loc1 + SHIFT)
assert_almost_equal(scale2, scale1)
def test_fit_loc_extrap(self):
# Confirm that loc goes out of sample for alpha close to 1.
x = [1, 1, 3, 3, 10, 10, 10, 30, 30, 140, 140]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert alpha1 < 1, f"Expected alpha < 1, got {alpha1}"
assert loc1 < min(x), f"Expected loc < {min(x)}, got {loc1}"
x2 = [1, 1, 3, 3, 10, 10, 10, 30, 30, 130, 130]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert alpha2 > 1, f"Expected alpha > 1, got {alpha2}"
assert loc2 > max(x2), f"Expected loc > {max(x2)}, got {loc2}"
@pytest.mark.parametrize(
"pct_range,alpha_range,beta_range", [
pytest.param(
[.01, .5, .99],
[.1, 1, 2],
[-1, 0, .8],
),
pytest.param(
[.01, .05, .5, .95, .99],
[.1, .5, 1, 1.5, 2],
[-.9, -.5, 0, .3, .6, 1],
marks=pytest.mark.slow
),
pytest.param(
[.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
np.linspace(0.1, 2, 20),
np.linspace(-1, 1, 21),
marks=pytest.mark.xslow,
),
]
)
    def test_pdf_nolan_samples(
        self, nolan_pdf_sample_data, pct_range, alpha_range, beta_range
    ):
        """Test pdf values against Nolan's stablec.exe output.

        Each entry of ``tests`` below is ``[default_method, rtol,
        filter_func]``: the pdf is evaluated with ``default_method`` on the
        sample rows selected by ``filter_func`` and compared against
        Nolan's reference values within relative tolerance ``rtol``.
        """
        data = nolan_pdf_sample_data
        # some tests break on linux 32 bit
        uname = platform.uname()
        is_linux_32 = uname.system == 'Linux' and uname.machine == 'i686'
        platform_desc = "/".join(
            [uname.system, uname.machine, uname.processor])
        # fmt: off
        # There are a number of cases which fail on some but not all platforms.
        # These are excluded by the filters below. TODO: Rewrite tests so that
        # the now filtered out test cases are still run but marked in pytest as
        # expected to fail.
        tests = [
            [
                'dni', 1e-7, lambda r: (
                    np.isin(r['pct'], pct_range) &
                    np.isin(r['alpha'], alpha_range) &
                    np.isin(r['beta'], beta_range) &
                    ~(
                        (
                            (r['beta'] == 0) &
                            (r['pct'] == 0.5)
                        ) |
                        (
                            (r['beta'] >= 0.9) &
                            (r['alpha'] >= 1.6) &
                            (r['pct'] == 0.5)
                        ) |
                        (
                            (r['alpha'] <= 0.4) &
                            np.isin(r['pct'], [.01, .99])
                        ) |
                        (
                            (r['alpha'] <= 0.3) &
                            np.isin(r['pct'], [.05, .95])
                        ) |
                        (
                            (r['alpha'] <= 0.2) &
                            np.isin(r['pct'], [.1, .9])
                        ) |
                        (
                            (r['alpha'] == 0.1) &
                            np.isin(r['pct'], [.25, .75]) &
                            np.isin(np.abs(r['beta']), [.5, .6, .7])
                        ) |
                        (
                            (r['alpha'] == 0.1) &
                            np.isin(r['pct'], [.5]) &
                            np.isin(np.abs(r['beta']), [.1])
                        ) |
                        (
                            (r['alpha'] == 0.1) &
                            np.isin(r['pct'], [.35, .65]) &
                            np.isin(np.abs(r['beta']), [-.4, -.3, .3, .4, .5])
                        ) |
                        (
                            (r['alpha'] == 0.2) &
                            (r['beta'] == 0.5) &
                            (r['pct'] == 0.25)
                        ) |
                        (
                            (r['alpha'] == 0.2) &
                            (r['beta'] == -0.3) &
                            (r['pct'] == 0.65)
                        ) |
                        (
                            (r['alpha'] == 0.2) &
                            (r['beta'] == 0.3) &
                            (r['pct'] == 0.35)
                        ) |
                        (
                            (r['alpha'] == 1.) &
                            np.isin(r['pct'], [.5]) &
                            np.isin(np.abs(r['beta']), [.1, .2, .3, .4])
                        ) |
                        (
                            (r['alpha'] == 1.) &
                            np.isin(r['pct'], [.35, .65]) &
                            np.isin(np.abs(r['beta']), [.8, .9, 1.])
                        ) |
                        (
                            (r['alpha'] == 1.) &
                            np.isin(r['pct'], [.01, .99]) &
                            np.isin(np.abs(r['beta']), [-.1, .1])
                        ) |
                        # various points ok but too sparse to list
                        (r['alpha'] >= 1.1)
                    )
                )
            ],
            # piecewise generally good accuracy
            [
                'piecewise', 1e-11, lambda r: (
                    np.isin(r['pct'], pct_range) &
                    np.isin(r['alpha'], alpha_range) &
                    np.isin(r['beta'], beta_range) &
                    (r['alpha'] > 0.2) &
                    (r['alpha'] != 1.)
                )
            ],
            # for alpha = 1. for linux 32 bit optimize.bisect
            # has some issues for .01 and .99 percentile
            [
                'piecewise', 1e-11, lambda r: (
                    (r['alpha'] == 1.) &
                    (not is_linux_32) &
                    np.isin(r['pct'], pct_range) &
                    (1. in alpha_range) &
                    np.isin(r['beta'], beta_range)
                )
            ],
            # for small alpha very slightly reduced accuracy
            [
                'piecewise', 2.5e-10, lambda r: (
                    np.isin(r['pct'], pct_range) &
                    np.isin(r['alpha'], alpha_range) &
                    np.isin(r['beta'], beta_range) &
                    (r['alpha'] <= 0.2)
                )
            ],
            # fft accuracy reduces as alpha decreases
            [
                'fft-simpson', 1e-5, lambda r: (
                    (r['alpha'] >= 1.9) &
                    np.isin(r['pct'], pct_range) &
                    np.isin(r['alpha'], alpha_range) &
                    np.isin(r['beta'], beta_range)
                ),
            ],
            [
                'fft-simpson', 1e-6, lambda r: (
                    np.isin(r['pct'], pct_range) &
                    np.isin(r['alpha'], alpha_range) &
                    np.isin(r['beta'], beta_range) &
                    (r['alpha'] > 1) &
                    (r['alpha'] < 1.9)
                )
            ],
            # fft relative errors for alpha < 1, will raise if enabled
            # ['fft-simpson', 1e-4, lambda r: r['alpha'] == 0.9],
            # ['fft-simpson', 1e-3, lambda r: r['alpha'] == 0.8],
            # ['fft-simpson', 1e-2, lambda r: r['alpha'] == 0.7],
            # ['fft-simpson', 1e-1, lambda r: r['alpha'] == 0.6],
        ]
        # fmt: on
        # Evaluate each method/tolerance combination on its filtered subset
        # and report every failing row (with abs/rel error columns) at once.
        for ix, (default_method, rtol,
                 filter_func) in enumerate(tests):
            stats.levy_stable.pdf_default_method = default_method
            subdata = data[filter_func(data)
                           ] if filter_func is not None else data
            with suppress_warnings() as sup:
                # occurs in FFT methods only
                sup.record(
                    RuntimeWarning,
                    "Density calculations experimental for FFT method.*"
                )
                p = stats.levy_stable.pdf(
                    subdata['x'],
                    subdata['alpha'],
                    subdata['beta'],
                    scale=1,
                    loc=0
                )
                # over="ignore": relerr may divide by tiny reference values.
                with np.errstate(over="ignore"):
                    subdata2 = rec_append_fields(
                        subdata,
                        ['calc', 'abserr', 'relerr'],
                        [
                            p,
                            np.abs(p - subdata['p']),
                            np.abs(p - subdata['p']) / np.abs(subdata['p'])
                        ]
                    )
                failures = subdata2[
                    (subdata2['relerr'] >= rtol) |
                    np.isnan(p)
                ]
                assert_allclose(
                    p,
                    subdata['p'],
                    rtol,
                    err_msg="pdf test %s failed with method '%s'"
                            " [platform: %s]\n%s\n%s" %
                            (ix, default_method, platform_desc, failures.dtype.names,
                             failures),
                    verbose=False
                )
@pytest.mark.parametrize(
"pct_range,alpha_range,beta_range", [
pytest.param(
[.01, .5, .99],
[.1, 1, 2],
[-1, 0, .8],
),
pytest.param(
[.01, .05, .5, .95, .99],
[.1, .5, 1, 1.5, 2],
[-.9, -.5, 0, .3, .6, 1],
marks=pytest.mark.slow
),
pytest.param(
[.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
np.linspace(0.1, 2, 20),
np.linspace(-1, 1, 21),
marks=pytest.mark.xslow,
),
]
)
def test_cdf_nolan_samples(
self, nolan_cdf_sample_data, pct_range, alpha_range, beta_range
):
""" Test cdf values against Nolan's stablec.exe output."""
data = nolan_cdf_sample_data
tests = [
# piecewise generally good accuracy
[
'piecewise', 2e-12, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
~(
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
(r['pct'] == 0.01)
) |
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [0.1, 0.2, 0.3]) &
(r['pct'] == 0.99)
)
)
)
],
# for some points with alpha=1, Nolan's STABLE clearly
# loses accuracy
[
'piecewise', 5e-2, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
(r['pct'] == 0.01)
) |
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [0.1, 0.2, 0.3]) &
(r['pct'] == 0.99)
)
)
],
# fft accuracy poor, very poor alpha < 1
[
'fft-simpson', 1e-5, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.7)
)
],
[
'fft-simpson', 1e-4, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.5) &
(r['alpha'] <= 1.7)
)
],
[
'fft-simpson', 1e-3, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.3) &
(r['alpha'] <= 1.5)
)
],
[
'fft-simpson', 1e-2, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.0) &
(r['alpha'] <= 1.3)
)
],
]
for ix, (default_method, rtol,
filter_func) in enumerate(tests):
stats.levy_stable.cdf_default_method = default_method
subdata = data[filter_func(data)
] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(
RuntimeWarning,
'Cumulative density calculations experimental for FFT'
+ ' method. Use piecewise method instead.*'
)
p = stats.levy_stable.cdf(
subdata['x'],
subdata['alpha'],
subdata['beta'],
scale=1,
loc=0
)
with np.errstate(over="ignore"):
subdata2 = rec_append_fields(
subdata,
['calc', 'abserr', 'relerr'],
[
p,
np.abs(p - subdata['p']),
np.abs(p - subdata['p']) / np.abs(subdata['p'])
]
)
failures = subdata2[
(subdata2['relerr'] >= rtol) |
np.isnan(p)
]
assert_allclose(
p,
subdata['p'],
rtol,
err_msg="cdf test %s failed with method '%s'\n%s\n%s" %
(ix, default_method, failures.dtype.names, failures),
verbose=False
)
@pytest.mark.parametrize("param", [0, 1])
@pytest.mark.parametrize("case", ["pdf", "cdf"])
def test_location_scale(
self, nolan_loc_scale_sample_data, param, case
):
"""Tests for pdf and cdf where loc, scale are different from 0, 1
"""
uname = platform.uname()
is_linux_32 = uname.system == 'Linux' and "32bit" in platform.architecture()[0]
# Test seems to be unstable (see gh-17839 for a bug report on Debian
# i386), so skip it.
if is_linux_32 and case == 'pdf':
pytest.skip("Test unstable on some platforms; see gh-17839, 17859")
data = nolan_loc_scale_sample_data
# We only test against piecewise as location/scale transforms
# are same for other methods.
stats.levy_stable.cdf_default_method = "piecewise"
stats.levy_stable.pdf_default_method = "piecewise"
subdata = data[data["param"] == param]
stats.levy_stable.parameterization = f"S{param}"
assert case in ["pdf", "cdf"]
function = (
stats.levy_stable.pdf if case == "pdf" else stats.levy_stable.cdf
)
v1 = function(
subdata['x'], subdata['alpha'], subdata['beta'], scale=2, loc=3
)
assert_allclose(v1, subdata[case], 1e-5)
@pytest.mark.parametrize(
"method,decimal_places",
[
['dni', 4],
['piecewise', 4],
]
)
    def test_pdf_alpha_equals_one_beta_non_zero(self, method, decimal_places):
        """ sample points extracted from Tables and Graphs of Stable
        Probability Density Functions - Donald R Holt - 1973 - p 187.
        """
        # x values, paired element-wise with `betas` below; each (x, beta)
        # pair has its tabulated density in `density`.
        xs = np.array(
            [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
        )
        density = np.array(
            [
                .3183, .3096, .2925, .2622, .1591, .1587, .1599, .1635, .0637,
                .0729, .0812, .0955, .0318, .0390, .0458, .0586, .0187, .0236,
                .0285, .0384
            ]
        )
        betas = np.array(
            [
                0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0,
                .25, .5, 1
            ]
        )
        # alpha == 1 with beta != 0 is the numerically delicate branch of
        # the S1 parameterization; suppress the instability warning and
        # compare against the published 4-decimal table values.
        with np.errstate(all='ignore'), suppress_warnings() as sup:
            sup.filter(
                category=RuntimeWarning,
                message="Density calculation unstable.*"
            )
            stats.levy_stable.pdf_default_method = method
            # stats.levy_stable.fft_grid_spacing = 0.0001
            pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
            assert_almost_equal(
                pdf, density, decimal_places, method
            )
@pytest.mark.parametrize(
"params,expected",
[
[(1.48, -.22, 0, 1), (0, np.inf, np.nan, np.nan)],
[(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
]
)
def test_stats(self, params, expected):
observed = stats.levy_stable.stats(
params[0], params[1], loc=params[2], scale=params[3],
moments='mvsk'
)
assert_almost_equal(observed, expected)
@pytest.mark.parametrize('alpha', [0.25, 0.5, 0.75])
@pytest.mark.parametrize(
'function,beta,points,expected',
[
(
stats.levy_stable.cdf,
1.0,
np.linspace(-25, 0, 10),
0.0,
),
(
stats.levy_stable.pdf,
1.0,
np.linspace(-25, 0, 10),
0.0,
),
(
stats.levy_stable.cdf,
-1.0,
np.linspace(0, 25, 10),
1.0,
),
(
stats.levy_stable.pdf,
-1.0,
np.linspace(0, 25, 10),
0.0,
)
]
)
def test_distribution_outside_support(
self, alpha, function, beta, points, expected
):
"""Ensure the pdf/cdf routines do not return nan outside support.
This distribution's support becomes truncated in a few special cases:
support is [mu, infty) if alpha < 1 and beta = 1
support is (-infty, mu] if alpha < 1 and beta = -1
Otherwise, the support is all reals. Here, mu is zero by default.
"""
assert 0 < alpha < 1
assert_almost_equal(
function(points, alpha=alpha, beta=beta),
np.full(len(points), expected)
)
@pytest.mark.parametrize(
'x,alpha,beta,expected',
# Reference values from Matlab
# format long
# alphas = [1.7720732804618808, 1.9217001522410235, 1.5654806051633634,
# 1.7420803447784388, 1.5748002527689913];
# betas = [0.5059373136902996, -0.8779442746685926, -0.4016220341911392,
# -0.38180029468259247, -0.25200194914153684];
# x0s = [0, 1e-4, -1e-4];
# for x0 = x0s
# disp("x0 = " + x0)
# for ii = 1:5
# alpha = alphas(ii);
# beta = betas(ii);
# pd = makedist('Stable','alpha',alpha,'beta',beta,'gam',1,'delta',0);
# % we need to adjust x. It is the same as x = 0 In scipy.
# x = x0 - beta * tan(pi * alpha / 2);
# disp(pd.pdf(x))
# end
# end
[
(0, 1.7720732804618808, 0.5059373136902996, 0.278932636798268),
(0, 1.9217001522410235, -0.8779442746685926, 0.281054757202316),
(0, 1.5654806051633634, -0.4016220341911392, 0.271282133194204),
(0, 1.7420803447784388, -0.38180029468259247, 0.280202199244247),
(0, 1.5748002527689913, -0.25200194914153684, 0.280136576218665),
]
)
def test_x_equal_zeta(
self, x, alpha, beta, expected
):
"""Test pdf for x equal to zeta.
With S1 parametrization: x0 = x + zeta if alpha != 1 So, for x = 0, x0
will be close to zeta.
When case "x equal zeta" is not handled properly and quad_eps is not
low enough: - pdf may be less than 0 - logpdf is nan
The points from the parametrize block are found randomly so that PDF is
less than 0.
Reference values taken from MATLAB
https://www.mathworks.com/help/stats/stable-distribution.html
"""
stats.levy_stable.quad_eps = 1.2e-11
assert_almost_equal(
stats.levy_stable.pdf(x, alpha=alpha, beta=beta),
expected,
)
@pytest.mark.xfail
@pytest.mark.parametrize(
# See comment for test_x_equal_zeta for script for reference values
'x,alpha,beta,expected',
[
(1e-4, 1.7720732804618808, 0.5059373136902996, 0.278929165340670),
(1e-4, 1.9217001522410235, -0.8779442746685926, 0.281056564327953),
(1e-4, 1.5654806051633634, -0.4016220341911392, 0.271252432161167),
(1e-4, 1.7420803447784388, -0.38180029468259247, 0.280205311264134),
(1e-4, 1.5748002527689913, -0.25200194914153684, 0.280140965235426),
(-1e-4, 1.7720732804618808, 0.5059373136902996, 0.278936106741754),
(-1e-4, 1.9217001522410235, -0.8779442746685926, 0.281052948629429),
(-1e-4, 1.5654806051633634, -0.4016220341911392, 0.271275394392385),
(-1e-4, 1.7420803447784388, -0.38180029468259247, 0.280199085645099),
(-1e-4, 1.5748002527689913, -0.25200194914153684, 0.280132185432842),
]
)
    def test_x_near_zeta(
        self, x, alpha, beta, expected
    ):
        """Test pdf for x near zeta.
        With S1 parametrization: x0 = x + zeta if alpha != 1 So, for x = 0, x0
        will be close to zeta.
        When case "x near zeta" is not handled properly and quad_eps is not
        low enough: - pdf may be less than 0 - logpdf is nan
        The points from the parametrize block are found randomly so that PDF is
        less than 0.
        Reference values taken from MATLAB
        https://www.mathworks.com/help/stats/stable-distribution.html
        """
        # NOTE(review): this test carries an xfail mark in its decorator —
        # the near-zeta case is presumably still inaccurate at these points;
        # do not expect these assertions to pass yet.
        stats.levy_stable.quad_eps = 1.2e-11
        assert_almost_equal(
            stats.levy_stable.pdf(x, alpha=alpha, beta=beta),
            expected,
        )
class TestArrayArgument:  # test for ticket:992
    """Regression test: rv methods must accept array-valued loc/scale."""
    def setup_method(self):
        np.random.seed(1234)
    def test_noexception(self):
        # Broadcasting an array-valued loc/scale against size=(10, 5)
        # must not raise, and must produce the requested shape.
        sample = stats.norm.rvs(loc=np.arange(5), scale=np.ones(5),
                                size=(10, 5))
        assert_equal(sample.shape, (10, 5))
class TestDocstring:
    """Sanity checks on distribution docstrings and construction."""
    def test_docstrings(self):
        # See ticket #761. Docstrings may be stripped (python -OO), so
        # only inspect them when present.
        for dist, word in [(stats.rayleigh, "rayleigh"),
                           (stats.bernoulli, "bernoulli")]:
            if dist.__doc__ is not None:
                assert_(word in dist.__doc__.lower())
    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail. See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
def test_args_reduce():
    """argsreduce filters arrays by a condition and broadcasts scalars."""
    data = np.array([1, 3, 2, 1, 2, 3, 3])
    # A boolean-array condition keeps only the matching entries; the
    # scalar second argument stays a length-1 array.
    kept, bcast = argsreduce(data > 1, data, 2)
    assert_array_equal(kept, [3, 2, 2, 3, 3])
    assert_array_equal(bcast, [2])
    # A scalar (always-true) condition leaves the array untouched and
    # broadcasts the scalar to full length.
    kept, bcast = argsreduce(2 > 1, data, 2)
    assert_array_equal(kept, data)
    assert_array_equal(bcast, [2] * np.size(data))
    # An all-true boolean array behaves the same way.
    kept, bcast = argsreduce(data > 0, data, 2)
    assert_array_equal(kept, data)
    assert_array_equal(bcast, [2] * np.size(data))
class TestFitMethod:
    """Tests for the ``fit`` method of distributions, in particular the
    handling of fixed parameters (``floc``/``fscale``/``f0``/``fa``/...)."""
    # Distributions excluded from generic fit testing elsewhere.
    skip = ['ncf', 'ksone', 'kstwo']
    def setup_method(self):
        np.random.seed(1234)
    # skip these b/c deprecated, or only loc and scale arguments
    fitSkipNonFinite = ['expon', 'norm', 'uniform']
    @pytest.mark.parametrize('dist,args', distcont)
    def test_fit_w_non_finite_data_values(self, dist, args):
        """gh-10300"""
        if dist in self.fitSkipNonFinite:
            pytest.skip("%s fit known to fail or deprecated" % dist)
        # Data containing nan/inf must be rejected, not silently fitted.
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        distfunc = getattr(stats, dist)
        assert_raises(ValueError, distfunc.fit, x, fscale=1)
        assert_raises(ValueError, distfunc.fit, y, fscale=1)
    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            # With loc and scale fixed, the shape MLE has a closed form.
            expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [expected_shape, 0, 20], atol=1e-8)
    def test_fix_fit_norm(self):
        """Normal fit with loc and/or scale fixed matches closed forms."""
        x = np.arange(1, 6)
        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))
        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))
        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)
    def test_fix_fit_gamma(self):
        """Gamma fit with fixed parameters satisfies the MLE equations."""
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()
        # A basic test of gamma.fit with floc=0.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)
        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)
        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)
        # loc and scale fixed.
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        # With loc/scale fixed, the shape MLE satisfies digamma(a) = c.
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)
    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.
        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func
        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)
        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)
        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)
    def test_expon_fit(self):
        """Exponential fit has closed-form MLEs; check fixed variants."""
        x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
        loc, scale = stats.expon.fit(x)
        assert_equal(loc, 2)    # x.min()
        assert_equal(scale, 2)  # x.mean() - x.min()
        loc, scale = stats.expon.fit(x, fscale=3)
        assert_equal(loc, 2)    # x.min()
        assert_equal(scale, 3)  # fscale
        loc, scale = stats.expon.fit(x, floc=0)
        assert_equal(loc, 0)    # floc
        assert_equal(scale, 4)  # x.mean() - loc
    def test_lognorm_fit(self):
        """Lognormal fit with floc (and optionally fscale/fix_s) fixed."""
        x = np.array([1.5, 3, 10, 15, 23, 59])
        lnxm1 = np.log(x - 1)
        shape, loc, scale = stats.lognorm.fit(x, floc=1)
        assert_allclose(shape, lnxm1.std(), rtol=1e-12)
        assert_equal(loc, 1)
        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
        shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
        assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
                        rtol=1e-12)
        assert_equal(loc, 1)
        assert_equal(scale, 6)
        # fix_s is the named-shape alternative to f0 for lognorm.
        shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
        assert_equal(shape, 0.75)
        assert_equal(loc, 1)
        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
    def test_uniform_fit(self):
        """Uniform fit is (min, ptp); fixed loc/scale change the MLE."""
        x = np.array([1.0, 1.1, 1.2, 9.0])
        loc, scale = stats.uniform.fit(x)
        assert_equal(loc, x.min())
        assert_equal(scale, np.ptp(x))
        loc, scale = stats.uniform.fit(x, floc=0)
        assert_equal(loc, 0)
        assert_equal(scale, x.max())
        loc, scale = stats.uniform.fit(x, fscale=10)
        assert_equal(loc, 0)
        assert_equal(scale, 10)
        # Fixed values that cannot cover the data must be rejected.
        assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
        assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
    @pytest.mark.slow
    @pytest.mark.parametrize("method", ["MLE", "MM"])
    def test_fshapes(self, method):
        # take a beta distribution, with shapes='a, b', and make sure that
        # fa is equivalent to f0, and fb is equivalent to f1
        a, b = 3., 4.
        x = stats.beta.rvs(a, b, size=100, random_state=1234)
        res_1 = stats.beta.fit(x, f0=3., method=method)
        res_2 = stats.beta.fit(x, fa=3., method=method)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
        res_2 = stats.beta.fit(x, fix_a=3., method=method)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
        res_3 = stats.beta.fit(x, f1=4., method=method)
        res_4 = stats.beta.fit(x, fb=4., method=method)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
        res_4 = stats.beta.fit(x, fix_b=4., method=method)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
        # cannot specify both positional and named args at the same time
        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2, method=method)
        # check that attempting to fix all parameters raises a ValueError
        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
                      floc=2, fscale=3, method=method)
        # check that specifying floc, fscale and fshapes works for
        # beta and gamma which override the generic fit method
        res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1, method=method)
        aa, bb, ll, ss = res_5
        assert_equal([aa, ll, ss], [3., 0, 1])
        # gamma distribution
        a = 3.
        data = stats.gamma.rvs(a, size=100)
        aa, ll, ss = stats.gamma.fit(data, fa=a, method=method)
        assert_equal(aa, a)
    @pytest.mark.parametrize("method", ["MLE", "MM"])
    def test_extra_params(self, method):
        # unknown parameters should raise rather than be silently ignored
        dist = stats.exponnorm
        data = dist.rvs(K=2, size=100)
        dct = dict(enikibeniki=-101)
        assert_raises(TypeError, dist.fit, data, **dct, method=method)
class TestFrozen:
    """Tests that frozen distributions behave like the unfrozen originals."""
    def setup_method(self):
        np.random.seed(1234)
    # Test that a frozen distribution gives the same results as the original
    # object.
    #
    # Only tested for the normal distribution (with loc and scale specified)
    # and for the gamma distribution (with a shape parameter specified).
    def test_norm(self):
        """Every public method of a frozen norm matches the unfrozen call."""
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)
        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        result_f = frozen.moment(2)
        result = dist.moment(2, loc=10.0, scale=3.0)
        assert_equal(result_f, result)
        assert_equal(frozen.a, dist.a)
        assert_equal(frozen.b, dist.b)
    def test_gamma(self):
        """Every public method of a frozen gamma matches the unfrozen call."""
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)
        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)
        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)
        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)
        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)
        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)
        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)
        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)
        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)
        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)
        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)
        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)
        assert_equal(frozen.a, frozen.dist.a)
        assert_equal(frozen.b, frozen.dist.b)
    def test_regression_ticket_1293(self):
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course.  But it is not
        # the focus of this test.
        assert_equal(m1, m2)
    def test_ab(self):
        # test that the support of a frozen distribution
        # (i) remains frozen even if it changes for the original one
        # (ii) is actually correct if the shape parameters are such that
        #      the values of [a, b] are not the default [0, inf]
        # take a genpareto as an example where the support
        # depends on the value of the shape parameter:
        # for c > 0: a, b = 0, inf
        # for c < 0: a, b = 0, -1/c
        c = -0.1
        rv = stats.genpareto(c=c)
        a, b = rv.dist._get_support(c)
        assert_equal([a, b], [0., 10.])
        c = 0.1
        stats.genpareto.pdf(0, c=c)
        assert_equal(rv.dist._get_support(c), [0, np.inf])
        c = -0.1
        rv = stats.genpareto(c=c)
        a, b = rv.dist._get_support(c)
        assert_equal([a, b], [0., 10.])
        c = 0.1
        stats.genpareto.pdf(0, c)  # this should NOT change genpareto.b
        assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))
        rv1 = stats.genpareto(c=0.1)
        assert_(rv1.dist is not rv.dist)
        # c >= 0: a, b = [0, inf]
        for c in [1., 0.]:
            c = np.asarray(c)
            rv = stats.genpareto(c=c)
            a, b = rv.a, rv.b
            assert_equal(a, 0.)
            assert_(np.isposinf(b))
        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        a, b = stats.genpareto._get_support(c)
        assert_allclose([a, b], [0., 0.5])
    def test_rv_frozen_in_namespace(self):
        # Regression test for gh-3522
        assert_(hasattr(stats.distributions, 'rv_frozen'))
    def test_random_state(self):
        # only check that the random_state attribute exists,
        frozen = stats.norm()
        assert_(hasattr(frozen, 'random_state'))
        # ... that it can be set,
        frozen.random_state = 42
        assert_equal(frozen.random_state.get_state(),
                     np.random.RandomState(42).get_state())
        # ... and that .rvs method accepts it as an argument
        rndm = np.random.RandomState(1234)
        frozen.rvs(size=8, random_state=rndm)
    def test_pickling(self):
        # test that a frozen instance pickles and unpickles
        # (this method is a clone of common_tests.check_pickling)
        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
        poiss = stats.poisson(3.)
        sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                           [0.1, 0.2, 0.3, 0.4]))
        for distfn in [beta, poiss, sample]:
            distfn.random_state = 1234
            distfn.rvs(size=8)
            s = pickle.dumps(distfn)
            r0 = distfn.rvs(size=8)
            unpickled = pickle.loads(s)
            # The unpickled copy must reproduce the same random stream.
            r1 = unpickled.rvs(size=8)
            assert_equal(r0, r1)
            # also smoke test some methods
            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
            assert_equal(medians[0], medians[1])
            assert_equal(distfn.cdf(medians[0]),
                         unpickled.cdf(medians[1]))
    def test_expect(self):
        # smoke test the expect method of the frozen distribution
        # only take a gamma w/loc and scale and poisson with loc specified
        def func(x):
            return x
        gm = stats.gamma(a=2, loc=3, scale=4)
        with np.errstate(invalid="ignore", divide="ignore"):
            gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
            gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
                                           lb=1, ub=2, conditional=True)
        assert_allclose(gm_val, gamma_val)
        p = stats.poisson(3, loc=4)
        p_val = p.expect(func)
        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
        assert_allclose(p_val, poisson_val)
class TestExpect:
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
scale=2., lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2, 2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
# rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows
# check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
def test_logser(self):
# test a discrete distribution with infinite support and loc
p, loc = 0.3, 3
res_0 = stats.logser.expect(lambda k: k, args=(p,))
# check against the correct answer (sum of a geom series)
assert_allclose(res_0,
p / (p - 1.) / np.log(1. - p), atol=1e-15)
# now check it with `loc`
res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
assert_allclose(res_l, res_0 + loc, atol=1e-15)
def test_skellam(self):
# Use a discrete distribution w/ bi-infinite support. Compute two first
# moments and compare to known values (cf skellam.stats)
p1, p2 = 18, 22
m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
assert_allclose(m1, p1 - p2, atol=1e-12)
assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
def test_randint(self):
# Use a discrete distribution w/ parameter-dependent support, which
# is larger than the default chunksize
lo, hi = 0, 113
res = stats.randint.expect(lambda x: x, (lo, hi))
assert_allclose(res,
sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
def test_zipf(self):
# Test that there is no infinite loop even if the sum diverges
assert_warns(RuntimeWarning, stats.zipf.expect,
lambda x: x**2, (2,))
def test_discrete_kwds(self):
# check that discrete expect accepts keywords to control the summation
n0 = stats.poisson.expect(lambda x: 1, args=(2,))
n1 = stats.poisson.expect(lambda x: 1, args=(2,),
maxcount=1001, chunksize=32, tolerance=1e-8)
assert_almost_equal(n0, n1, decimal=14)
def test_moment(self):
# test the .moment() method: compute a higher moment and compare to
# a known value
def poiss_moment5(mu):
return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
for mu in [5, 7]:
m5 = stats.poisson.moment(5, mu)
assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
def test_challenging_cases_gh8928(self):
# Several cases where `expect` failed to produce a correct result were
# reported in gh-8928. Check that these cases have been resolved.
assert_allclose(stats.norm.expect(loc=36, scale=1.0), 36)
assert_allclose(stats.norm.expect(loc=40, scale=1.0), 40)
assert_allclose(stats.norm.expect(loc=10, scale=0.1), 10)
assert_allclose(stats.gamma.expect(args=(148,)), 148)
assert_allclose(stats.logistic.expect(loc=85), 85)
    def test_lb_ub_gh15855(self):
        # Make sure changes to `expect` made in gh-15855 treat lb/ub correctly
        # uniform(loc=10, scale=5) is supported on [10, 15]
        dist = stats.uniform
        ref = dist.mean(loc=10, scale=5)  # 12.5
        # moment over whole distribution
        assert_allclose(dist.expect(loc=10, scale=5), ref)
        # moment over whole distribution, lb and ub outside of support
        assert_allclose(dist.expect(loc=10, scale=5, lb=9, ub=16), ref)
        # moment over 60% of distribution, [lb, ub] centered within support
        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=14), ref*0.6)
        # moment over truncated distribution, essentially
        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=14,
                                    conditional=True), ref)
        # moment over 40% of distribution, [lb, ub] not centered within support
        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=13), 12*0.4)
        # moment with lb > ub: integration bounds reversed, so sign flips
        assert_allclose(dist.expect(loc=10, scale=5, lb=13, ub=11), -12*0.4)
        # moment with lb > ub, conditional: sign cancels in the normalization
        assert_allclose(dist.expect(loc=10, scale=5, lb=13, ub=11,
                                    conditional=True), 12)
class TestNct:
    """Tests for the noncentral t distribution (stats.nct)."""

    def test_nc_parameter(self):
        # Parameter values c<=0 were not enabled (gh-2402).
        # For negative values c and for c=0 results of rv.cdf(0) below were nan
        rv = stats.nct(5, 0)
        assert_equal(rv.cdf(0), 0.5)
        rv = stats.nct(5, -1)
        assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)

    def test_broadcasting(self):
        # df varies down a column, nc across a row; pdf must broadcast them
        res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
                            np.linspace(0.1, 1, 4))
        expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
                          [0.00217142, 0.00395366, 0.00683888, 0.01126276],
                          [0.00153078, 0.00291093, 0.00525206, 0.00900815]])
        assert_allclose(res, expected, rtol=1e-5)

    def test_variance_gh_issue_2401(self):
        # Computation of the variance of a non-central t-distribution resulted
        # in a TypeError: ufunc 'isinf' not supported for the input types,
        # and the inputs could not be safely coerced to any supported types
        # according to the casting rule 'safe'
        rv = stats.nct(4, 0)
        assert_equal(rv.var(), 2.0)

    def test_nct_inf_moments(self):
        # n-th moment of nct only exists for df > n; lower moments must
        # stay finite while the nonexistent ones come back as nan
        m, v, s, k = stats.nct.stats(df=0.9, nc=0.3, moments='mvsk')
        assert_equal([m, v, s, k], [np.nan, np.nan, np.nan, np.nan])

        m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
        assert_(np.isfinite(m))
        assert_equal([v, s, k], [np.nan, np.nan, np.nan])

        m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
        assert_(np.isfinite([m, v, s]).all())
        assert_equal(k, np.nan)

    def test_nct_stats_large_df_values(self):
        # previously gamma function was used which lost precision at df=345
        # cf. https://github.com/scipy/scipy/issues/12919 for details
        nct_mean_df_1000 = stats.nct.mean(1000, 2)
        nct_stats_df_1000 = stats.nct.stats(1000, 2)
        # These expected values were computed with mpmath.  They were also
        # verified with the Wolfram Alpha expressions:
        #     Mean[NoncentralStudentTDistribution[1000, 2]]
        #     Var[NoncentralStudentTDistribution[1000, 2]]
        expected_stats_df_1000 = [2.0015015641422464, 1.0040115288163005]
        assert_allclose(nct_mean_df_1000, expected_stats_df_1000[0],
                        rtol=1e-10)
        assert_allclose(nct_stats_df_1000, expected_stats_df_1000,
                        rtol=1e-10)
        # and a bigger df value
        nct_mean = stats.nct.mean(100000, 2)
        nct_stats = stats.nct.stats(100000, 2)
        # These expected values were computed with mpmath.
        expected_stats = [2.0000150001562518, 1.0000400011500288]
        assert_allclose(nct_mean, expected_stats[0], rtol=1e-10)
        assert_allclose(nct_stats, expected_stats, rtol=1e-9)

    def test_cdf_large_nc(self):
        # gh-17916 reported a crash with large `nc` values
        assert_allclose(stats.nct.cdf(2, 2, float(2**16)), 0)
class TestRecipInvGauss:
    """Tests for the reciprocal inverse Gaussian distribution."""

    def test_pdf_endpoint(self):
        # density vanishes at the left endpoint of the support
        assert stats.recipinvgauss.pdf(0, 0.6) == 0.0

    def test_logpdf_endpoint(self):
        # log-density is -inf at the left endpoint of the support
        assert stats.recipinvgauss.logpdf(0, 0.6) == -np.inf

    def test_cdf_small_x(self):
        # The expected value was computed with mpmath:
        #
        # import mpmath
        # mpmath.mp.dps = 100
        #
        # def recipinvgauss_cdf_mp(x, mu):
        #     x = mpmath.mpf(x)
        #     mu = mpmath.mpf(mu)
        #     trm1 = 1/mu - x
        #     trm2 = 1/mu + x
        #     isqx = 1/mpmath.sqrt(x)
        #     return (mpmath.ncdf(-isqx*trm1)
        #             - mpmath.exp(2/mu)*mpmath.ncdf(-isqx*trm2))
        #
        expected = 6.590396159501331e-20
        assert_allclose(stats.recipinvgauss.cdf(0.05, 0.5), expected,
                        rtol=1e-14)

    def test_sf_large_x(self):
        # The expected value was computed with mpmath; see test_cdf_small_x.
        expected = 2.699819200556787e-18
        assert_allclose(stats.recipinvgauss.sf(80, 0.5), expected,
                        rtol=5e-15)
class TestRice:
    """Tests for the Rice distribution (stats.rice)."""

    def test_rice_zero_b(self):
        # rice distribution should work with b=0, cf gh-2164
        x = [0.2, 1., 5.]
        assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
        assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
        assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
        assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())

        q = [0.1, 0.1, 0.5, 0.9]
        assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())

        mvsk = stats.rice.stats(0, moments='mvsk')
        assert_(np.isfinite(mvsk).all())

        # furthermore, pdf is continuous as b\to 0
        # rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramovich & Stegun 9.6.7 & 9.6.10
        b = 1e-8
        assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
                        atol=b, rtol=0)

    def test_rice_rvs(self):
        # rvs must honor the requested output shape
        rvs = stats.rice.rvs
        assert_equal(rvs(b=3.).size, 1)
        assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))

    def test_rice_gh9836(self):
        # test that gh-9836 is resolved; previously jumped to 1 at the end
        cdf = stats.rice.cdf(np.arange(10, 160, 10), np.arange(10, 160, 10))
        # Generated in R
        # library(VGAM)
        # options(digits=16)
        # x = seq(10, 150, 10)
        # print(price(x, sigma=1, vee=x))
        cdf_exp = [0.4800278103504522, 0.4900233218590353, 0.4933500379379548,
                   0.4950128317658719, 0.4960103776798502, 0.4966753655438764,
                   0.4971503395812474, 0.4975065620443196, 0.4977836197921638,
                   0.4980052636649550, 0.4981866072661382, 0.4983377260666599,
                   0.4984655952615694, 0.4985751970541413, 0.4986701850071265]
        assert_allclose(cdf, cdf_exp)

        probabilities = np.arange(0.1, 1, 0.1)
        ppf = stats.rice.ppf(probabilities, 500/4, scale=4)
        # Generated in R
        # library(VGAM)
        # options(digits=16)
        # p = seq(0.1, .9, by = .1)
        # print(qrice(p, vee = 500, sigma = 4))
        ppf_exp = [494.8898762347361, 496.6495690858350, 497.9184315188069,
                   499.0026277378915, 500.0159999146250, 501.0293721352668,
                   502.1135684981884, 503.3824312270405, 505.1421247157822]
        assert_allclose(ppf, ppf_exp)

        # Fixed: use the module-level `stats` alias like the rest of the
        # file instead of the fully qualified `scipy.stats` (which relies
        # on `scipy` being imported separately).
        ppf = stats.rice.ppf(0.5, np.arange(10, 150, 10))
        # Generated in R
        # library(VGAM)
        # options(digits=16)
        # b <- seq(10, 140, 10)
        # print(qrice(0.5, vee = b, sigma = 1))
        ppf_exp = [10.04995862522287, 20.02499480078302, 30.01666512465732,
                   40.01249934924363, 50.00999966676032, 60.00833314046875,
                   70.00714273568241, 80.00624991862573, 90.00555549840364,
                   100.00499995833597, 110.00454542324384, 120.00416664255323,
                   130.00384613488120, 140.00357141338748]
        assert_allclose(ppf, ppf_exp)
class TestErlang:
    """Tests for the Erlang distribution (gamma with integer shape)."""

    def setup_method(self):
        np.random.seed(1234)

    def test_erlang_runtimewarning(self):
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            # The non-integer shape parameter 1.3 should trigger a
            # RuntimeWarning
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)

            # Calling the fit method with `f0` set to an integer should
            # *not* trigger a RuntimeWarning.  It should return the same
            # values as gamma.fit(...).
            data = [0.5, 1.0, 2.0, 4.0]
            result_erlang = stats.erlang.fit(data, f0=1)
            result_gamma = stats.gamma.fit(data, f0=1)
            assert_allclose(result_erlang, result_gamma, rtol=1e-3)

    def test_gh_pr_10949_argcheck(self):
        # erlang should accept the same shape arrays as gamma (gh-10949)
        assert_equal(stats.erlang.pdf(0.5, a=[1, -1]),
                     stats.gamma.pdf(0.5, a=[1, -1]))
class TestRayleigh:
    """Tests for the Rayleigh distribution, mainly its custom fit()."""

    def setup_method(self):
        np.random.seed(987654321)

    # gh-6227
    def test_logpdf(self):
        y = stats.rayleigh.logpdf(50)
        assert_allclose(y, -1246.0879769945718)

    def test_logsf(self):
        y = stats.rayleigh.logsf(50)
        assert_allclose(y, -1250)

    @pytest.mark.parametrize("rvs_loc,rvs_scale", [(0.85373171, 0.86932204),
                                                   (0.20558821, 0.61621008)])
    def test_fit(self, rvs_loc, rvs_scale):
        data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)

        def scale_mle(data, floc):
            # analytical MLE of scale for a known (fixed) location
            return (np.sum((data - floc) ** 2) / (2 * len(data))) ** .5

        # when `floc` is provided, `scale` is found with an analytical formula
        scale_expect = scale_mle(data, rvs_loc)
        loc, scale = stats.rayleigh.fit(data, floc=rvs_loc)
        assert_equal(loc, rvs_loc)
        assert_equal(scale, scale_expect)

        # when `fscale` is fixed, superclass fit is used to determine `loc`.
        loc, scale = stats.rayleigh.fit(data, fscale=.6)
        assert_equal(scale, .6)

        # with both parameters free, one dimensional optimization is done
        # over a new function that takes into account the dependent relation
        # of `scale` to `loc`.
        loc, scale = stats.rayleigh.fit(data)
        # test that `scale` is defined by its relation to `loc`
        assert_equal(scale, scale_mle(data, loc))

    @pytest.mark.parametrize("rvs_loc,rvs_scale", [[0.74, 0.01],
                                                   [0.08464463, 0.12069025]])
    def test_fit_comparison_super_method(self, rvs_loc, rvs_scale):
        # test that the objective function result of the analytical MLEs is
        # less than or equal to that of the numerically optimized estimate
        # (uses the module-level helper `_assert_less_or_close_loglike`)
        data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
        _assert_less_or_close_loglike(stats.rayleigh, data)

    def test_fit_warnings(self):
        # module-level helper checks the standard fit() warning behavior
        assert_fit_warnings(stats.rayleigh)

    def test_fit_gh17088(self):
        # `rayleigh.fit` could return a location that was inconsistent with
        # the data. See gh-17088.
        rng = np.random.default_rng(456)
        loc, scale, size = 50, 600, 500
        rvs = stats.rayleigh.rvs(loc, scale, size=size, random_state=rng)
        loc_fit, _ = stats.rayleigh.fit(rvs)
        assert loc_fit < np.min(rvs)
        loc_fit, scale_fit = stats.rayleigh.fit(rvs, fscale=scale)
        assert loc_fit < np.min(rvs)
        assert scale_fit == scale
class TestExponWeib:
    """Tests for the exponentiated Weibull distribution."""

    def test_pdf_logpdf(self):
        # Regression test for gh-3508.
        x = 0.1
        a = 1.0
        c = 100.0
        p = stats.exponweib.pdf(x, a, c)
        logp = stats.exponweib.logpdf(x, a, c)
        # Expected values were computed with mpmath.
        assert_allclose([p, logp],
                        [1.0000000000000054e-97, -223.35075402042244])

    def test_a_is_1(self):
        # For issue gh-3508.
        # Check that when a=1, the pdf and logpdf methods of exponweib are the
        # same as those of weibull_min.
        x = np.logspace(-4, -1, 4)
        a = 1
        c = 100

        p = stats.exponweib.pdf(x, a, c)
        expected = stats.weibull_min.pdf(x, c)
        assert_allclose(p, expected)

        logp = stats.exponweib.logpdf(x, a, c)
        expected = stats.weibull_min.logpdf(x, c)
        assert_allclose(logp, expected)

    def test_a_is_1_c_is_1(self):
        # When a = 1 and c = 1, the distribution is exponential.
        x = np.logspace(-8, 1, 10)
        a = 1
        c = 1

        p = stats.exponweib.pdf(x, a, c)
        expected = stats.expon.pdf(x)
        assert_allclose(p, expected)

        logp = stats.exponweib.logpdf(x, a, c)
        expected = stats.expon.logpdf(x)
        assert_allclose(logp, expected)

    # Reference values were computed with mpmath, e.g:
    #
    # from mpmath import mp
    #
    # def mp_sf(x, a, c):
    #     x = mp.mpf(x)
    #     a = mp.mpf(a)
    #     c = mp.mpf(c)
    #     return -mp.powm1(-mp.expm1(-x**c), a)
    #
    # mp.dps = 100
    # print(float(mp_sf(1, 2.5, 0.75)))
    #
    # prints
    #
    # 0.6823127476985246
    #
    @pytest.mark.parametrize(
        'x, a, c, ref',
        [(1, 2.5, 0.75, 0.6823127476985246),
         (50, 2.5, 0.75, 1.7056666054719663e-08),
         (125, 2.5, 0.75, 1.4534393150714602e-16),
         (250, 2.5, 0.75, 1.2391389689773512e-27),
         (250, 0.03125, 0.75, 1.548923711221689e-29),
         (3, 0.03125, 3.0, 5.873527551689983e-14),
         (2e80, 10.0, 0.02, 2.9449084156902135e-17)]
    )
    def test_sf(self, x, a, c, ref):
        sf = stats.exponweib.sf(x, a, c)
        assert_allclose(sf, ref, rtol=1e-14)

    # Reference values were computed with mpmath, e.g.
    #
    # from mpmath import mp
    #
    # def mp_isf(p, a, c):
    #     p = mp.mpf(p)
    #     a = mp.mpf(a)
    #     c = mp.mpf(c)
    #     return (-mp.log(-mp.expm1(mp.log1p(-p)/a)))**(1/c)
    #
    # mp.dps = 100
    # print(float(mp_isf(0.25, 2.5, 0.75)))
    #
    # prints
    #
    # 2.8946008178158924
    #
    @pytest.mark.parametrize(
        'p, a, c, ref',
        [(0.25, 2.5, 0.75, 2.8946008178158924),
         (3e-16, 2.5, 0.75, 121.77966713102938),
         (1e-12, 1, 2, 5.256521769756932),
         (2e-13, 0.03125, 3, 2.953915059484589),
         (5e-14, 10.0, 0.02, 7.57094886384687e+75)]
    )
    def test_isf(self, p, a, c, ref):
        isf = stats.exponweib.isf(p, a, c)
        assert_allclose(isf, ref, rtol=5e-14)
class TestFatigueLife:
    """Tail-accuracy tests for the fatigue-life (Birnbaum-Saunders)
    distribution."""

    # Reference value computed with mpmath:
    #     import mpmath
    #     mpmath.mp.dps = 80
    #     x = mpmath.mpf(800.0)
    #     c = mpmath.mpf(2.5)
    #     s = float(1 - mpmath.ncdf(1/c * (mpmath.sqrt(x)
    #                                      - 1/mpmath.sqrt(x))))
    #     # 6.593376447038406e-30
    _TAIL_PROB = 6.593376447038406e-30

    def test_sf_tail(self):
        # far-tail survival function stays accurate
        assert_allclose(stats.fatiguelife.sf(800.0, 2.5), self._TAIL_PROB,
                        rtol=1e-13)

    def test_isf_tail(self):
        # round-trip the tail probability back through isf
        assert_allclose(stats.fatiguelife.isf(self._TAIL_PROB, 2.5), 800.0,
                        rtol=1e-13)
class TestWeibull:
    """Tests for weibull_min and weibull_max."""

    def test_logpdf(self):
        # gh-6217
        y = stats.weibull_min.logpdf(0, 1)
        assert_equal(y, 0)

    def test_with_maxima_distrib(self):
        # Tests for weibull_min and weibull_max.
        # The expected values were computed using the symbolic algebra
        # program 'maxima' with the package 'distrib', which has
        # 'pdf_weibull' and 'cdf_weibull'.  The mapping between the
        # scipy and maxima functions is as follows:
        # -----------------------------------------------------------------
        # scipy                              maxima
        # ---------------------------------  ------------------------------
        # weibull_min.pdf(x, a, scale=b)     pdf_weibull(x, a, b)
        # weibull_min.logpdf(x, a, scale=b)  log(pdf_weibull(x, a, b))
        # weibull_min.cdf(x, a, scale=b)     cdf_weibull(x, a, b)
        # weibull_min.logcdf(x, a, scale=b)  log(cdf_weibull(x, a, b))
        # weibull_min.sf(x, a, scale=b)      1 - cdf_weibull(x, a, b)
        # weibull_min.logsf(x, a, scale=b)   log(1 - cdf_weibull(x, a, b))
        #
        # weibull_max.pdf(x, a, scale=b)     pdf_weibull(-x, a, b)
        # weibull_max.logpdf(x, a, scale=b)  log(pdf_weibull(-x, a, b))
        # weibull_max.cdf(x, a, scale=b)     1 - cdf_weibull(-x, a, b)
        # weibull_max.logcdf(x, a, scale=b)  log(1 - cdf_weibull(-x, a, b))
        # weibull_max.sf(x, a, scale=b)      cdf_weibull(-x, a, b)
        # weibull_max.logsf(x, a, scale=b)   log(cdf_weibull(-x, a, b))
        # -----------------------------------------------------------------
        x = 1.5
        a = 2.0
        b = 3.0

        # weibull_min
        p = stats.weibull_min.pdf(x, a, scale=b)
        assert_allclose(p, np.exp(-0.25)/3)

        lp = stats.weibull_min.logpdf(x, a, scale=b)
        assert_allclose(lp, -0.25 - np.log(3))

        c = stats.weibull_min.cdf(x, a, scale=b)
        assert_allclose(c, -special.expm1(-0.25))

        lc = stats.weibull_min.logcdf(x, a, scale=b)
        assert_allclose(lc, np.log(-special.expm1(-0.25)))

        s = stats.weibull_min.sf(x, a, scale=b)
        assert_allclose(s, np.exp(-0.25))

        ls = stats.weibull_min.logsf(x, a, scale=b)
        assert_allclose(ls, -0.25)

        # Also test using a large value x, for which computing the survival
        # function using the CDF would result in 0.
        s = stats.weibull_min.sf(30, 2, scale=3)
        assert_allclose(s, np.exp(-100))

        ls = stats.weibull_min.logsf(30, 2, scale=3)
        assert_allclose(ls, -100)

        # weibull_max
        x = -1.5

        p = stats.weibull_max.pdf(x, a, scale=b)
        assert_allclose(p, np.exp(-0.25)/3)

        lp = stats.weibull_max.logpdf(x, a, scale=b)
        assert_allclose(lp, -0.25 - np.log(3))

        c = stats.weibull_max.cdf(x, a, scale=b)
        assert_allclose(c, np.exp(-0.25))

        lc = stats.weibull_max.logcdf(x, a, scale=b)
        assert_allclose(lc, -0.25)

        s = stats.weibull_max.sf(x, a, scale=b)
        assert_allclose(s, -special.expm1(-0.25))

        ls = stats.weibull_max.logsf(x, a, scale=b)
        assert_allclose(ls, np.log(-special.expm1(-0.25)))

        # Also test using a value of x close to 0, for which computing the
        # survival function using the CDF would result in 0.
        s = stats.weibull_max.sf(-1e-9, 2, scale=3)
        assert_allclose(s, -special.expm1(-1/9000000000000000000))

        ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
        assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))

    @pytest.mark.parametrize('scale', [1.0, 0.1])
    def test_delta_cdf(self, scale):
        # Expected value computed with mpmath:
        #
        # def weibull_min_sf(x, k, scale):
        #     x = mpmath.mpf(x)
        #     k = mpmath.mpf(k)
        #     scale = mpmath.mpf(scale)
        #     return mpmath.exp(-(x/scale)**k)
        #
        # >>> import mpmath
        # >>> mpmath.mp.dps = 60
        # >>> sf1 = weibull_min_sf(7.5, 3, 1)
        # >>> sf2 = weibull_min_sf(8.0, 3, 1)
        # >>> float(sf1 - sf2)
        # 6.053624060118734e-184
        #
        # (x/scale is scale-invariant here, so the same reference value
        # holds for both parametrized scales)
        delta = stats.weibull_min._delta_cdf(scale*7.5, scale*8, 3,
                                             scale=scale)
        assert_allclose(delta, 6.053624060118734e-184)

    def test_fit_min(self):
        rng = np.random.default_rng(5985959307161735394)

        c, loc, scale = 2, 3.5, 0.5  # arbitrary, valid parameters
        dist = stats.weibull_min(c, loc, scale)
        rvs = dist.rvs(size=100, random_state=rng)

        # test that MLE still honors guesses and fixed parameters
        c2, loc2, scale2 = stats.weibull_min.fit(rvs, 1.5, floc=3)
        c3, loc3, scale3 = stats.weibull_min.fit(rvs, 1.6, floc=3)
        assert loc2 == loc3 == 3  # fixed parameter is respected
        assert c2 != c3  # different guess -> (slightly) different outcome
        # quality of fit is tested elsewhere

        # test that MoM honors fixed parameters, accepts (but ignores) guesses
        c4, loc4, scale4 = stats.weibull_min.fit(rvs, 3, fscale=3, method='mm')
        assert scale4 == 3
        # because scale was fixed, only the mean and skewness will be matched
        dist4 = stats.weibull_min(c4, loc4, scale4)
        res = dist4.stats(moments='ms')
        ref = np.mean(rvs), stats.skew(rvs)
        assert_allclose(res, ref)

    # reference values were computed via mpmath
    # from mpmath import mp
    # def weibull_sf_mpmath(x, c):
    #     x = mp.mpf(x)
    #     c = mp.mpf(c)
    #     return float(mp.exp(-x**c))

    @pytest.mark.parametrize('x, c, ref', [(50, 1, 1.9287498479639178e-22),
                                           (1000, 0.8,
                                            8.131269637872743e-110)])
    def test_sf_isf(self, x, c, ref):
        # deep-tail sf accuracy and the sf/isf round trip
        assert_allclose(stats.weibull_min.sf(x, c), ref, rtol=5e-14)
        assert_allclose(stats.weibull_min.isf(ref, c), x, rtol=5e-14)
class TestDweibull:
def test_entropy(self):
# Test that dweibull entropy follows that of weibull_min.
# (Generic tests check that the dweibull entropy is consistent
# with its PDF. As for accuracy, dweibull entropy should be just
# as accurate as weibull_min entropy. Checks of accuracy against
# a reference need only be applied to the fundamental distribution -
# weibull_min.)
rng = np.random.default_rng(8486259129157041777)
c = 10**rng.normal(scale=100, size=10)
res = stats.dweibull.entropy(c)
ref = stats.weibull_min.entropy(c) - np.log(0.5)
assert_allclose(res, ref, rtol=1e-15)
def test_sf(self):
# test that for positive values the dweibull survival function is half
# the weibull_min survival function
rng = np.random.default_rng(8486259129157041777)
c = 10**rng.normal(scale=1, size=10)
x = 10 * rng.uniform()
res = stats.dweibull.sf(x, c)
ref = 0.5 * stats.weibull_min.sf(x, c)
assert_allclose(res, ref, rtol=1e-15)
class TestTruncWeibull:
    """Tests for truncweibull_min (doubly-truncated Weibull minimum)."""

    def test_pdf_bounds(self):
        # density is zero outside the truncation interval [0.11, 1.99]
        y = stats.truncweibull_min.pdf([0.1, 2.0], 2.0, 0.11, 1.99)
        assert_equal(y, [0.0, 0.0])

    def test_logpdf(self):
        y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, np.inf)
        assert_equal(y, 0.0)

        # hand calculation
        y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, 4.0)
        assert_allclose(y, 0.14541345786885884)

    def test_ppf_bounds(self):
        # ppf maps the probability endpoints to the truncation endpoints
        y = stats.truncweibull_min.ppf([0.0, 1.0], 2.0, 0.1, 2.0)
        assert_equal(y, [0.1, 2.0])

    def test_cdf_to_ppf(self):
        # cdf/ppf round trip across the full probability range
        q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.]
        x = stats.truncweibull_min.ppf(q, 2., 0., 3.)
        q_out = stats.truncweibull_min.cdf(x, 2., 0., 3.)
        assert_allclose(q, q_out)

    def test_sf_to_isf(self):
        # sf/isf round trip across the full probability range
        q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.]
        x = stats.truncweibull_min.isf(q, 2., 0., 3.)
        q_out = stats.truncweibull_min.sf(x, 2., 0., 3.)
        assert_allclose(q, q_out)

    def test_munp(self):
        # compare the first four raw moments against numerical integration
        c = 2.
        a = 1.
        b = 3.

        def xnpdf(x, n):
            # integrand of the n-th raw moment
            return x**n*stats.truncweibull_min.pdf(x, c, a, b)

        m0 = stats.truncweibull_min.moment(0, c, a, b)
        assert_equal(m0, 1.)

        m1 = stats.truncweibull_min.moment(1, c, a, b)
        m1_expected, _ = quad(lambda x: xnpdf(x, 1), a, b)
        assert_allclose(m1, m1_expected)

        m2 = stats.truncweibull_min.moment(2, c, a, b)
        m2_expected, _ = quad(lambda x: xnpdf(x, 2), a, b)
        assert_allclose(m2, m2_expected)

        m3 = stats.truncweibull_min.moment(3, c, a, b)
        m3_expected, _ = quad(lambda x: xnpdf(x, 3), a, b)
        assert_allclose(m3, m3_expected)

        m4 = stats.truncweibull_min.moment(4, c, a, b)
        m4_expected, _ = quad(lambda x: xnpdf(x, 4), a, b)
        assert_allclose(m4, m4_expected)

    def test_reference_values(self):
        a = 1.
        b = 3.
        c = 2.
        # analytically derived median of the truncated distribution
        x_med = np.sqrt(1 - np.log(0.5 + np.exp(-(8. + np.log(2.)))))

        cdf = stats.truncweibull_min.cdf(x_med, c, a, b)
        assert_allclose(cdf, 0.5)

        lc = stats.truncweibull_min.logcdf(x_med, c, a, b)
        assert_allclose(lc, -np.log(2.))

        ppf = stats.truncweibull_min.ppf(0.5, c, a, b)
        assert_allclose(ppf, x_med)

        sf = stats.truncweibull_min.sf(x_med, c, a, b)
        assert_allclose(sf, 0.5)

        ls = stats.truncweibull_min.logsf(x_med, c, a, b)
        assert_allclose(ls, -np.log(2.))

        isf = stats.truncweibull_min.isf(0.5, c, a, b)
        assert_allclose(isf, x_med)

    def test_compare_weibull_min(self):
        # Verify that the truncweibull_min distribution gives the same results
        # as the original weibull_min when the truncation is trivial
        x = 1.5
        c = 2.0
        a = 0.0
        b = np.inf
        scale = 3.0

        p = stats.weibull_min.pdf(x, c, scale=scale)
        p_trunc = stats.truncweibull_min.pdf(x, c, a, b, scale=scale)
        assert_allclose(p, p_trunc)

        lp = stats.weibull_min.logpdf(x, c, scale=scale)
        lp_trunc = stats.truncweibull_min.logpdf(x, c, a, b, scale=scale)
        assert_allclose(lp, lp_trunc)

        cdf = stats.weibull_min.cdf(x, c, scale=scale)
        cdf_trunc = stats.truncweibull_min.cdf(x, c, a, b, scale=scale)
        assert_allclose(cdf, cdf_trunc)

        lc = stats.weibull_min.logcdf(x, c, scale=scale)
        lc_trunc = stats.truncweibull_min.logcdf(x, c, a, b, scale=scale)
        assert_allclose(lc, lc_trunc)

        s = stats.weibull_min.sf(x, c, scale=scale)
        s_trunc = stats.truncweibull_min.sf(x, c, a, b, scale=scale)
        assert_allclose(s, s_trunc)

        ls = stats.weibull_min.logsf(x, c, scale=scale)
        ls_trunc = stats.truncweibull_min.logsf(x, c, a, b, scale=scale)
        assert_allclose(ls, ls_trunc)

        # Also test using a large value x, for which computing the survival
        # function using the CDF would result in 0.
        s = stats.truncweibull_min.sf(30, 2, a, b, scale=3)
        assert_allclose(s, np.exp(-100))

        ls = stats.truncweibull_min.logsf(30, 2, a, b, scale=3)
        assert_allclose(ls, -100)

    def test_compare_weibull_min2(self):
        # Verify that the truncweibull_min distribution PDF and CDF results
        # are the same as those calculated from truncating weibull_min
        c, a, b = 2.5, 0.25, 1.25
        x = np.linspace(a, b, 100)

        pdf1 = stats.truncweibull_min.pdf(x, c, a, b)
        cdf1 = stats.truncweibull_min.cdf(x, c, a, b)

        norm = stats.weibull_min.cdf(b, c) - stats.weibull_min.cdf(a, c)
        pdf2 = stats.weibull_min.pdf(x, c) / norm
        cdf2 = (stats.weibull_min.cdf(x, c) - stats.weibull_min.cdf(a, c))/norm

        np.testing.assert_allclose(pdf1, pdf2)
        np.testing.assert_allclose(cdf1, cdf2)
class TestRdist:
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
def test_rdist_beta(self):
# rdist is a special case of stats.beta
x = np.linspace(-0.99, 0.99, 10)
c = 2.7
assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2),
stats.rdist(c).pdf(x))
# reference values were computed via mpmath
# from mpmath import mp
# mp.dps = 200
# def rdist_sf_mpmath(x, c):
# x = mp.mpf(x)
# c = mp.mpf(c)
# return float(mp.betainc(c/2, c/2, (x+1)/2, mp.one, regularized=True))
@pytest.mark.parametrize(
"x, c, ref",
[
(0.0001, 541, 0.49907251345565845),
(0.1, 241, 0.06000788166249205),
(0.5, 441, 1.0655898106047832e-29),
(0.8, 341, 6.025478373732215e-78),
]
)
def test_rdist_sf(self, x, c, ref):
assert_allclose(stats.rdist.sf(x, c), ref, rtol=5e-14)
class TestTrapezoid:
    """Tests for the trapezoidal distribution (and its `trapz` alias)."""

    def test_reduces_to_triang(self):
        # when both shape parameters coincide, trapezoid degenerates to triang
        modes = [0, 0.3, 0.5, 1]
        for mode in modes:
            x = [0, mode, 1]
            assert_almost_equal(stats.trapezoid.pdf(x, mode, mode),
                                stats.triang.pdf(x, mode))
            assert_almost_equal(stats.trapezoid.cdf(x, mode, mode),
                                stats.triang.cdf(x, mode))

    def test_reduces_to_uniform(self):
        # shape parameters (0, 1) make the density flat, i.e. uniform
        x = np.linspace(0, 1, 10)
        assert_almost_equal(stats.trapezoid.pdf(x, 0, 1), stats.uniform.pdf(x))
        assert_almost_equal(stats.trapezoid.cdf(x, 0, 1), stats.uniform.cdf(x))

    def test_cases(self):
        # edge cases
        assert_almost_equal(stats.trapezoid.pdf(0, 0, 0), 2)
        assert_almost_equal(stats.trapezoid.pdf(1, 1, 1), 2)
        assert_almost_equal(stats.trapezoid.pdf(0.5, 0, 0.8),
                            1.11111111111111111)
        assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 1.0),
                            1.11111111111111111)

        # straightforward case
        assert_almost_equal(stats.trapezoid.pdf(0.1, 0.2, 0.8), 0.625)
        assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 0.8), 1.25)
        assert_almost_equal(stats.trapezoid.pdf(0.9, 0.2, 0.8), 0.625)

        assert_almost_equal(stats.trapezoid.cdf(0.1, 0.2, 0.8), 0.03125)
        assert_almost_equal(stats.trapezoid.cdf(0.2, 0.2, 0.8), 0.125)
        assert_almost_equal(stats.trapezoid.cdf(0.5, 0.2, 0.8), 0.5)
        assert_almost_equal(stats.trapezoid.cdf(0.9, 0.2, 0.8), 0.96875)
        assert_almost_equal(stats.trapezoid.cdf(1.0, 0.2, 0.8), 1.0)

    def test_moments_and_entropy(self):
        # issue #11795: improve precision of trapezoid stats
        # Apply formulas from Wikipedia for the following parameters:
        a, b, c, d = -3, -1, 2, 3  # => 1/3, 5/6, -3, 6
        p1, p2, loc, scale = (b-a) / (d-a), (c-a) / (d-a), a, d-a
        h = 2 / (d+c-b-a)

        def moment(n):
            # n-th raw moment from the Wikipedia closed form
            return (h * ((d**(n+2) - c**(n+2)) / (d-c)
                         - (b**(n+2) - a**(n+2)) / (b-a)) /
                    (n+1) / (n+2))

        mean = moment(1)
        var = moment(2) - mean**2
        entropy = 0.5 * (d-c+b-a) / (d+c-b-a) + np.log(0.5 * (d+c-b-a))
        assert_almost_equal(stats.trapezoid.mean(p1, p2, loc, scale),
                            mean, decimal=13)
        assert_almost_equal(stats.trapezoid.var(p1, p2, loc, scale),
                            var, decimal=13)
        assert_almost_equal(stats.trapezoid.entropy(p1, p2, loc, scale),
                            entropy, decimal=13)

        # Check boundary cases where scipy d=0 or d=1.
        assert_almost_equal(stats.trapezoid.mean(0, 0, -3, 6), -1, decimal=13)
        assert_almost_equal(stats.trapezoid.mean(0, 1, -3, 6), 0, decimal=13)
        assert_almost_equal(stats.trapezoid.var(0, 1, -3, 6), 3, decimal=13)

    def test_trapezoid_vect(self):
        # test that array-valued shapes and arguments are handled
        c = np.array([0.1, 0.2, 0.3])
        d = np.array([0.5, 0.6])[:, None]
        x = np.array([0.15, 0.25, 0.9])
        v = stats.trapezoid.pdf(x, c, d)

        cc, dd, xx = np.broadcast_arrays(c, d, x)

        res = np.empty(xx.size, dtype=xx.dtype)
        ind = np.arange(xx.size)
        for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
            res[i] = stats.trapezoid.pdf(x1, c1, d1)

        assert_allclose(v, res.reshape(v.shape), atol=1e-15)

        # Check that the stats() method supports vector arguments.
        v = np.asarray(stats.trapezoid.stats(c, d, moments="mvsk"))
        cc, dd = np.broadcast_arrays(c, d)
        res = np.empty((cc.size, 4))  # 4 stats returned per value
        ind = np.arange(cc.size)
        for i, c1, d1 in zip(ind, cc.ravel(), dd.ravel()):
            res[i] = stats.trapezoid.stats(c1, d1, moments="mvsk")

        assert_allclose(v, res.T.reshape(v.shape), atol=1e-15)

    def test_trapz(self):
        # Basic test for the `trapz` alias of trapezoid
        x = np.linspace(0, 1, 10)
        assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
class TestTriang:
    """Edge-case tests for the triangular distribution."""

    def test_edge_cases(self):
        # pdf/cdf at the degenerate modes c=0 and c=1 must not raise
        # floating-point errors and must hit the exact limiting values
        pdf_cases = [((0, 0), 2.), ((0.5, 0), 1.), ((1, 0), 0.),
                     ((0, 1), 0), ((0.5, 1), 1.), ((1, 1), 2)]
        cdf_cases = [((0., 0.), 0.), ((0.5, 0.), 0.75), ((1.0, 0.), 1.0),
                     ((0., 1.), 0.), ((0.5, 1.), 0.25), ((1., 1.), 1)]
        with np.errstate(all='raise'):
            for (x, c), ref in pdf_cases:
                assert_equal(stats.triang.pdf(x, c), ref)
            for (x, c), ref in cdf_cases:
                assert_equal(stats.triang.cdf(x, c), ref)
class TestMaxwell:
# reference values were computed with wolfram alpha
# erfc(x/sqrt(2)) + sqrt(2/pi) * x * e^(-x^2/2)
@pytest.mark.parametrize("x, ref",
[(20, 2.2138865931011177e-86),
(0.01, 0.999999734046458435)])
def test_sf(self, x, ref):
assert_allclose(stats.maxwell.sf(x), ref, rtol=1e-14)
# reference values were computed with wolfram alpha
# sqrt(2) * sqrt(Q^(-1)(3/2, q))
@pytest.mark.parametrize("q, ref",
[(0.001, 4.033142223656157022),
(0.9999847412109375, 0.0385743284050381),
(2**-55, 8.95564974719481)])
def test_isf(self, q, ref):
assert_allclose(stats.maxwell.isf(q), ref, rtol=1e-15)
class TestMielke:
    """Tests for Mielke's beta-kappa distribution."""

    def test_moments(self):
        k, s = 4.642, 0.597
        # the n-th moment is finite only for n < s
        assert_equal(stats.mielke(k, s).moment(1), np.inf)
        assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
        assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))

    def test_burr_equivalence(self):
        # mielke(k, s) coincides with burr(s, k/s)
        grid = np.linspace(0.01, 100, 50)
        k, s = 2.45, 5.32
        assert_allclose(stats.mielke.pdf(grid, k, s),
                        stats.burr.pdf(grid, s, k/s))
class TestBurr:
    """Tests for stats.burr (and related burr12/fisk endpoint behavior)."""

    def test_endpoints_7491(self):
        # gh-7491
        # Compute the pdf at the left endpoint dst.a.
        data = [
            [stats.fisk, (1,), 1],
            [stats.burr, (0.5, 2), 1],
            [stats.burr, (1, 1), 1],
            [stats.burr, (2, 0.5), 1],
            [stats.burr12, (1, 0.5), 0.5],
            [stats.burr12, (1, 1), 1.0],
            [stats.burr12, (1, 2), 2.0]]

        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [_correct_ for _f, _args, _correct_ in data]
        assert_array_almost_equal(ans, correct)

        ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [np.log(_correct_) for _f, _args, _correct_ in data]
        assert_array_almost_equal(ans, correct)

    def test_burr_stats_9544(self):
        # gh-9544.  Test from gh-9978
        c, d = 5.0, 3
        mean, variance = stats.burr(c, d).stats()
        # mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...
        # var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 -
        #       (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
        mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
        assert_allclose(mean, mean_hc)
        assert_allclose(variance, variance_hc)

    def test_burr_nan_mean_var_9544(self):
        # gh-9544.  Test from gh-9978
        # n-th moment is finite only for c*d > n; below, nonexistent
        # moments must come back as nan rather than bogus numbers
        c, d = 0.5, 3
        mean, variance = stats.burr(c, d).stats()
        assert_(np.isnan(mean))
        assert_(np.isnan(variance))
        c, d = 1.5, 3
        mean, variance = stats.burr(c, d).stats()
        assert_(np.isfinite(mean))
        assert_(np.isnan(variance))

        c, d = 0.5, 3
        e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
        assert_(np.isnan(e1))
        assert_(np.isnan(e2))
        assert_(np.isnan(e3))
        assert_(np.isnan(e4))
        c, d = 1.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isnan(e2))
        assert_(np.isnan(e3))
        assert_(np.isnan(e4))
        c, d = 2.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isfinite(e2))
        assert_(np.isnan(e3))
        assert_(np.isnan(e4))
        c, d = 3.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isfinite(e2))
        assert_(np.isfinite(e3))
        assert_(np.isnan(e4))
        c, d = 4.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isfinite(e2))
        assert_(np.isfinite(e3))
        assert_(np.isfinite(e4))

    def test_burr_isf(self):
        # reference values were computed via the reference distribution, e.g.
        # mp.dps = 100
        # Burr(c=5, d=3).isf([0.1, 1e-10, 1e-20, 1e-40])
        c, d = 5.0, 3.0
        q = [0.1, 1e-10, 1e-20, 1e-40]
        ref = [1.9469686558286508, 124.57309395989076, 12457.309396155173,
               124573093.96155174]
        assert_allclose(stats.burr.isf(q, c, d), ref, rtol=1e-14)
class TestBurr12:
    """Tests for the Burr XII (Singh-Maddala) distribution."""

    @pytest.mark.parametrize('scale, expected',
                             [(1.0, 2.3283064359965952e-170),
                              (3.5, 5.987114417447875e-153)])
    def test_delta_cdf(self, scale, expected):
        # _delta_cdf computes sf(x1) - sf(x2) deep in the right tail, where
        # a naive cdf subtraction would lose all precision.
        # Expected values computed with mpmath (mpmath.mp.dps = 60):
        #
        #     def burr12sf(x, c, d, scale):
        #         x, c, d, scale = map(mpmath.mpf, (x, c, d, scale))
        #         return (mpmath.mp.one + (x/scale)**c)**(-d)
        #
        #     float(burr12sf(2e5, 4, 8, 1) - burr12sf(4e5, 4, 8, 1))
        #     # 2.3283064359965952e-170
        #     float(burr12sf(2e5, 4, 8, 3.5) - burr12sf(4e5, 4, 8, 3.5))
        #     # 5.987114417447875e-153
        computed = stats.burr12._delta_cdf(2e5, 4e5, 4, 8, scale=scale)
        assert_allclose(computed, expected, rtol=1e-13)

    def test_moments_edge(self):
        # gh-18838 reported that burr12 moments could be invalid in the
        # edge case c*d == n.  Compare against exact values produced by
        # Mathematica, e.g. `SinghMaddalaDistribution[2, 2, 1]` at
        # Wolfram Alpha.
        c = d = 2
        mean = np.pi/4
        var = 1 - np.pi**2/16
        skew = np.pi**3/(32*var**1.5)
        # The fourth moment (kurtosis) does not exist when c*d == 4.
        expected = [mean, var, skew, np.nan]
        computed = stats.burr12(c, d).stats('mvsk')
        assert_allclose(computed, expected, rtol=1e-14)
class TestStudentizedRange:
    """Tests for the studentized range distribution.

    Checks the cdf/ppf against published tables, against pre-generated
    mpmath reference data shipped with the tests, and against R's ``ptukey``.
    """
    # For alpha = .05, .01, and .001, and for each value of
    # v = [1, 3, 10, 20, 120, inf], a Q was picked from each table for
    # k = [2, 8, 14, 20].

    # these arrays are written with `k` as column, and `v` as rows.
    # Q values are taken from table 3:
    # https://www.jstor.org/stable/2237810
    q05 = [17.97, 45.40, 54.33, 59.56,
           4.501, 8.853, 10.35, 11.24,
           3.151, 5.305, 6.028, 6.467,
           2.950, 4.768, 5.357, 5.714,
           2.800, 4.363, 4.842, 5.126,
           2.772, 4.286, 4.743, 5.012]
    q01 = [90.03, 227.2, 271.8, 298.0,
           8.261, 15.64, 18.22, 19.77,
           4.482, 6.875, 7.712, 8.226,
           4.024, 5.839, 6.450, 6.823,
           3.702, 5.118, 5.562, 5.827,
           3.643, 4.987, 5.400, 5.645]
    q001 = [900.3, 2272, 2718, 2980,
            18.28, 34.12, 39.69, 43.05,
            6.487, 9.352, 10.39, 11.03,
            5.444, 7.313, 7.966, 8.370,
            4.772, 6.039, 6.448, 6.695,
            4.654, 5.823, 6.191, 6.411]
    qs = np.concatenate((q05, q01, q001))
    ps = [.95, .99, .999]
    vs = [1, 3, 10, 20, 120, np.inf]
    ks = [2, 8, 14, 20]

    # Pair every (p, v, k) triple with its tabulated quantile q.
    data = list(zip(product(ps, vs, ks), qs))

    # A small selection of large-v cases generated with R's `ptukey`
    # Each case is in the format (q, k, v, r_result)
    r_data = [
        (0.1, 3, 9001, 0.002752818526842),
        (1, 10, 1000, 0.000526142388912),
        (1, 3, np.inf, 0.240712641229283),
        (4, 3, np.inf, 0.987012338626815),
        (1, 10, np.inf, 0.000519869467083),
    ]

    def test_cdf_against_tables(self):
        # cdf(q, k, v) must reproduce the tabulated probability p.
        for pvk, q in self.data:
            p_expected, v, k = pvk
            res_p = stats.studentized_range.cdf(q, k, v)
            assert_allclose(res_p, p_expected, rtol=1e-4)

    @pytest.mark.slow
    def test_ppf_against_tables(self):
        # ppf(p, k, v) must invert the tables back to the quantile q.
        for pvk, q_expected in self.data:
            p, v, k = pvk
            res_q = stats.studentized_range.ppf(p, k, v)
            assert_allclose(res_q, q_expected, rtol=5e-4)

    # High-precision mpmath reference values are pre-generated and shipped
    # with the test data; load them once at class-definition time.
    path_prefix = os.path.dirname(__file__)
    relative_path = "data/studentized_range_mpmath_ref.json"
    with open(os.path.join(path_prefix, relative_path)) as file:
        pregenerated_data = json.load(file)

    @pytest.mark.parametrize("case_result", pregenerated_data["cdf_data"])
    def test_cdf_against_mp(self, case_result):
        src_case = case_result["src_case"]
        mp_result = case_result["mp_result"]
        qkv = src_case["q"], src_case["k"], src_case["v"]
        res = stats.studentized_range.cdf(*qkv)

        assert_allclose(res, mp_result,
                        atol=src_case["expected_atol"],
                        rtol=src_case["expected_rtol"])

    @pytest.mark.parametrize("case_result", pregenerated_data["pdf_data"])
    def test_pdf_against_mp(self, case_result):
        src_case = case_result["src_case"]
        mp_result = case_result["mp_result"]
        qkv = src_case["q"], src_case["k"], src_case["v"]
        res = stats.studentized_range.pdf(*qkv)

        assert_allclose(res, mp_result,
                        atol=src_case["expected_atol"],
                        rtol=src_case["expected_rtol"])

    @pytest.mark.slow
    @pytest.mark.xfail_on_32bit("intermittent RuntimeWarning: invalid value.")
    @pytest.mark.parametrize("case_result", pregenerated_data["moment_data"])
    def test_moment_against_mp(self, case_result):
        src_case = case_result["src_case"]
        mp_result = case_result["mp_result"]
        mkv = src_case["m"], src_case["k"], src_case["v"]

        # Silence invalid value encountered warnings. Actual problems will be
        # caught by the result comparison.
        with np.errstate(invalid='ignore'):
            res = stats.studentized_range.moment(*mkv)

        assert_allclose(res, mp_result,
                        atol=src_case["expected_atol"],
                        rtol=src_case["expected_rtol"])

    def test_pdf_integration(self):
        k, v = 3, 10
        # Test whether PDF integration is 1 like it should be.
        res = quad(stats.studentized_range.pdf, 0, np.inf, args=(k, v))
        assert_allclose(res[0], 1)

    @pytest.mark.xslow
    def test_pdf_against_cdf(self):
        k, v = 3, 10

        # Test whether the integrated PDF matches the CDF using cumulative
        # integration. Use a small step size to reduce error due to the
        # summation. This is slow, but tests the results well.
        x = np.arange(0, 10, step=0.01)

        y_cdf = stats.studentized_range.cdf(x, k, v)[1:]
        y_pdf_raw = stats.studentized_range.pdf(x, k, v)
        y_pdf_cumulative = cumulative_trapezoid(y_pdf_raw, x)

        # Because of error caused by the summation, use a relatively large rtol
        assert_allclose(y_pdf_cumulative, y_cdf, rtol=1e-4)

    @pytest.mark.parametrize("r_case_result", r_data)
    def test_cdf_against_r(self, r_case_result):
        # Test large `v` values using R
        q, k, v, r_res = r_case_result
        with np.errstate(invalid='ignore'):
            res = stats.studentized_range.cdf(q, k, v)
        assert_allclose(res, r_res)

    @pytest.mark.slow
    @pytest.mark.xfail_on_32bit("intermittent RuntimeWarning: invalid value.")
    def test_moment_vectorization(self):
        # Test moment broadcasting. Calls `_munp` directly because
        # `rv_continuous.moment` is broken at time of writing. See gh-12192

        # Silence invalid value encountered warnings. Actual problems will be
        # caught by the result comparison.
        with np.errstate(invalid='ignore'):
            m = stats.studentized_range._munp([1, 2], [4, 5], [10, 11])

        assert_allclose(m.shape, (2,))

        with pytest.raises(ValueError, match="...could not be broadcast..."):
            stats.studentized_range._munp(1, [4, 5], [10, 11, 12])

    @pytest.mark.xslow
    def test_fitstart_valid(self):
        with suppress_warnings() as sup, np.errstate(invalid="ignore"):
            # the integration warning message may differ
            sup.filter(IntegrationWarning)
            k, df, _, _ = stats.studentized_range._fitstart([1, 2, 3])
        assert_(stats.studentized_range._argcheck(k, df))

    def test_infinite_df(self):
        # Check that the CDF and PDF infinite and normal integrators
        # roughly match for a high df case
        res = stats.studentized_range.pdf(3, 10, np.inf)
        res_finite = stats.studentized_range.pdf(3, 10, 99999)
        assert_allclose(res, res_finite, atol=1e-4, rtol=1e-4)

        res = stats.studentized_range.cdf(3, 10, np.inf)
        res_finite = stats.studentized_range.cdf(3, 10, 99999)
        assert_allclose(res, res_finite, atol=1e-4, rtol=1e-4)

    def test_df_cutoff(self):
        # Test that the CDF and PDF properly switch integrators at df=100,000.
        # The infinite integrator should be different enough that it fails
        # an allclose assertion. Also sanity check that using the same
        # integrator does pass the allclose with a 1-df difference, which
        # should be tiny.
        res = stats.studentized_range.pdf(3, 10, 100000)
        res_finite = stats.studentized_range.pdf(3, 10, 99999)
        res_sanity = stats.studentized_range.pdf(3, 10, 99998)
        assert_raises(AssertionError, assert_allclose, res, res_finite,
                      atol=1e-6, rtol=1e-6)
        assert_allclose(res_finite, res_sanity, atol=1e-6, rtol=1e-6)

        res = stats.studentized_range.cdf(3, 10, 100000)
        res_finite = stats.studentized_range.cdf(3, 10, 99999)
        res_sanity = stats.studentized_range.cdf(3, 10, 99998)
        assert_raises(AssertionError, assert_allclose, res, res_finite,
                      atol=1e-6, rtol=1e-6)
        assert_allclose(res_finite, res_sanity, atol=1e-6, rtol=1e-6)

    def test_clipping(self):
        # The result of this computation was -9.9253938401489e-14 on some
        # systems. The correct result is very nearly zero, but should not be
        # negative.
        q, k, v = 34.6413996195345746, 3, 339
        p = stats.studentized_range.sf(q, k, v)
        assert_allclose(p, 0, atol=1e-10)
        assert p >= 0
def test_540_567():
    # Regression test for tickets 540 and 567: norm.cdf used to return nan
    # for these inputs.
    cases = [
        ((-1.7624320982,), {}, 0.03899815971089126),
        ((-1.7624320983,), {}, 0.038998159702449846),
        ((1.38629436112,), dict(loc=0.950273420309, scale=0.204423758009),
         0.98353464004309321),
    ]
    for args, kwargs, expected in cases:
        assert_almost_equal(stats.norm.cdf(*args, **kwargs), expected,
                            decimal=10, err_msg='test_540_567')
def test_regression_ticket_1326():
    # chi2.pdf(0, 2) must not produce nan from a 0*log(0) term.
    value = stats.chi2.pdf(0.0, 2)
    assert_almost_equal(value, 0.5, 14)
def test_regression_tukey_lambda():
    # The Tukey-Lambda pdf must be well defined (no zeros, no nans) for
    # non-positive lambdas.
    x = np.linspace(-5.0, 5.0, 101)
    with np.errstate(divide='ignore'):
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            density = stats.tukeylambda.pdf(x, lam)
            assert_((density != 0.0).all())
            assert_(~np.isnan(density).all())

        # A positive lambda has bounded support, so part of the evaluated
        # grid falls outside of it and legitimately gets density zero.
        lam = np.array([[-1.0], [0.0], [2.0]])
        density = stats.tukeylambda.pdf(x, lam)
        assert_(~np.isnan(density).all())
        assert_((density[0] != 0.0).all())
        assert_((density[1] != 0.0).all())
        assert_((density[2] != 0.0).any())
        assert_((density[2] == 0.0).any())
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
    # A discrete distribution's docstring must advertise `pmf`, not the
    # continuous `pdf` signature.
    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
    assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
    # gh-1362: nan arguments and shape parameters must propagate to nan
    # results for every distribution method.
    with np.errstate(invalid='ignore'):
        for method in ('logcdf', 'cdf', 'logsf', 'sf',
                       'pdf', 'logpdf', 'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, method)(1, np.nan)))
        for method in ('logcdf', 'cdf', 'logsf', 'sf',
                       'pmf', 'logpmf', 'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, method)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    # Fitting with frozen parameters (floc / fscale / f0) must recover the
    # remaining free parameters.
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    sample = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
    with np.errstate(divide='ignore'):
        fitted = np.array(stats.lognorm.fit(sample, floc=0.))
    assert_almost_equal(fitted, true, decimal=2)

    fitted = np.array(stats.lognorm.fit(sample, fscale=0.5, loc=0))
    assert_almost_equal(fitted, true, decimal=2)

    fitted = np.array(stats.lognorm.fit(sample, f0=0.25, loc=0))
    assert_almost_equal(fitted, true, decimal=2)

    fitted = np.array(stats.lognorm.fit(sample, f0=0.25, floc=0))
    assert_almost_equal(fitted, true, decimal=2)

    # With floc fixed, the fitted scale of the normal distribution has the
    # closed form sqrt(mean((x - floc)**2)).
    np.random.seed(5678)
    loc = 1
    floc = 0.9
    sample = stats.norm.rvs(loc, 2., size=100)
    fitted = np.array(stats.norm.fit(sample, floc=floc))
    expected = np.array([floc, np.sqrt(((sample - floc)**2).mean())])
    assert_almost_equal(fitted, expected, decimal=4)
def test_regression_ticket_1530():
    # The default starting values must let the Cauchy fit converge.
    np.random.seed(654321)
    sample = stats.cauchy.rvs(size=100)
    fitted = stats.cauchy.fit(sample)
    assert_almost_equal(fitted, (0.045, 1.142), decimal=1)
def test_gh_pr_4806():
    # Cauchy fit starting values must cope with data shifted far from the
    # origin.
    np.random.seed(1234)
    base = np.random.randn(42)
    for offset in (10000.0, 1222333444.0):
        loc, scale = stats.cauchy.fit(base + offset)
        assert_allclose(loc, offset, atol=1.0)
        assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    # Variance and kurtosis of the Tukey Lambda distribution; more tests
    # live in test_tukeylambda_stats.py.  Nonzero expected entries were
    # computed with mpmath (the lam=0 case is exact).
    cases = [
        (0, [0, np.pi**2/3, 0, 1.2]),
        (3.13, [0, 0.0269220858861465102, 0, -0.898062386219224104]),
        (0.14, [0, 2.11029702221450250, 0, -0.02708377353223019456]),
    ]
    for lam, expected in cases:
        mvsk = stats.tukeylambda.stats(lam, moments='mvsk')
        assert_almost_equal(mvsk, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
    # logpmf far in the tail must stay finite (no overflow to -inf/nan).
    logp = stats.poisson.logpmf(1500, 200)
    assert_(np.isfinite(logp))
def test_powerlaw_stats():
    """Test the powerlaw stats function.

    Also a regression test for ticket 1548.

    Exact values, using E[X**k] = a / (a + k):
      mean:      mu = a / (a + 1)
      variance:  sigma**2 = a / ((a + 2) * (a + 1)**2)
      skewness:  gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
                 (simplified form of
                 (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3; see
                 https://en.wikipedia.org/wiki/Skewness)
      kurtosis:  gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
                 (excess kurtosis mu_4 / sigma**4 - 3 with
                 mu_4 = 3*a*(3*a**2 - a + 2)
                        / ((a+1)**4 * (a+2) * (a+3) * (a+4)),
                 simplification checked with sympy; see
                 https://en.wikipedia.org/wiki/Kurtosis)
    """
    for a, exact_mvsk in [(1.0, (0.5, 1./12, 0.0, -1.2)),
                          (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]:
        computed = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(computed, exact_mvsk)
def test_powerlaw_edge():
    # Regression test for gh-3986: logpdf at the left endpoint with a=1.
    assert_equal(stats.powerlaw.logpdf(0, 1), 0.0)
def test_exponpow_edge():
    # Regression test for gh-3982: values at the left endpoint x = 0.
    assert_equal(stats.exponpow.logpdf(0, 1), 0.0)
    # The limit at x = 0 depends on the shape parameter b.
    shapes = [0.25, 1.0, 1.5]
    assert_equal(stats.exponpow.pdf(0, shapes), [np.inf, 1.0, 0.0])
    assert_equal(stats.exponpow.logpdf(0, shapes), [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
    # Regression test for gh-3985: pdf at x = 0 with a = c = 1.
    assert_equal(stats.gengamma.pdf(0, 1, 1), 1.0)
@pytest.mark.parametrize("a, c, ref, tol",
                         [(1500000.0, 1, 8.529426144018633, 1e-15),
                          (1e+30, 1, 35.95771492811536, 1e-15),
                          (1e+100, 1, 116.54819318290696, 1e-15),
                          (3e3, 1, 5.422011196659015, 1e-13),
                          (3e6, -1e100, -236.29663213396054, 1e-15),
                          (3e60, 1e-100, 1.3925371786831085e+102, 1e-15)])
def test_gengamma_extreme_entropy(a, c, ref, tol):
    # Reference values were computed with mpmath (mp.dps = 500):
    #
    #     def gen_entropy(a, c):
    #         a, c = mp.mpf(a), mp.mpf(c)
    #         val = mp.digamma(a)
    #         return float(a*(mp.one - val) + val/c
    #                      + mp.loggamma(a) - mp.log(abs(c)))
    computed = stats.gengamma.entropy(a, c)
    assert_allclose(computed, ref, rtol=tol)
def test_gengamma_endpoint_with_neg_c():
    # With c < 0 the density vanishes at the left endpoint x = 0.
    assert stats.gengamma.pdf(0, 1, -1) == 0.0
    assert stats.gengamma.logpdf(0, 1, -1) == -np.inf
def test_gengamma_munp():
    # Regression tests for gh-4724: negative-order moments.
    # For c=1 the exact value of E[X**-2] is 1/((a-1)*(a-2)).
    assert_almost_equal(stats.gengamma._munp(-2, 200, 1.), 1./199/198)
    assert_almost_equal(stats.gengamma._munp(-2, 10, 1.), 1./9/8)
def test_ksone_fit_freeze():
    # Regression test for ticket #1638: fitting ksone to this sample used
    # to hang; the test passes if fit() simply returns.
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])

    # These warnings are expected side effects of the fit and are unrelated
    # to the freeze being tested, so silence them.
    with np.errstate(invalid='ignore'):
        with suppress_warnings() as sup:
            sup.filter(IntegrationWarning,
                       "The maximum number of subdivisions .50. has been "
                       "achieved.")
            sup.filter(RuntimeWarning,
                       "floating point number truncated to an integer")
            stats.ksone.fit(d)
def test_norm_logcdf():
    # Precision of norm.logcdf deep in the left tail; this precision was
    # enhanced in ticket 1614.  Expected values from R.
    x = -np.asarray(list(range(0, 120, 4)))
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]
    assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)

    # The complex-valued code path must agree with the real one.
    assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)

    # Complex-step derivative check:
    # d(logcdf)/dx = pdf/cdf = exp(logpdf - logcdf).
    numerical = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
    analytic = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
    assert_allclose(numerical, analytic, atol=1e-10)
def test_levy_cdf_ppf():
    # levy.cdf, including small arguments, plus the ppf round trip.
    x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
    # Expected values were calculated separately with mpmath, e.g.
    #   mpmath.mp.dps = 100
    #   cdf = mpmath.erfc(mpmath.sqrt(1/(2*mpmath.mp.mpf('0.01'))))
    expected = np.array([0.9747728793699604,
                         0.3173105078629141,
                         0.1572992070502851,
                         0.0015654022580025495,
                         1.523970604832105e-23,
                         1.795832784800726e-219])
    computed = stats.levy.cdf(x)
    assert_allclose(computed, expected, rtol=1e-10)

    # ppf(expected) should map back to x.
    roundtrip = stats.levy.ppf(expected)
    assert_allclose(roundtrip, x, rtol=1e-13)
def test_levy_sf():
    # Large values, far into the right tail of the distribution.
    # Expected values were calculated with mpmath.
    x = np.array([1e15, 1e25, 1e35, 1e50])
    expected = np.array([2.5231325220201597e-08,
                         2.52313252202016e-13,
                         2.52313252202016e-18,
                         7.978845608028653e-26])
    computed = stats.levy.sf(x)
    assert_allclose(computed, expected, rtol=1e-14)
# The expected values for levy.isf(p) were calculated with mpmath.
# For loc=0 and scale=1, the inverse SF can be computed with
#
# import mpmath
#
# def levy_invsf(p):
# return 1/(2*mpmath.erfinv(p)**2)
#
# For example, with mpmath.mp.dps set to 60, float(levy_invsf(1e-20))
# returns 6.366197723675814e+39.
#
@pytest.mark.parametrize('p, expected_isf',
                         [(1e-20, 6.366197723675814e+39),
                          (1e-8, 6366197723675813.0),
                          (0.375, 4.185810119346273),
                          (0.875, 0.42489442055310134),
                          (0.999, 0.09235685880262713),
                          (0.9999999962747097, 0.028766845244146945)])
def test_levy_isf(p, expected_isf):
    # Reference values computed with mpmath; see the comment block above.
    computed = stats.levy.isf(p)
    assert_allclose(computed, expected_isf, atol=5e-15)
def test_levy_l_sf():
    # levy_l.sf for small (close-to-zero) arguments.
    # Expected values were calculated with mpmath.
    x = np.array([-0.016, -0.01, -0.005, -0.0015])
    expected = np.array([2.6644463892359302e-15,
                         1.523970604832107e-23,
                         2.0884875837625492e-45,
                         5.302850374626878e-147])
    computed = stats.levy_l.sf(x)
    assert_allclose(computed, expected, rtol=1e-13)
def test_levy_l_isf():
    # Round trip sf(isf(p)) must recover p, including a tiny probability.
    p = np.array([3.0e-15, 0.25, 0.99])
    recovered = stats.levy_l.sf(stats.levy_l.isf(p))
    assert_allclose(recovered, p, rtol=5e-14)
def test_hypergeom_interval_1802():
    # The first two cases used to loop endlessly (ticket 1802); the third
    # one already worked before the fix.
    for alpha, expected in [(.95, (152.0, 197.0)),
                            (.945, (152.0, 197.0)),
                            (.94, (153.0, 196.0))]:
        assert_equal(stats.hypergeom.interval(alpha, 187601, 43192, 757),
                     expected)
    # Degenerate case where the support collapses to one point (a == b).
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    np.random.seed(1234)
    # Check that a TypeError is raised when too many args are given to a method
    # Regression test for ticket 1815.
    x = np.linspace(0.1, 0.7, num=5)

    # gamma takes one shape parameter, so anything beyond (a, loc, scale)
    # (whether positional or duplicated via keyword) must be rejected.
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)

    # These should not give errors
    stats.gamma.pdf(x, 2, 3)  # loc=3
    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)

    # Also for a discrete distribution
    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)

    # And for distributions with 0, 2 and 3 args respectively
    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
    # Trac #955: the special-function based cdf must match the cdf obtained
    # by integrating the pdf.
    grid = np.arange(20, 25, 0.2)
    via_special = stats.ncx2.cdf(grid, 2, 1.07458615e+02)
    via_quadrature = stats.ncx2._cdfvec(grid, 2, 1.07458615e+02)
    assert_allclose(via_special, via_quadrature, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    # gh-1577: ncx2.pdf must return 0 (not nan) in extreme tails, i.e.
    # nan_to_num must not be needed inside ncx2.pdf.  RuntimeWarnings are
    # escalated to errors so any invalid intermediate value fails the test.
    with warnings.catch_warnings():
        warnings.simplefilter('error', RuntimeWarning)
        dfs = np.arange(340, 350)
        assert_equal(stats.ncx2.pdf(1, dfs, 2), 0)
        assert_(np.isneginf(stats.ncx2.logpdf(1, dfs, 2)).all())

    # logpdf keeps extended precision where pdf underflows to 0.
    with warnings.catch_warnings():
        warnings.simplefilter('error', RuntimeWarning)
        assert_equal(stats.ncx2.pdf(10000, 3, 12), 0)
        assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883)
@pytest.mark.parametrize('method, expected', [
    ('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
    ('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
    ('logpdf', np.array([-15.90413011, -17.88416331])),
    ('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
    # gh-5441: ncx2 with nc=0 must be identical to chi2.
    # Reference values from R (v3.5.1):
    #   options(digits=10)
    #   pchisq(0.1, df=10, ncp=c(0,4))
    #   dchisq(0.1, df=10, ncp=c(0,4))
    #   dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
    #   qchisq(0.1, df=10, ncp=c(0,4))
    computed = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
    assert_allclose(computed, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
    # gh-5441: with nc=0, ncx2 sampling must reduce exactly to chi2.
    sampled = stats.ncx2.rvs(df=10, nc=0, random_state=1)
    reference = stats.chi2.rvs(df=10, random_state=1)
    assert_allclose(sampled, reference, atol=1e-15)
def test_ncx2_gh12731():
    # gh-12731: for huge nc, cdf values far left of the mean used to come
    # back as 0.5 instead of 0.
    noncentrality = 10**np.arange(5, 10)
    assert_equal(stats.ncx2.cdf(1e4, df=1, nc=noncentrality), 0)
def test_ncx2_gh8665():
    # gh-8665: the sf used to tend to a nonzero value in the far tail.
    x = np.array([4.99515382e+00, 1.07617327e+01, 2.31854502e+01,
                  4.99515382e+01, 1.07617327e+02, 2.31854502e+02,
                  4.99515382e+02, 1.07617327e+03, 2.31854502e+03,
                  4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
                  4.99515382e+04])
    nu, lam = 20, 499.51538166556196
    computed_sf = stats.ncx2.sf(x, df=nu, nc=lam)
    # Reference computed in R (which has no direct survival function, so
    # 1 - pchisq is used):
    #   options(digits=16)
    #   nu <- 20
    #   lam <- 499.51538166556196
    #   1 - pchisq(x, df = nu, ncp = lam)
    sf_expected = [1.0000000000000000, 1.0000000000000000, 1.0000000000000000,
                   1.0000000000000000, 1.0000000000000000, 0.9999999999999888,
                   0.6646525582135460, 0.0000000000000000, 0.0000000000000000,
                   0.0000000000000000, 0.0000000000000000, 0.0000000000000000,
                   0.0000000000000000]
    assert_allclose(computed_sf, sf_expected, atol=1e-12)
def test_ncx2_gh11777():
    # gh-11777: at high degrees of freedom, pdf values used to be clipped
    # to zero when the non-centrality parameter was sufficiently less than
    # df.
    df, nc = 6700, 5300
    grid = np.linspace(stats.ncx2.ppf(0.001, df, nc),
                       stats.ncx2.ppf(0.999, df, nc), num=10000)
    pdf_values = stats.ncx2.pdf(grid, df, nc)
    # Only looking for obvious discrepancies (the clipping), so a normal
    # approximation with a huge tolerance is good enough.
    normal_approx = stats.norm.pdf(grid, df + nc, np.sqrt(2 * df + 4 * nc))
    assert_allclose(pdf_values, normal_approx, atol=1e-4)
# Expected values for foldcauchy.sf were computed with mpmath:
#
# from mpmath import mp
# mp.dps = 60
# def foldcauchy_sf(x, c):
# x = mp.mpf(x)
# c = mp.mpf(c)
# return mp.one - (mp.atan(x - c) + mp.atan(x + c))/mp.pi
#
# E.g.
#
# >>> float(foldcauchy_sf(2, 1))
# 0.35241638234956674
#
@pytest.mark.parametrize('x, c, expected',
                         [(2, 1, 0.35241638234956674),
                          (2, 2, 0.5779791303773694),
                          (1e13, 1, 6.366197723675813e-14),
                          (2e16, 1, 3.183098861837907e-17),
                          (1e13, 2e11, 6.368745221764519e-14),
                          (0.125, 200, 0.999998010612169)])
def test_foldcauchy_sf(x, c, expected):
    # Reference values computed with mpmath; see the comment block above.
    computed = stats.foldcauchy.sf(x, c)
    assert_allclose(computed, expected, 2e-15)
# The same mpmath code shown in the comments above test_foldcauchy_sf()
# is used to create these expected values.
@pytest.mark.parametrize('x, expected',
                         [(2, 0.2951672353008665),
                          (1e13, 6.366197723675813e-14),
                          (2e16, 3.183098861837907e-17),
                          (5e80, 1.2732395447351629e-81)])
def test_halfcauchy_sf(x, expected):
    # Expected values produced by the same mpmath code shown in the
    # comments above test_foldcauchy_sf().
    computed = stats.halfcauchy.sf(x)
    assert_allclose(computed, expected, 2e-15)
# Expected value computed with mpmath:
# expected = mp.cot(mp.pi*p/2)
@pytest.mark.parametrize('p, expected',
                         [(0.9999995, 7.853981633329977e-07),
                          (0.975, 0.039290107007669675),
                          (0.5, 1.0),
                          (0.01, 63.65674116287158),
                          (1e-14, 63661977236758.13),
                          (5e-80, 1.2732395447351627e+79)])
def test_halfcauchy_isf(p, expected):
    # Expected values computed with mpmath: expected = mp.cot(mp.pi*p/2)
    computed = stats.halfcauchy.isf(p)
    assert_allclose(computed, expected)
def test_foldnorm_zero():
    # gh-2399: shape parameter c=0 was not enabled; rv.cdf(0) used to be nan.
    frozen = stats.foldnorm(0, scale=1)
    assert_equal(frozen.cdf(0), 0)
# Expected values for foldnorm.sf were computed with mpmath:
#
# from mpmath import mp
# mp.dps = 60
# def foldnorm_sf(x, c):
# x = mp.mpf(x)
# c = mp.mpf(c)
# return mp.ncdf(-x+c) + mp.ncdf(-x-c)
#
# E.g.
#
# >>> float(foldnorm_sf(2, 1))
# 0.16000515196308715
#
@pytest.mark.parametrize('x, c, expected',
                         [(2, 1, 0.16000515196308715),
                          (20, 1, 8.527223952630977e-81),
                          (10, 15, 0.9999997133484281),
                          (25, 15, 7.619853024160525e-24)])
def test_foldnorm_sf(x, c, expected):
    # Reference values computed with mpmath; see the comment block above.
    computed = stats.foldnorm.sf(x, c)
    assert_allclose(computed, expected, 1e-14)
def test_stats_shapes_argcheck():
    # gh-2678: `stats` used to fail for vector shape parameters when some
    # entries were outside of the allowed range.
    # 0 is not a legal `a` for invgamma; its slot must come back as nan.
    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)
    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    expected = tuple(np.r_[np.nan, valid] for valid in mv2)
    assert_equal(expected, mv3)

    # -1 is not a legal shape parameter for lognorm.
    mv3 = stats.lognorm.stats([2, 2.4, -1])
    mv2 = stats.lognorm.stats([2, 2.4])
    expected = tuple(np.r_[valid, np.nan] for valid in mv2)
    assert_equal(expected, mv3)

    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
    # Minimal distribution with one shape parameter `a` and a constant
    # (deliberately non-normalized) density; a fixture for the
    # subclassing/shape-inspection tests below.
    def _pdf(self, x, a):
        return 42
class _distr2_gen(stats.rv_continuous):
    # Fixture defining only _cdf, so _pdf must be derived numerically by
    # the framework in the subclassing tests below.
    def _cdf(self, x, a):
        return 42 * a + x
class _distr3_gen(stats.rv_continuous):
    # Fixture with deliberately inconsistent shape signatures between
    # _pdf (a, b) and _cdf (a).
    def _pdf(self, x, a, b):
        return a + b

    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency.
        return 42 * a + x
class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    def _pdf(self, x, a, b):
        return a*x + b

    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes:
    # Construct a distribution w/ explicit shapes parameter and test it.

    def test_correct_shapes(self):
        dummy_distr = _distr_gen(name='dummy', shapes='a')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_wrong_shapes_1(self):
        # Shape declared as 'A', so keyword 'a' must be rejected.
        dummy_distr = _distr_gen(name='dummy', shapes='A')
        assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))

    def test_wrong_shapes_2(self):
        # More declared shapes than _pdf accepts.
        dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
        dct = dict(a=1, b=2, c=3)
        assert_raises(TypeError, dummy_distr.pdf, 1, **dct)

    def test_shapes_string(self):
        # shapes must be a string
        dct = dict(name='dummy', shapes=42)
        assert_raises(TypeError, _distr_gen, **dct)

    def test_shapes_identifiers_1(self):
        # shapes must be a comma-separated list of valid python identifiers
        dct = dict(name='dummy', shapes='(!)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_2(self):
        dct = dict(name='dummy', shapes='4chan')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_3(self):
        dct = dict(name='dummy', shapes='m(fti)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_nodefaults(self):
        dct = dict(name='dummy', shapes='a=2')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_args(self):
        dct = dict(name='dummy', shapes='*args')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_kwargs(self):
        dct = dict(name='dummy', shapes='**kwargs')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_keywords(self):
        # python keywords cannot be used for shape parameters
        dct = dict(name='dummy', shapes='a, b, c, lambda')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_signature(self):
        # test explicit shapes which agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a')
        assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)

    def test_shapes_signature_inconsistent(self):
        # test explicit shapes which do not agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a, b')
        assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))

    def test_star_args(self):
        # test _pdf with only starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg

        dist = _dist_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
        assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
        assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))

    def test_star_args_2(self):
        # test _pdf with named & starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, offset, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg + offset

        dist = _dist_gen(shapes='offset, extra_kwarg')
        assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
                     stats.norm.pdf(0.5)*33 + 111)
        assert_equal(dist.pdf(0.5, 111, 33),
                     stats.norm.pdf(0.5)*33 + 111)

    def test_extra_kwarg(self):
        # **kwargs to _pdf are ignored.
        # this is a limitation of the framework (_pdf(x, *goodargs))
        class _distr_gen(stats.rv_continuous):
            def _pdf(self, x, *args, **kwargs):
                # _pdf should handle *args, **kwargs itself. Here "handling"
                # is ignoring *args and looking for ``extra_kwarg`` and using
                # that.
                extra_kwarg = kwargs.pop('extra_kwarg', 1)
                return stats.norm._pdf(x) * extra_kwarg

        dist = _distr_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))

    def test_shapes_empty_string(self):
        # shapes='' is equivalent to shapes=None
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x):
                return stats.norm.pdf(x)

        dist = _dist_gen(shapes='')
        assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes:
    # Construct a distribution w/o explicit shapes parameter and test it.
    # Relies on the module-level fixture classes _distr_gen, _distr2_gen,
    # _distr3_gen and _distr6_gen defined earlier in this file.
    def test_only__pdf(self):
        # Shape 'a' is inferred from the signature of _distr_gen._pdf.
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)
    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)
    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)
    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        assert_raises(TypeError, _distr3_gen, name='dummy')
    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
    # Regexes matching artifacts of broken docstring generation: a doubled
    # comma, an opening paren followed by a comma, and a leading colon.
    badones = [r',\s*,', r'\(\s*,', r'^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            for regex in badones:
                assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
    """Methods evaluated at +inf must reach their limits, not return nan."""
    sf_at_inf = stats.skellam.sf(np.inf, 10, 11)
    assert_almost_equal(sf_at_inf, 0)
    cdf_at_inf = stats.ncx2._cdf(np.inf, 8, 0.1)
    assert_almost_equal(cdf_at_inf, 1)
def test_lomax_accuracy():
    """Regression test for gh-4033: ppf(cdf(x)) round-trips tiny x."""
    tiny = 1e-100
    roundtrip = stats.lomax.ppf(stats.lomax.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_truncexpon_accuracy():
    """Regression test for gh-4035: ppf(cdf(x)) round-trips tiny x."""
    tiny = 1e-100
    roundtrip = stats.truncexpon.ppf(stats.truncexpon.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_rayleigh_accuracy():
    """Regression test for gh-4034: isf(sf(x)) round-trips far-tail x."""
    far_tail = 9
    roundtrip = stats.rayleigh.isf(stats.rayleigh.sf(far_tail, 1), 1)
    assert_almost_equal(roundtrip, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
    """Regression test for gh-6219: no warnings for shape c == 0."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        for method in (stats.genextreme.cdf,
                       stats.genextreme.pdf,
                       stats.genextreme.ppf):
            method(.5, 0)
        stats.genextreme.logpdf(-np.inf, 0.0)
    assert_equal(len(caught), 0)
def test_genextreme_entropy():
    # Regression test for gh-5181.  The entropy of genextreme is
    # euler_gamma*(1 - c) + log(scale) + 1 for shape parameter c.
    euler_gamma = 0.5772156649015329
    cases = [
        # (shape c, scale, expected entropy)
        (-1.0, 1, 2*euler_gamma + 1),
        (0, 1, euler_gamma + 1),
        (-2.0, 10, euler_gamma*3 + np.log(10) + 1),
        (10, 1, -9*euler_gamma + 1),
        (-10, 1, 11*euler_gamma + 1),
    ]
    for c, scale, expected in cases:
        h = stats.genextreme.entropy(c, scale=scale)
        assert_allclose(h, expected, rtol=1e-14)
    # c == 1 yields exactly 1
    assert_equal(stats.genextreme.entropy(1.0), 1)
def test_genextreme_sf_isf():
    # Reference values computed with mpmath (mp.dps = 1000) using the
    # Wikipedia formulation; note that Wikipedia's shape xi has the sign
    # convention opposite to scipy's c (c = -xi):
    #
    # def mp_genextreme_sf(x, xi, mu=0, sigma=1):
    #     if xi != 0:
    #         t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
    #     else:
    #         t = mpmath.exp(-(x - mu)/sigma)
    #     return 1 - mpmath.exp(-t)
    cases = [
        # (x, scipy shape c, expected sf)
        (1e8, -0.125, 1.6777205262585625e-57),
        (7.98, 0.125, 1.52587890625e-21),
        (7.98, 0, 0.00034218086528426593),
    ]
    for x, c, expected_sf in cases:
        tail_prob = stats.genextreme.sf(x, c)
        assert_allclose(tail_prob, expected_sf)
        # isf must invert sf back to x
        assert_allclose(stats.genextreme.isf(tail_prob, c), x)
def test_burr12_ppf_small_arg():
    # The expected quantile for a tiny probability was computed with mpmath
    # (mp.dps = 100) from the closed form:
    #     ((1 - p)**(-1/d) - 1)**(1/c)
    # with p = 1e-16, c = 2, d = 3, giving 5.7735026918962575e-09.
    c, d = 2, 3
    tiny_prob = 1e-16
    quantile = stats.burr12.ppf(tiny_prob, c, d)
    assert_allclose(quantile, 5.7735026918962575e-09)
def test_crystalball_function():
    """
    All values are calculated using the independent implementation of the
    ROOT framework (see https://root.cern.ch/).
    Corresponding ROOT code is given in the comments.
    """
    X = np.linspace(-5.0, 5.0, 21)[:-1]

    # pdf with default loc/scale
    # for(float x = -5.0; x < 5.0; x+=0.5)
    #   std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
    calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
    expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
                         0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
                         0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
                         0.013226, 0.00334407, 0.000658486, 0.000100982,
                         1.20606e-05])
    assert_allclose(expected, calculated, rtol=0.001)

    # for(float x = -5.0; x < 5.0; x+=0.5)
    #   std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
    expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
                         0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
                         0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
                         0.0530497, 0.0172227, 0.00435458, 0.000857469,
                         0.000131497, 1.57051e-05])
    assert_allclose(expected, calculated, rtol=0.001)

    # pdf with shifted loc and non-unit scale
    # for(float x = -5.0; x < 5.0; x+=0.5) {
    #   std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
    #   std::cout << ", ";
    # }
    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
    expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
                         0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
                         0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
                         0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
                         0.0265249])
    assert_allclose(expected, calculated, rtol=0.001)

    # cdf with default loc/scale
    # for(float x = -5.0; x < 5.0; x+=0.5)
    #   std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
    calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
    expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
                         0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
                         0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
                         0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
    assert_allclose(expected, calculated, rtol=0.001)

    # for(float x = -5.0; x < 5.0; x+=0.5)
    #   std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
    expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
                         0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
                         0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
                         0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
                         0.999997])
    assert_allclose(expected, calculated, rtol=0.001)

    # cdf with shifted loc and non-unit scale
    # for(float x = -5.0; x < 5.0; x+=0.5) {
    #   std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
    #   std::cout << ", ";
    # }
    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
    expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
                         0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
                         0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
                         0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
    assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
    """
    All values are calculated using the pdf formula and the integrate function
    of Mathematica
    """
    # The Last two (alpha, n) pairs test the special case n == alpha**2
    beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
    m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])

    # The distribution should be correctly normalised
    expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
    calculated_0th_moment = stats.crystalball._munp(0, beta, m)
    assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)

    # calculated using wolframalpha.com
    # e.g. for beta = 2 and m = 3 we calculate the norm like this:
    #   integrate exp(-x^2/2) from -2 to infinity +
    #   integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
    norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])

    # Raw moments 1..5; an inf entry means the moment diverges for that
    # (beta, m) pair (the power-law tail decays too slowly).
    a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
    expected_1th_moment = a / norm
    calculated_1th_moment = stats.crystalball._munp(1, beta, m)
    assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)

    a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
    expected_2th_moment = a / norm
    calculated_2th_moment = stats.crystalball._munp(2, beta, m)
    assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)

    a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
    expected_3th_moment = a / norm
    calculated_3th_moment = stats.crystalball._munp(3, beta, m)
    assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)

    a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
    expected_4th_moment = a / norm
    calculated_4th_moment = stats.crystalball._munp(4, beta, m)
    assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)

    a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
    expected_5th_moment = a / norm
    calculated_5th_moment = stats.crystalball._munp(5, beta, m)
    assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
def test_crystalball_entropy():
    # Regression test for gh-13602: entropy() must agree with a numerical
    # integral of -p*log(p) over the whole support.
    dist = stats.crystalball(2, 3)
    analytic = dist.entropy()
    # [-20000, 30] is wide enough to act as (-inf, inf) for this pdf.
    grid = np.linspace(-20000, 30, 200000)
    numeric = trapezoid(entr(dist.pdf(grid)), grid)
    assert_allclose(analytic, numeric, rtol=1e-7)
def test_invweibull_fit():
    """
    Test fitting invweibull to data.

    Here is a the same calculation in R:

    > library(evd)
    > library(fitdistrplus)
    > x = c(1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99)
    > result = fitdist(x, 'frechet', control=list(reltol=1e-13),
    +                  fix.arg=list(loc=0), start=list(shape=2, scale=3))
    > result
    Fitting of the distribution ' frechet ' by maximum likelihood
    Parameters:
          estimate Std. Error
    shape 1.048482  0.2261815
    scale 3.099456  0.8292887
    Fixed parameters:
        value
    loc     0
    """
    # A tight optimizer so the fit matches the R reference closely.
    def optimizer(func, x0, args=(), disp=0):
        return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12)

    data = np.array([1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99])
    shape, location, scale = stats.invweibull.fit(data, floc=0,
                                                  optimizer=optimizer)
    assert_allclose(shape, 1.048482, rtol=5e-6)
    assert location == 0
    assert_allclose(scale, 3.099456, rtol=5e-6)
# Reference values computed with mpmath.
@pytest.mark.parametrize('x, c, expected',
                         [(3, 1.5, 0.175064510070713299327),
                          (2000, 1.5, 1.11802773877318715787e-5),
                          (2000, 9.25, 2.92060308832269637092e-31),
                          (1e15, 1.5, 3.16227766016837933199884e-23)])
def test_invweibull_sf(x, c, expected):
    # sf must stay accurate deep into the tail.
    assert_allclose(stats.invweibull.sf(x, c), expected, rtol=1e-15)
# Reference values computed with mpmath.
@pytest.mark.parametrize('p, c, expected',
                         [(0.5, 2.5, 1.15789669836468183976),
                          (3e-18, 5, 3195.77171838060906447)])
def test_invweibull_isf(p, c, expected):
    # isf must stay accurate for very small tail probabilities.
    assert_allclose(stats.invweibull.isf(p, c), expected, rtol=1e-15)
@pytest.mark.parametrize(
    'df1,df2,x',
    [(2, 2, [-0.5, 0.2, 1.0, 2.3]),
     (4, 11, [-0.5, 0.2, 1.0, 2.3]),
     (7, 17, [1, 2, 3, 4, 5])]
)
def test_ncf_edge_case(df1, df2, x):
    # Regression test for gh-11660: the noncentral F with nc = 0 must
    # reduce to the (central) F distribution.
    zero_nc = 0
    assert_allclose(stats.f.cdf(x, df1, df2),
                    stats.ncf.cdf(x, df1, df2, zero_nc), rtol=1e-14)
    # pdf agreement — useful once ncf_gen._skip_pdf replaces the generic
    # pdf implementation.
    assert_allclose(stats.f.pdf(x, df1, df2),
                    stats.ncf.pdf(x, df1, df2, zero_nc), rtol=1e-6)
def test_ncf_variance():
    # Regression test for gh-10658 (incorrect variance formula for ncf).
    # The reference value 42.75 for ncf.var(2, 6, 4) can be verified with
    # Wolfram Alpha (Variance[NoncentralFRatioDistribution[2, 6, 4]]) or
    # with Boost's noncentral F implementation.
    assert_allclose(stats.ncf.var(2, 6, 4), 42.75, rtol=1e-14)
def test_ncf_cdf_spotcheck():
    # Regression test for gh-15582, spot-checked against independent tools:
    #   R:      pf(20, df1 = 6, df2 = 33, ncp = 30.4)  = 0.998921
    #   MATLAB: ncfcdf(20, 6, 33, 30.4)                = 0.998921
    reference = 0.998921
    computed = stats.ncf.cdf(20, 6, 33, 30.4)
    assert_allclose(reference, np.round(computed, decimals=6))
@pytest.mark.skipif(sys.maxsize <= 2**32,
                    reason="On some 32-bit the warning is not raised")
def test_ncf_ppf_issue_17026():
    # Regression test for gh-17026: vectorized ncf.ppf must agree with
    # element-by-element scalar evaluation even when a RuntimeWarning is
    # emitted along the way.
    x = np.linspace(0, 1, 600)
    x[0] = 1e-16  # replace the exact 0 endpoint with a near-zero probability
    par = (0.1, 2, 5, 0, 1)
    with pytest.warns(RuntimeWarning):
        q = stats.ncf.ppf(x, *par)
        q0 = [stats.ncf.ppf(xi, *par) for xi in x]
    assert_allclose(q, q0)
class TestHistogram:
    """Tests for `stats.rv_histogram` built from `np.histogram` output."""

    def setup_method(self):
        np.random.seed(1234)
        # We have 8 bins
        # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
        # But actually np.histogram will put the last 9 also in the [8,9) bin!
        # Therefore there is a slight difference below for the last bin, from
        # what you might have expected.
        histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
                                  6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
        self.template = stats.rv_histogram(histogram)
        data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
        norm_histogram = np.histogram(data, bins=50)
        self.norm_template = stats.rv_histogram(norm_histogram)

    def test_pdf(self):
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
                                 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
                                 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
                                 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
                                 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
        assert_allclose(self.template.pdf(values), pdf_values)
        # Test explicitly the corner cases:
        # As stated above the pdf in the bin [8,9) is greater than
        # one would naively expect because np.histogram put the 9
        # into the [8,9) bin.
        assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
        assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
        # 9 is outside our defined bins [8,9) hence the pdf is already 0
        # for a continuous distribution this is fine, because a single value
        # does not have a finite probability!
        assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
        assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.pdf(x),
                        stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_cdf_ppf(self):
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
                                 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
                                 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
                                 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
                                 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
        assert_allclose(self.template.cdf(values), cdf_values)
        # First three and last two values in cdf_value are not unique
        assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
        # Test of cdf and ppf are inverse functions
        x = np.linspace(1.0, 9.0, 100)
        assert_allclose(self.template.ppf(self.template.cdf(x)), x)
        x = np.linspace(0.0, 1.0, 100)
        assert_allclose(self.template.cdf(self.template.ppf(x)), x)
        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.cdf(x),
                        stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_rvs(self):
        N = 10000
        sample = self.template.rvs(size=N, random_state=123)
        assert_equal(np.sum(sample < 1.0), 0.0)
        assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
        # (this check was accidentally duplicated in a previous revision)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
        assert_equal(np.sum(sample > 9.0), 0.0)

    def test_munp(self):
        # Raw moments 0..3 of the histogram approximation should be close
        # to those of the underlying normal distribution.
        for n in range(4):
            assert_allclose(self.norm_template._munp(n),
                            stats.norm(1.0, 2.5).moment(n), rtol=0.05)

    def test_entropy(self):
        assert_allclose(self.norm_template.entropy(),
                        stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
def test_histogram_non_uniform():
    # Tests rv_histogram works even for non-uniform bin widths
    counts, bins = ([1, 1], [0, 1, 1001])

    dist = stats.rv_histogram((counts, bins), density=False)
    np.testing.assert_allclose(dist.pdf([0.5, 200]), [0.5, 0.0005])
    assert dist.median() == 1

    dist = stats.rv_histogram((counts, bins), density=True)
    np.testing.assert_allclose(dist.pdf([0.5, 200]), 1/1001)
    assert dist.median() == 1001/2

    # Omitting density produces a warning for non-uniform bins...
    # NOTE: numpy's assert_warns does not accept a `match` keyword (recent
    # numpy raises RuntimeError suggesting pytest.warns), so pytest.warns
    # is used to check the message.
    message = "Bin widths are not constant. Assuming..."
    with pytest.warns(RuntimeWarning, match=message):
        dist = stats.rv_histogram((counts, bins))
    assert dist.median() == 1001/2  # default is like `density=True`

    # ... but not for uniform bins
    dist = stats.rv_histogram((counts, [0, 1, 2]))
    assert dist.median() == 1
class TestLogUniform:
    """Tests for `loguniform` and its alias `reciprocal`."""
    def test_alias(self):
        # This test makes sure that "reciprocal" and "loguniform" are
        # aliases of the same distribution and that both are log-uniform
        rng = np.random.default_rng(98643218961)
        rv = stats.loguniform(10 ** -3, 10 ** 0)
        rvs = rv.rvs(size=10000, random_state=rng)
        # same seed -> identical draws from the alias
        rng = np.random.default_rng(98643218961)
        rv2 = stats.reciprocal(10 ** -3, 10 ** 0)
        rvs2 = rv2.rvs(size=10000, random_state=rng)
        assert_allclose(rvs2, rvs)
        # log10 of the samples should be (approximately) uniform
        vals, _ = np.histogram(np.log10(rvs), bins=10)
        assert 900 <= vals.min() <= vals.max() <= 1100
        assert np.abs(np.median(vals) - 1000) <= 10
    @pytest.mark.parametrize("method", ['mle', 'mm'])
    def test_fit_override(self, method):
        # loguniform is overparameterized, so check that fit override enforces
        # scale=1 unless fscale is provided by the user
        rng = np.random.default_rng(98643218961)
        rvs = stats.loguniform.rvs(0.1, 1, size=1000, random_state=rng)
        a, b, loc, scale = stats.loguniform.fit(rvs, method=method)
        assert scale == 1
        a, b, loc, scale = stats.loguniform.fit(rvs, fscale=2, method=method)
        assert scale == 2
    def test_overflow(self):
        # original formulation had overflow issues; check that this is resolved
        # Extensive accuracy tests elsewhere, no need to test all methods
        rng = np.random.default_rng(7136519550773909093)
        a, b = 1e-200, 1e200
        dist = stats.loguniform(a, b)
        # test roundtrip error
        cdf = rng.uniform(0, 1, size=1000)
        assert_allclose(dist.cdf(dist.ppf(cdf)), cdf)
        rvs = dist.rvs(size=1000)
        assert_allclose(dist.ppf(dist.cdf(rvs)), rvs)
        # test a property of the pdf (and that there is no overflow):
        # pdf is proportional to 1/x, so the ratio of consecutive decades is 10
        x = 10.**np.arange(-200, 200)
        pdf = dist.pdf(x)  # no overflow
        assert_allclose(pdf[:-1]/pdf[1:], 10)
        # check munp against wikipedia reference
        mean = (b - a)/(np.log(b) - np.log(a))
        assert_allclose(dist.mean(), mean)
class TestArgus:
    """Tests for the ARGUS distribution, in particular the small-chi and
    large-chi regimes of its sampler and special functions."""
    def test_argus_rvs_large_chi(self):
        # test that the algorithm can handle large values of chi
        x = stats.argus.rvs(50, size=500, random_state=325)
        assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)
    @pytest.mark.parametrize('chi, random_state', [
            [0.1, 325],   # chi <= 0.5: rejection method case 1
            [1.3, 155],   # 0.5 < chi <= 1.8: rejection method case 2
            [3.5, 135]    # chi > 1.8: transform conditional Gamma distribution
    ])
    def test_rvs(self, chi, random_state):
        # Kolmogorov-Smirnov check of the sampler in each regime.
        x = stats.argus.rvs(chi, size=500, random_state=random_state)
        _, p = stats.kstest(x, "argus", (chi, ))
        assert_(p > 0.05)
    @pytest.mark.parametrize('chi', [1e-9, 1e-6])
    def test_rvs_small_chi(self, chi):
        # test for gh-11699 => rejection method case 1 can even handle chi=0
        # the CDF of the distribution for chi=0 is 1 - (1 - x**2)**(3/2)
        # test rvs against distribution of limit chi=0
        r = stats.argus.rvs(chi, size=500, random_state=890981)
        _, p = stats.kstest(r, lambda x: 1 - (1 - x**2)**(3/2))
        assert_(p > 0.05)
    # Expected values were computed with mpmath.
    @pytest.mark.parametrize('chi, expected_mean',
                             [(1, 0.6187026683551835),
                              (10, 0.984805536783744),
                              (40, 0.9990617659702923),
                              (60, 0.9995831885165300),
                              (99, 0.9998469348663028)])
    def test_mean(self, chi, expected_mean):
        m = stats.argus.mean(chi, scale=1)
        assert_allclose(m, expected_mean, rtol=1e-13)
    # Expected values were computed with mpmath.
    @pytest.mark.parametrize('chi, expected_var, rtol',
                             [(1, 0.05215651254197807, 1e-13),
                              (10, 0.00015805472008165595, 1e-11),
                              (40, 5.877763210262901e-07, 1e-8),
                              (60, 1.1590179389611416e-07, 1e-8),
                              (99, 1.5623277006064666e-08, 1e-8)])
    def test_var(self, chi, expected_var, rtol):
        v = stats.argus.var(chi, scale=1)
        assert_allclose(v, expected_var, rtol=rtol)
    # Expected values were computed with mpmath (code: see gh-13370).
    @pytest.mark.parametrize('chi, expected, rtol',
                             [(0.9, 0.07646314974436118, 1e-14),
                              (0.5, 0.015429797891863365, 1e-14),
                              (0.1, 0.0001325825293278049, 1e-14),
                              (0.01, 1.3297677078224565e-07, 1e-15),
                              (1e-3, 1.3298072023958999e-10, 1e-14),
                              (1e-4, 1.3298075973486862e-13, 1e-14),
                              (1e-6, 1.32980760133771e-19, 1e-14),
                              (1e-9, 1.329807601338109e-28, 1e-15)])
    def test_argus_phi_small_chi(self, chi, expected, rtol):
        # The internal helper _argus_phi must stay accurate as chi -> 0.
        assert_allclose(_argus_phi(chi), expected, rtol=rtol)
    # Expected values were computed with mpmath (code: see gh-13370).
    @pytest.mark.parametrize(
        'chi, expected',
        [(0.5, (0.28414073302940573, 1.2742227939992954, 1.2381254688255896)),
         (0.2, (0.296172952995264, 1.2951290588110516, 1.1865767100877576)),
         (0.1, (0.29791447523536274, 1.29806307956989, 1.1793168289857412)),
         (0.01, (0.2984904104866452, 1.2990283628160553, 1.1769268414080531)),
         (1e-3, (0.298496172925224, 1.2990380082487925, 1.176902956021053)),
         (1e-4, (0.29849623054991836, 1.2990381047023793, 1.1769027171686324)),
         (1e-6, (0.2984962311319278, 1.2990381056765605, 1.1769027147562232)),
         (1e-9, (0.298496231131986, 1.299038105676658, 1.1769027147559818))])
    def test_pdf_small_chi(self, chi, expected):
        x = np.array([0.1, 0.5, 0.9])
        assert_allclose(stats.argus.pdf(x, chi), expected, rtol=1e-13)
    # Expected values were computed with mpmath (code: see gh-13370).
    @pytest.mark.parametrize(
        'chi, expected',
        [(0.5, (0.9857660526895221, 0.6616565930168475, 0.08796070398429937)),
         (0.2, (0.9851555052359501, 0.6514666238985464, 0.08362690023746594)),
         (0.1, (0.9850670974995661, 0.6500061310508574, 0.08302050640683846)),
         (0.01, (0.9850378582451867, 0.6495239242251358, 0.08282109244852445)),
         (1e-3, (0.9850375656906663, 0.6495191015522573, 0.08281910005231098)),
         (1e-4, (0.9850375627651049, 0.6495190533254682, 0.08281908012852317)),
         (1e-6, (0.9850375627355568, 0.6495190528383777, 0.08281907992729293)),
         (1e-9, (0.9850375627355538, 0.649519052838329, 0.0828190799272728))])
    def test_sf_small_chi(self, chi, expected):
        x = np.array([0.1, 0.5, 0.9])
        assert_allclose(stats.argus.sf(x, chi), expected, rtol=1e-14)
    # Expected values were computed with mpmath (code: see gh-13370).
    @pytest.mark.parametrize(
        'chi, expected',
        [(0.5, (0.0142339473104779, 0.3383434069831524, 0.9120392960157007)),
         (0.2, (0.014844494764049919, 0.34853337610145363, 0.916373099762534)),
         (0.1, (0.014932902500433911, 0.34999386894914264, 0.9169794935931616)),
         (0.01, (0.014962141754813293, 0.35047607577486417, 0.9171789075514756)),
         (1e-3, (0.01496243430933372, 0.35048089844774266, 0.917180899947689)),
         (1e-4, (0.014962437234895118, 0.3504809466745317, 0.9171809198714769)),
         (1e-6, (0.01496243726444329, 0.3504809471616223, 0.9171809200727071)),
         (1e-9, (0.014962437264446245, 0.350480947161671, 0.9171809200727272))])
    def test_cdf_small_chi(self, chi, expected):
        x = np.array([0.1, 0.5, 0.9])
        assert_allclose(stats.argus.cdf(x, chi), expected, rtol=1e-12)
    # Expected values were computed with mpmath (code: see gh-13370).
    @pytest.mark.parametrize(
        'chi, expected, rtol',
        [(0.5, (0.5964284712757741, 0.052890651988588604), 1e-12),
         (0.101, (0.5893490968089076, 0.053017469847275685), 1e-11),
         (0.1, (0.5893431757009437, 0.05301755449499372), 1e-13),
         (0.01, (0.5890515677940915, 0.05302167905837031), 1e-13),
         (1e-3, (0.5890486520005177, 0.053021719862088104), 1e-13),
         (1e-4, (0.5890486228426105, 0.0530217202700811), 1e-13),
         (1e-6, (0.5890486225481156, 0.05302172027420182), 1e-13),
         (1e-9, (0.5890486225480862, 0.05302172027420224), 1e-13)])
    def test_stats_small_chi(self, chi, expected, rtol):
        val = stats.argus.stats(chi, moments='mv')
        assert_allclose(val, expected, rtol=rtol)
class TestNakagami:
    """Tests for the Nakagami distribution: extreme-tail accuracy, entropy
    in extreme-shape regimes, and maximum-likelihood fitting."""
    def test_logpdf(self):
        # Test nakagami logpdf for an input where the PDF is smaller
        # than can be represented with 64 bit floating point.
        # The expected value of logpdf was computed with mpmath:
        #
        # def logpdf(x, nu):
        #     x = mpmath.mpf(x)
        #     nu = mpmath.mpf(nu)
        #     return (mpmath.log(2) + nu*mpmath.log(nu) -
        #             mpmath.loggamma(nu) + (2*nu - 1)*mpmath.log(x) -
        #             nu*x**2)
        #
        nu = 2.5
        x = 25
        logp = stats.nakagami.logpdf(x, nu)
        assert_allclose(logp, -1546.9253055607549)
    def test_sf_isf(self):
        # Test nakagami sf and isf when the survival function
        # value is very small.
        # The expected value of the survival function was computed
        # with mpmath:
        #
        # def sf(x, nu):
        #     x = mpmath.mpf(x)
        #     nu = mpmath.mpf(nu)
        #     return mpmath.gammainc(nu, nu*x*x, regularized=True)
        #
        nu = 2.5
        x0 = 5.0
        sf = stats.nakagami.sf(x0, nu)
        assert_allclose(sf, 2.736273158588307e-25, rtol=1e-13)
        # Check round trip back to x0.
        x1 = stats.nakagami.isf(sf, nu)
        assert_allclose(x1, x0, rtol=1e-13)
    @pytest.mark.parametrize("m, ref",
                             [(5, -0.097341814372152),
                              (0.5, 0.7257913526447274),
                              (10, -0.43426184310934907)])
    def test_entropy(self, m, ref):
        # Reference values computed symbolically and evaluated at high
        # precision:
        # from sympy import *
        # from mpmath import mp
        # import numpy as np
        # v, x = symbols('v, x', real=True, positive=True)
        # pdf = 2 * v ** v / gamma(v) * x ** (2 * v - 1) * exp(-v * x ** 2)
        # h = simplify(simplify(integrate(-pdf * log(pdf), (x, 0, oo))))
        # entropy = lambdify(v, h, 'mpmath')
        # mp.dps = 200
        # nu = 5
        # ref = np.float64(entropy(mp.mpf(nu)))
        # print(ref)
        assert_allclose(stats.nakagami.entropy(m), ref, rtol=1.1e-14)
    @pytest.mark.parametrize("m, ref",
                             [(1e-100, -5.0e+99), (1e-10, -4999999965.442979),
                              (9.999e6, -7.333206478668433), (1.001e7, -7.3337562313259825),
                              (1e10, -10.787134112333835), (1e100, -114.40346329705756)])
    def test_extreme_nu(self, m, ref):
        # Entropy must remain accurate for extremely small/large shape nu.
        assert_allclose(stats.nakagami.entropy(m), ref)
    def test_entropy_overflow(self):
        assert np.isfinite(stats.nakagami._entropy(1e100))
        assert np.isfinite(stats.nakagami._entropy(1e-100))
    @pytest.mark.xfail(reason="Fit of nakagami not reliable, see gh-10908.")
    @pytest.mark.parametrize('nu', [1.6, 2.5, 3.9])
    @pytest.mark.parametrize('loc', [25.0, 10, 35])
    @pytest.mark.parametrize('scale', [13, 5, 20])
    def test_fit(self, nu, loc, scale):
        # Regression test for gh-13396 (21/27 cases failed previously)
        # The first tuple of the parameters' values is discussed in gh-10908
        N = 100
        samples = stats.nakagami.rvs(size=N, nu=nu, loc=loc,
                                     scale=scale, random_state=1337)
        nu_est, loc_est, scale_est = stats.nakagami.fit(samples)
        assert_allclose(nu_est, nu, rtol=0.2)
        assert_allclose(loc_est, loc, rtol=0.2)
        assert_allclose(scale_est, scale, rtol=0.2)
        # The log-likelihood gradient should vanish at the MLE; each
        # partial derivative below is checked to be (close to) zero.
        def dlogl_dnu(nu, loc, scale):
            return ((-2*nu + 1) * np.sum(1/(samples - loc))
                    + 2*nu/scale**2 * np.sum(samples - loc))
        def dlogl_dloc(nu, loc, scale):
            return (N * (1 + np.log(nu) - polygamma(0, nu)) +
                    2 * np.sum(np.log((samples - loc) / scale))
                    - np.sum(((samples - loc) / scale)**2))
        def dlogl_dscale(nu, loc, scale):
            return (- 2 * N * nu / scale
                    + 2 * nu / scale ** 3 * np.sum((samples - loc) ** 2))
        assert_allclose(dlogl_dnu(nu_est, loc_est, scale_est), 0, atol=1e-3)
        assert_allclose(dlogl_dloc(nu_est, loc_est, scale_est), 0, atol=1e-3)
        assert_allclose(dlogl_dscale(nu_est, loc_est, scale_est), 0, atol=1e-3)
    @pytest.mark.parametrize('loc', [25.0, 10, 35])
    @pytest.mark.parametrize('scale', [13, 5, 20])
    def test_fit_nu(self, loc, scale):
        # For nu = 0.5, we have analytical values for
        # the MLE of the loc and the scale
        nu = 0.5
        n = 100
        samples = stats.nakagami.rvs(size=n, nu=nu, loc=loc,
                                     scale=scale, random_state=1337)
        nu_est, loc_est, scale_est = stats.nakagami.fit(samples, f0=nu)
        # Analytical values
        loc_theo = np.min(samples)
        scale_theo = np.sqrt(np.mean((samples - loc_est) ** 2))
        assert_allclose(nu_est, nu, rtol=1e-7)
        assert_allclose(loc_est, loc_theo, rtol=1e-7)
        assert_allclose(scale_est, scale_theo, rtol=1e-7)
class TestWrapCauchy:
    """Tests for the wrapped Cauchy distribution's cdf."""

    def test_cdf_shape_broadcasting(self):
        # Regression test for gh-13791: wrapcauchy.cdf must broadcast x
        # against the shape parameter c.
        c = np.array([[0.03, 0.25], [0.5, 0.75]])
        x = np.array([[1.0], [4.0]])
        broadcast_cdf = stats.wrapcauchy.cdf(x, c)
        assert broadcast_cdf.shape == (2, 2)
        # compare against one scalar evaluation per broadcast element
        elementwise = [stats.wrapcauchy.cdf(xi, ci)
                       for (xi, ci) in np.nditer((x, c))]
        assert_allclose(broadcast_cdf.ravel(), elementwise, rtol=1e-13)

    def test_cdf_center(self):
        # The distribution is symmetric about pi, so cdf(pi) == 1/2.
        assert_allclose(stats.wrapcauchy.cdf(np.pi, 0.03), 0.5, rtol=1e-14)

    def test_cdf(self):
        # Check the closed-form cdf on both sides of pi.
        below_pi = 1.0
        above_pi = 4.0
        c = 0.75
        p = stats.wrapcauchy.cdf([below_pi, above_pi], c)
        ratio = (1 + c)/(1 - c)
        assert_allclose(p[0], np.arctan(ratio*np.tan(below_pi/2))/np.pi)
        assert_allclose(p[1],
                        1 - np.arctan(ratio*np.tan(np.pi - above_pi/2))/np.pi)
def test_rvs_no_size_error():
    # gh-11394: a _rvs override must accept a `size` parameter; calling
    # rvs() on one that does not must raise TypeError about the
    # unexpected argument.
    class rvs_no_size_gen(stats.rv_continuous):
        def _rvs(self):
            return 1
    broken = rvs_no_size_gen(name='rvs_no_size')
    with assert_raises(TypeError, match=r"_rvs\(\) got (an|\d) unexpected"):
        broken.rvs()
@pytest.mark.parametrize('distname, args', invdistdiscrete + invdistcont)
def test_support_gh13294_regression(distname, args):
    """gh-13294: ``support`` must return NaN for invalid parameters."""
    if distname in skip_test_support_gh13294_regression:
        pytest.skip(f"skipping test for the support method for "
                    f"distribution {distname}.")
    dist = getattr(stats, distname)
    if isinstance(dist, stats.rv_continuous):
        # Invalid shape arguments (if any) with a valid (default) scale.
        if len(args) != 0:
            lo, hi = dist.support(*args)
            assert_equal(lo, np.nan)
            assert_equal(hi, np.nan)
        # Invalid (negative) scale.  For distributions that take no shape
        # parameters this is the only invalid-input case, so it is
        # implicitly covered here.
        loc1, scale1 = 0, -1
        lo, hi = dist.support(*args, loc1, scale1)
        assert_equal(lo, np.nan)
        assert_equal(hi, np.nan)
    else:
        # Discrete distributions: invalid shape arguments only.
        lo, hi = dist.support(*args)
        assert_equal(lo, np.nan)
        assert_equal(hi, np.nan)
def test_support_broadcasting_gh13294_regression():
    """``support`` must broadcast loc/scale and emit NaN for bad scales."""
    # Mixed valid/invalid parameters broadcast elementwise.
    low, high = stats.norm.support([0, 0, 0, 1], [1, 1, 1, -1])
    expected_low = np.array([-np.inf, -np.inf, -np.inf, np.nan])
    expected_high = np.array([np.inf, np.inf, np.inf, np.nan])
    assert_equal(low, expected_low)
    assert_equal(high, expected_high)
    assert low.shape == expected_low.shape
    assert high.shape == expected_high.shape

    # Empty inputs produce empty outputs of the right shape.
    low, high = stats.norm.support([], [])
    empty = np.array([])
    assert_equal(low, empty)
    assert_equal(high, empty)
    assert low.shape == empty.shape
    assert high.shape == empty.shape

    # A single invalid scale broadcasts across all locations.
    low, high = stats.norm.support([0, 0, 0, 1], [-1])
    all_nan = np.array(4 * [np.nan])
    assert_equal(low, all_nan)
    assert_equal(high, all_nan)
    assert low.shape == all_nan.shape
    assert high.shape == all_nan.shape
def test_stats_broadcasting_gh14953_regression():
    """``var`` must broadcast loc against scale (gh-14953)."""
    locs = [0., 0.]
    scales = [[1.], [2.], [3.]]
    assert_equal(stats.norm.var(locs, scales),
                 [[1., 1.], [4., 4.], [9., 9.]])
    # Degenerate (empty) shapes must also broadcast correctly.
    empty_loc = np.empty((0,))
    empty_scale = np.empty((1, 0))
    assert stats.norm.var(empty_loc, empty_scale).shape == (1, 0)
# Check a few values of the cosine distribution's cdf, sf, ppf and
# isf methods. Expected values were computed with mpmath.
@pytest.mark.parametrize('x, expected',
                         [(-3.14159, 4.956444476505336e-19),
                          (3.14, 0.9999999998928399)])
def test_cosine_cdf_sf(x, expected):
    """Spot-check cosine.cdf/sf; reference values computed with mpmath."""
    # By the symmetry of the cosine distribution, sf(-x) == cdf(x).
    cdf_value = stats.cosine.cdf(x)
    sf_value = stats.cosine.sf(-x)
    assert_allclose(cdf_value, expected)
    assert_allclose(sf_value, expected)
@pytest.mark.parametrize('p, expected',
                         [(1e-6, -3.1080612413765905),
                          (1e-17, -3.141585429601399),
                          (0.975, 2.1447547020964923)])
def test_cosine_ppf_isf(p, expected):
    """Spot-check cosine.ppf/isf; reference values computed with mpmath."""
    # By symmetry, isf(p) == -ppf(p) for the cosine distribution.
    ppf_value = stats.cosine.ppf(p)
    isf_value = stats.cosine.isf(p)
    assert_allclose(ppf_value, expected)
    assert_allclose(isf_value, -expected)
def test_cosine_logpdf_endpoints():
    """logpdf at the support endpoints must stay below the mpmath bound.

    The reference value was calculated with mpmath assuming ``np.cos(-1)``
    is four floating point numbers too high; see gh-18382.
    """
    endpoint_logpdf = stats.cosine.logpdf([-np.pi, np.pi])
    assert_array_less(endpoint_logpdf, -37.18838327496655)
def test_distr_params_lists():
    """The invalid-parameter lists must mirror the main distribution lists.

    Some entries of ``distdiscrete`` are distribution objects (extra
    distributions added in test_discrete_basic); only string names are
    compared.
    """
    names = {name for name, _ in distdiscrete if isinstance(name, str)}
    inv_names = {name for name, _ in invdistdiscrete}
    assert names == inv_names

    names = {name for name, _ in distcont}
    inv_names = {name for name, _ in invdistcont}
    assert names == inv_names
def test_moment_order_4():
    """gh-13655: ``moment(order=4)`` must use the distribution's ``_stats``.

    gh-13655 reported that when a distribution's `_stats` accepts the
    `moments` parameter, a call to `moment` with `order=4` computed the
    fast/accurate `_stats` result but discarded it, falling back to the
    generic `_munp`.  Verify the fix.
    """
    # skewnorm._stats accepts the `moments` keyword — no failure means yes.
    stats.skewnorm._stats(a=0, moments='k')
    # With `_stats` actually used, the fourth moment equals Pearson's
    # kurtosis of the standard normal exactly: 3.
    assert stats.skewnorm.moment(order=4, a=0) == 3.0
    # At the time of gh-13655, skewnorm._munp() used the inaccurate generic
    # path and the following failed; it has since been made exact.
    assert stats.skewnorm._munp(4, 0) == 3.0
class TestRelativisticBW:
    """Tests for `stats.rel_breitwigner` (relativistic Breit-Wigner)."""

    @pytest.fixture
    def ROOT_pdf_sample_data(self):
        """Sample data points for pdf computed with CERN's ROOT

        See - https://root.cern/

        Uses ROOT.TMath.BreitWignerRelativistic, available in ROOT
        versions 6.27+

        pdf calculated for Z0 Boson, W Boson, and Higgs Boson for
        x in `np.linspace(0, 200, 401)`.
        """
        data = np.load(
            Path(__file__).parent /
            'data/rel_breitwigner_pdf_sample_data_ROOT.npy'
        )
        # Recarray rows carry the evaluation point, the ROOT reference pdf,
        # and the (rho, gamma) pair identifying the particle.
        data = np.rec.fromarrays(data.T, names='x,pdf,rho,gamma')
        return data

    @pytest.mark.parametrize(
        "rho,gamma,rtol", [
            (36.545206797050334, 2.4952, 5e-14),  # Z0 Boson
            (38.55107913669065, 2.085, 1e-14),  # W Boson
            (96292.3076923077, 0.0013, 5e-13),  # Higgs Boson
        ]
    )
    def test_pdf_against_ROOT(self, ROOT_pdf_sample_data, rho, gamma, rtol):
        # Select only the reference rows belonging to this particle.
        data = ROOT_pdf_sample_data[
            (ROOT_pdf_sample_data['rho'] == rho)
            & (ROOT_pdf_sample_data['gamma'] == gamma)
        ]
        x, pdf = data['x'], data['pdf']
        assert_allclose(
            pdf, stats.rel_breitwigner.pdf(x, rho, scale=gamma), rtol=rtol
        )

    @pytest.mark.parametrize("rho, Gamma, rtol", [
        (36.545206797050334, 2.4952, 5e-13),  # Z0 Boson
        (38.55107913669065, 2.085, 5e-13),  # W Boson
        (96292.3076923077, 0.0013, 5e-10),  # Higgs Boson
        ]
    )
    def test_pdf_against_simple_implementation(self, rho, Gamma, rtol):
        # reference implementation straight from formulas on Wikipedia [1]
        def pdf(E, M, Gamma):
            gamma = np.sqrt(M**2 * (M**2 + Gamma**2))
            k = (2 * np.sqrt(2) * M * Gamma * gamma
                 / (np.pi * np.sqrt(M**2 + gamma)))
            return k / ((E**2 - M**2)**2 + M**2*Gamma**2)
        # get reasonable values at which to evaluate the CDF
        p = np.linspace(0.05, 0.95, 10)
        x = stats.rel_breitwigner.ppf(p, rho, scale=Gamma)
        res = stats.rel_breitwigner.pdf(x, rho, scale=Gamma)
        # rel_breitwigner parameterizes M as rho*Gamma.
        ref = pdf(x, rho*Gamma, Gamma)
        assert_allclose(res, ref, rtol=rtol)

    @pytest.mark.parametrize(
        "rho,gamma", [
            pytest.param(
                36.545206797050334, 2.4952, marks=pytest.mark.slow
            ),  # Z0 Boson
            pytest.param(
                38.55107913669065, 2.085, marks=pytest.mark.xslow
            ),  # W Boson
            pytest.param(
                96292.3076923077, 0.0013, marks=pytest.mark.xslow
            ),  # Higgs Boson
        ]
    )
    def test_fit_floc(self, rho, gamma):
        """Tests fit for cases where floc is set.

        `rel_breitwigner` has special handling for these cases.
        """
        seed = 6936804688480013683
        rng = np.random.default_rng(seed)
        data = stats.rel_breitwigner.rvs(
            rho, scale=gamma, size=1000, random_state=rng
        )
        # Shape and scale are recovered only roughly from 1000 samples.
        fit = stats.rel_breitwigner.fit(data, floc=0)
        assert_allclose((fit[0], fit[2]), (rho, gamma), rtol=2e-1)
        assert fit[1] == 0
        # Check again with fscale set.
        fit = stats.rel_breitwigner.fit(data, floc=0, fscale=gamma)
        assert_allclose(fit[0], rho, rtol=1e-2)
        assert (fit[1], fit[2]) == (0, gamma)
class TestJohnsonSU:
    @pytest.mark.parametrize("case", [  # a, b, loc, scale, m1, m2, g1, g2
        (-0.01, 1.1, 0.02, 0.0001, 0.02000137427557091,
         2.1112742956578063e-08, 0.05989781342460999, 20.36324408592951-3),
        (2.554395574161155, 2.2482281679651965, 0, 1, -1.54215386737391,
         0.7629882028469993, -1.256656139406788, 6.303058419339775-3)])
    def test_moment_gh18071(self, case):
        # gh-18071 reported an IntegrationWarning emitted by johnsonsu.stats
        # Check that the warning is no longer emitted and that the values
        # are accurate compared against results from Mathematica.
        # Reference values from Mathematica, e.g.
        # Mean[JohnsonDistribution["SU",-0.01, 1.1, 0.02, 0.0001]]
        # The trailing `-3` converts Mathematica's (Pearson) kurtosis to
        # the excess kurtosis that scipy's `stats` reports.
        res = stats.johnsonsu.stats(*case[:4], moments='mvsk')
        assert_allclose(res, case[4:], rtol=1e-14)
class TestTruncPareto:
    """Tests for the truncated Pareto distribution."""

    def test_pdf(self):
        # The truncated pdf is the Pareto pdf renormalized by the Pareto
        # CDF at the truncation point.
        b, c = 1.8, 5.3
        grid = np.linspace(1.8, 5.3)
        result = stats.truncpareto(b, c).pdf(grid)
        expected = stats.pareto(b).pdf(grid) / stats.pareto(b).cdf(c)
        assert_allclose(result, expected)

    @pytest.mark.parametrize('fix_loc', [True, False])
    @pytest.mark.parametrize('fix_scale', [True, False])
    @pytest.mark.parametrize('fix_b', [True, False])
    @pytest.mark.parametrize('fix_c', [True, False])
    def test_fit(self, fix_loc, fix_scale, fix_b, fix_c):
        """Exercise fit() with every combination of fixed parameters."""
        rng = np.random.default_rng(6747363148258237171)
        b, c, loc, scale = 1.8, 5.3, 1, 2.5
        frozen = stats.truncpareto(b, c, loc=loc, scale=scale)
        data = frozen.rvs(size=500, random_state=rng)

        fixed = {}
        if fix_loc:
            fixed['floc'] = loc
        if fix_scale:
            fixed['fscale'] = scale
        if fix_b:
            fixed['f0'] = b
        if fix_c:
            fixed['f1'] = c

        if fix_loc and fix_scale and fix_b and fix_c:
            # With every parameter pinned, fit must refuse to run.
            message = "All parameters fixed. There is nothing to optimize."
            with pytest.raises(RuntimeError, match=message):
                stats.truncpareto.fit(data, **fixed)
        else:
            _assert_less_or_close_loglike(stats.truncpareto, data, **fixed)
class TestKappa3:
    """Tests for the kappa3 distribution."""

    def test_sf(self):
        # During development of gh-18822 the kappa3.sf override could
        # overflow where the generic 1 - cdf computation in main did not;
        # verify the final implementation agrees with the generic path.
        expected = 1 - stats.kappa3.cdf(0.5, 1e5)
        result = stats.kappa3.sf(0.5, 1e5)
        assert_allclose(result, expected)
# Cases are (distribution name, log10 of smallest probability mass to test,
# log10 of the complement of the largest probability mass to test, atol,
# rtol). None uses default values.
@pytest.mark.parametrize("case", [("kappa3", None, None, None, None),
                                  ("loglaplace", None, None, None, None),
                                  ("lognorm", None, None, None, None),
                                  ("lomax", None, None, None, None),
                                  ("pareto", None, None, None, None),])
def test_sf_isf_overrides(case):
    """Check that ``sf`` inverts ``isf``.

    Supplements `test_continuous_basic.check_sf_isf` for distributions
    with overridden `sf` and `isf` methods.
    """
    distname, lp1, lp2, atol, rtol = case
    log_half = np.log10(0.5)  # log10 of the probability mass at the median
    lp1 = lp1 or -290
    lp2 = lp2 or -14
    atol = atol or 0
    rtol = rtol or 1e-12
    params = dict(distcont)[distname]
    frozen = getattr(stats, distname)(*params)

    # (Very deep) right tail up to the median.  Random (loguniform) points
    # would also serve as a benchmark, but strictly logspaced points are
    # fine for a test.
    probs = np.logspace(lp1, log_half)
    roundtrip = frozen.sf(frozen.isf(probs))
    assert_allclose(roundtrip, probs, atol=atol, rtol=rtol)

    # Median down into the left tail.
    probs = 1 - np.logspace(lp2, log_half, 20)
    roundtrip = frozen.sf(frozen.isf(probs))
    assert_allclose(roundtrip, probs, atol=atol, rtol=rtol)
| 374,190
| 38.626284
| 87
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_multicomp.py
|
import copy
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import stats
from scipy.stats._multicomp import _pvalue_dunnett, DunnettResult
class TestDunnett:
    """Tests for `stats.dunnett`, validated against Matlab and R multcomp."""
    # For the following tests, p-values were computed using Matlab, e.g.
    # sample = [18. 15. 18. 16. 17. 15. 14. 14. 14. 15. 15....
    # 14. 15. 14. 22. 18. 21. 21. 10. 10. 11. 9....
    # 25. 26. 17.5 16. 15.5 14.5 22. 22. 24. 22.5 29....
    # 24.5 20. 18. 18.5 17.5 26.5 13. 16.5 13. 13. 13....
    # 28. 27. 34. 31. 29. 27. 24. 23. 38. 36. 25....
    # 38. 26. 22. 36. 27. 27. 32. 28. 31....
    # 24. 27. 33. 32. 28. 19. 37. 31. 36. 36....
    # 34. 38. 32. 38. 32....
    # 26. 24. 26. 25. 29. 29.5 16.5 36. 44....
    # 25. 27. 19....
    # 25. 20....
    # 28.];
    # j = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ...
    # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ...
    # 0 0 0 0...
    # 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1...
    # 2 2 2 2 2 2 2 2 2...
    # 3 3 3...
    # 4 4...
    # 5];
    # [~, ~, stats] = anova1(sample, j, "off");
    # [results, ~, ~, gnames] = multcompare(stats, ...
    # "CriticalValueType", "dunnett", ...
    # "Approximate", false);
    # tbl = array2table(results, "VariableNames", ...
    # ["Group", "Control Group", "Lower Limit", ...
    # "Difference", "Upper Limit", "P-value"]);
    # tbl.("Group") = gnames(tbl.("Group"));
    # tbl.("Control Group") = gnames(tbl.("Control Group"))
    # Matlab doesn't report the statistic, so the statistics were
    # computed using R multcomp `glht`, e.g.:
    # library(multcomp)
    # options(digits=16)
    # control < - c(18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0,
    # 15.0, 15.0, 14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0,
    # 10.0, 10.0, 11.0, 9.0, 25.0, 26.0, 17.5, 16.0, 15.5,
    # 14.5, 22.0, 22.0, 24.0, 22.5, 29.0, 24.5, 20.0, 18.0,
    # 18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0, 28.0,
    # 27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0,
    # 25.0, 38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0,
    # 31.0)
    # t < - c(24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0,
    # 34.0, 38.0, 32.0, 38.0, 32.0)
    # w < - c(26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0)
    # x < - c(25.0, 27.0, 19.0)
    # y < - c(25.0, 20.0)
    # z < - c(28.0)
    #
    # groups = factor(rep(c("control", "t", "w", "x", "y", "z"),
    # times=c(length(control), length(t), length(w),
    # length(x), length(y), length(z))))
    # df < - data.frame(response=c(control, t, w, x, y, z),
    # group=groups)
    # model < - aov(response
    # ~group, data = df)
    # test < - glht(model=model,
    # linfct=mcp(group="Dunnett"),
    # alternative="g")
    # summary(test)
    # confint(test)
    # p-values agreed with those produced by Matlab to at least atol=1e-3

    # From Matlab's documentation on multcompare
    samples_1 = [
        [
            24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0,
            34.0, 38.0, 32.0, 38.0, 32.0
        ],
        [26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0],
        [25.0, 27.0, 19.0],
        [25.0, 20.0],
        [28.0]
    ]
    control_1 = [
        18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0, 15.0, 15.0,
        14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0, 10.0, 10.0, 11.0, 9.0,
        25.0, 26.0, 17.5, 16.0, 15.5, 14.5, 22.0, 22.0, 24.0, 22.5, 29.0,
        24.5, 20.0, 18.0, 18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0,
        28.0, 27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0, 25.0,
        38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0, 31.0
    ]
    pvalue_1 = [4.727e-06, 0.022346, 0.97912, 0.99953, 0.86579]  # Matlab
    # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
    p_1_twosided = [1e-4, 0.02237, 0.97913, 0.99953, 0.86583]
    p_1_greater = [1e-4, 0.011217, 0.768500, 0.896991, 0.577211]
    p_1_less = [1, 1, 0.99660, 0.98398, .99953]
    statistic_1 = [5.27356, 2.91270, 0.60831, 0.27002, 0.96637]
    ci_1_twosided = [[5.3633917835622, 0.7296142201217, -8.3879817106607,
                      -11.9090753452911, -11.7655021543469],
                     [15.9709832164378, 13.8936496687672, 13.4556900439941,
                      14.6434503452911, 25.4998771543469]]
    ci_1_greater = [5.9036402398526, 1.4000632918725, -7.2754756323636,
                    -10.5567456382391, -9.8675629499576]
    ci_1_less = [15.4306165948619, 13.2230539537359, 12.3429406339544,
                 13.2908248513211, 23.6015228251660]
    pvalues_1 = dict(twosided=p_1_twosided, less=p_1_less, greater=p_1_greater)
    cis_1 = dict(twosided=ci_1_twosided, less=ci_1_less, greater=ci_1_greater)
    case_1 = dict(samples=samples_1, control=control_1, statistic=statistic_1,
                  pvalues=pvalues_1, cis=cis_1)

    # From Dunnett1955 comparing with R's DescTools: DunnettTest
    samples_2 = [[9.76, 8.80, 7.68, 9.36], [12.80, 9.68, 12.16, 9.20, 10.55]]
    control_2 = [7.40, 8.50, 7.20, 8.24, 9.84, 8.32]
    pvalue_2 = [0.6201, 0.0058]
    # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
    p_2_twosided = [0.6201020, 0.0058254]
    p_2_greater = [0.3249776, 0.0029139]
    p_2_less = [0.91676, 0.99984]
    statistic_2 = [0.85703, 3.69375]
    ci_2_twosided = [[-1.2564116462124, 0.8396273539789],
                     [2.5564116462124, 4.4163726460211]]
    ci_2_greater = [-0.9588591188156, 1.1187563667543]
    ci_2_less = [2.2588591188156, 4.1372436332457]
    pvalues_2 = dict(twosided=p_2_twosided, less=p_2_less, greater=p_2_greater)
    cis_2 = dict(twosided=ci_2_twosided, less=ci_2_less, greater=ci_2_greater)
    case_2 = dict(samples=samples_2, control=control_2, statistic=statistic_2,
                  pvalues=pvalues_2, cis=cis_2)

    samples_3 = [[55, 64, 64], [55, 49, 52], [50, 44, 41]]
    control_3 = [55, 47, 48]
    pvalue_3 = [0.0364, 0.8966, 0.4091]
    # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
    p_3_twosided = [0.036407, 0.896539, 0.409295]
    p_3_greater = [0.018277, 0.521109, 0.981892]
    p_3_less = [0.99944, 0.90054, 0.20974]
    statistic_3 = [3.09073, 0.56195, -1.40488]
    ci_3_twosided = [[0.7529028025053, -8.2470971974947, -15.2470971974947],
                     [21.2470971974947, 12.2470971974947, 5.2470971974947]]
    ci_3_greater = [2.4023682323149, -6.5976317676851, -13.5976317676851]
    ci_3_less = [19.5984402363662, 10.5984402363662, 3.5984402363662]
    pvalues_3 = dict(twosided=p_3_twosided, less=p_3_less, greater=p_3_greater)
    cis_3 = dict(twosided=ci_3_twosided, less=ci_3_less, greater=ci_3_greater)
    case_3 = dict(samples=samples_3, control=control_3, statistic=statistic_3,
                  pvalues=pvalues_3, cis=cis_3)

    # From Thomson and Short,
    # Mucociliary function in health, chronic obstructive airway disease,
    # and asbestosis, Journal of Applied Physiology, 1969. Table 1
    # Comparing with R's DescTools: DunnettTest
    samples_4 = [[3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]]
    control_4 = [2.9, 3.0, 2.5, 2.6, 3.2]
    pvalue_4 = [0.5832, 0.9982]
    # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
    p_4_twosided = [0.58317, 0.99819]
    p_4_greater = [0.30225, 0.69115]
    p_4_less = [0.91929, 0.65212]
    statistic_4 = [0.90875, -0.05007]
    ci_4_twosided = [[-0.6898153448579, -1.0333456251632],
                     [1.4598153448579, 0.9933456251632]]
    ci_4_greater = [-0.5186459268412, -0.8719655502147]
    ci_4_less = [1.2886459268412, 0.8319655502147]
    pvalues_4 = dict(twosided=p_4_twosided, less=p_4_less, greater=p_4_greater)
    cis_4 = dict(twosided=ci_4_twosided, less=ci_4_less, greater=ci_4_greater)
    case_4 = dict(samples=samples_4, control=control_4, statistic=statistic_4,
                  pvalues=pvalues_4, cis=cis_4)

    @pytest.mark.parametrize(
        'rho, n_groups, df, statistic, pvalue, alternative',
        [
            # From Dunnett1955
            # Tables 1a and 1b pages 1117-1118
            (0.5, 1, 10, 1.81, 0.05, "greater"),  # different than two-sided
            (0.5, 3, 10, 2.34, 0.05, "greater"),
            (0.5, 2, 30, 1.99, 0.05, "greater"),
            (0.5, 5, 30, 2.33, 0.05, "greater"),
            (0.5, 4, 12, 3.32, 0.01, "greater"),
            (0.5, 7, 12, 3.56, 0.01, "greater"),
            (0.5, 2, 60, 2.64, 0.01, "greater"),
            (0.5, 4, 60, 2.87, 0.01, "greater"),
            (0.5, 4, 60, [2.87, 2.21], [0.01, 0.05], "greater"),
            # Tables 2a and 2b pages 1119-1120
            (0.5, 1, 10, 2.23, 0.05, "two-sided"),  # two-sided
            (0.5, 3, 10, 2.81, 0.05, "two-sided"),
            (0.5, 2, 30, 2.32, 0.05, "two-sided"),
            (0.5, 3, 20, 2.57, 0.05, "two-sided"),
            (0.5, 4, 12, 3.76, 0.01, "two-sided"),
            (0.5, 7, 12, 4.08, 0.01, "two-sided"),
            (0.5, 2, 60, 2.90, 0.01, "two-sided"),
            (0.5, 4, 60, 3.14, 0.01, "two-sided"),
            (0.5, 4, 60, [3.14, 2.55], [0.01, 0.05], "two-sided"),
        ],
    )
    def test_critical_values(
        self, rho, n_groups, df, statistic, pvalue, alternative
    ):
        # Invert the published critical-value tables: the p-value of the
        # tabulated critical statistic must be the tabulated alpha.
        rng = np.random.default_rng(165250594791731684851746311027739134893)
        # Equicorrelated correlation matrix with unit diagonal.
        rho = np.full((n_groups, n_groups), rho)
        np.fill_diagonal(rho, 1)

        statistic = np.array(statistic)
        res = _pvalue_dunnett(
            rho=rho, df=df, statistic=statistic,
            alternative=alternative,
            rng=rng
        )
        assert_allclose(res, pvalue, atol=5e-3)

    @pytest.mark.parametrize(
        'samples, control, pvalue, statistic',
        [
            (samples_1, control_1, pvalue_1, statistic_1),
            (samples_2, control_2, pvalue_2, statistic_2),
            (samples_3, control_3, pvalue_3, statistic_3),
            (samples_4, control_4, pvalue_4, statistic_4),
        ]
    )
    def test_basic(self, samples, control, pvalue, statistic):
        # Compare statistic and p-value against the Matlab/R references.
        rng = np.random.default_rng(11681140010308601919115036826969764808)

        res = stats.dunnett(*samples, control=control, random_state=rng)

        assert isinstance(res, DunnettResult)
        assert_allclose(res.statistic, statistic, rtol=5e-5)
        assert_allclose(res.pvalue, pvalue, rtol=1e-2, atol=1e-4)

    @pytest.mark.parametrize(
        'alternative',
        ['two-sided', 'less', 'greater']
    )
    def test_ttest_ind(self, alternative):
        # check that `dunnett` agrees with `ttest_ind`
        # when there are only two groups
        rng = np.random.default_rng(114184017807316971636137493526995620351)

        for _ in range(10):
            sample = rng.integers(-100, 100, size=(10,))
            control = rng.integers(-100, 100, size=(10,))

            res = stats.dunnett(
                sample, control=control,
                alternative=alternative, random_state=rng
            )
            ref = stats.ttest_ind(
                sample, control,
                alternative=alternative, random_state=rng
            )

            assert_allclose(res.statistic, ref.statistic, rtol=1e-3, atol=1e-5)
            assert_allclose(res.pvalue, ref.pvalue, rtol=1e-3, atol=1e-5)

    @pytest.mark.parametrize(
        'alternative, pvalue',
        [
            ('less', [0, 1]),
            ('greater', [1, 0]),
            ('two-sided', [0, 0]),
        ]
    )
    def test_alternatives(self, alternative, pvalue):
        # Samples constructed to be unambiguously below/above the control,
        # so p-values are effectively 0 or 1 per alternative.
        rng = np.random.default_rng(114184017807316971636137493526995620351)

        # width of 20 and min diff between samples/control is 60
        # and maximal diff would be 100
        sample_less = rng.integers(0, 20, size=(10,))
        control = rng.integers(80, 100, size=(10,))
        sample_greater = rng.integers(160, 180, size=(10,))

        res = stats.dunnett(
            sample_less, sample_greater, control=control,
            alternative=alternative, random_state=rng
        )
        assert_allclose(res.pvalue, pvalue, atol=1e-7)

        ci = res.confidence_interval()
        # two-sided is comparable for high/low
        if alternative == 'less':
            assert np.isneginf(ci.low).all()
            assert -100 < ci.high[0] < -60
            assert 60 < ci.high[1] < 100
        elif alternative == 'greater':
            assert -100 < ci.low[0] < -60
            assert 60 < ci.low[1] < 100
            assert np.isposinf(ci.high).all()
        elif alternative == 'two-sided':
            assert -100 < ci.low[0] < -60
            assert 60 < ci.low[1] < 100
            assert -100 < ci.high[0] < -60
            assert 60 < ci.high[1] < 100

    @pytest.mark.parametrize("case", [case_1, case_2, case_3, case_4])
    @pytest.mark.parametrize("alternative", ['less', 'greater', 'two-sided'])
    def test_against_R_multicomp_glht(self, case, alternative):
        # Compare p-values and confidence intervals with R multcomp `glht`.
        rng = np.random.default_rng(189117774084579816190295271136455278291)
        samples = case['samples']
        control = case['control']
        # Map scipy's alternative names to the keys used in the case dicts.
        alternatives = {'less': 'less', 'greater': 'greater',
                        'two-sided': 'twosided'}
        p_ref = case['pvalues'][alternative.replace('-', '')]
        res = stats.dunnett(*samples, control=control, alternative=alternative,
                            random_state=rng)
        # atol can't be tighter because R reports some pvalues as "< 1e-4"
        assert_allclose(res.pvalue, p_ref, rtol=5e-3, atol=1e-4)

        ci_ref = case['cis'][alternatives[alternative]]
        # One-sided intervals are unbounded on one side.
        if alternative == "greater":
            ci_ref = [ci_ref, np.inf]
        elif alternative == "less":
            ci_ref = [-np.inf, ci_ref]
        assert res._ci is None
        assert res._ci_cl is None
        ci = res.confidence_interval(confidence_level=0.95)
        assert_allclose(ci.low, ci_ref[0], rtol=5e-3, atol=1e-5)
        assert_allclose(ci.high, ci_ref[1], rtol=5e-3, atol=1e-5)

        # re-run to use the cached value "is" to check id as same object
        assert res._ci is ci
        assert res._ci_cl == 0.95
        ci_ = res.confidence_interval(confidence_level=0.95)
        assert ci_ is ci

    @pytest.mark.parametrize('alternative', ["two-sided", "less", "greater"])
    def test_str(self, alternative):
        # Smoke-test the human-readable result rendering.
        rng = np.random.default_rng(189117774084579816190295271136455278291)
        res = stats.dunnett(
            *self.samples_3, control=self.control_3, alternative=alternative,
            random_state=rng
        )

        # check some str output
        res_str = str(res)
        assert '(Sample 2 - Control)' in res_str
        assert '95.0%' in res_str

        if alternative == 'less':
            assert '-inf' in res_str
            assert '19.' in res_str
        elif alternative == 'greater':
            assert 'inf' in res_str
            assert '-13.' in res_str
        else:
            assert 'inf' not in res_str
            assert '21.' in res_str

    def test_warnings(self):
        # The allowance solver must warn when it fails to converge.
        rng = np.random.default_rng(189117774084579816190295271136455278291)
        res = stats.dunnett(
            *self.samples_3, control=self.control_3, random_state=rng
        )
        msg = r"Computation of the confidence interval did not converge"
        with pytest.warns(UserWarning, match=msg):
            res._allowance(tol=1e-5)

    def test_raises(self):
        # Input validation of `dunnett` and `confidence_interval`.
        samples, control = self.samples_3, self.control_3

        # alternative
        with pytest.raises(ValueError, match="alternative must be"):
            stats.dunnett(*samples, control=control, alternative='bob')

        # 2D for a sample
        samples_ = copy.deepcopy(samples)
        samples_[0] = [samples_[0]]
        with pytest.raises(ValueError, match="must be 1D arrays"):
            stats.dunnett(*samples_, control=control)

        # 2D for control
        control_ = copy.deepcopy(control)
        control_ = [control_]
        with pytest.raises(ValueError, match="must be 1D arrays"):
            stats.dunnett(*samples, control=control_)

        # No obs in a sample
        samples_ = copy.deepcopy(samples)
        samples_[1] = []
        with pytest.raises(ValueError, match="at least 1 observation"):
            stats.dunnett(*samples_, control=control)

        # No obs in control
        control_ = []
        with pytest.raises(ValueError, match="at least 1 observation"):
            stats.dunnett(*samples, control=control_)

        res = stats.dunnett(*samples, control=control)
        with pytest.raises(ValueError, match="Confidence level must"):
            res.confidence_interval(confidence_level=3)

    @pytest.mark.filterwarnings("ignore:Computation of the confidence")
    @pytest.mark.parametrize('n_samples', [1, 2, 3])
    def test_shapes(self, n_samples):
        # Result arrays must have one entry per treated sample.
        rng = np.random.default_rng(689448934110805334)
        samples = rng.normal(size=(n_samples, 10))
        control = rng.normal(size=10)
        res = stats.dunnett(*samples, control=control, random_state=rng)
        assert res.statistic.shape == (n_samples,)
        assert res.pvalue.shape == (n_samples,)
        ci = res.confidence_interval()
        assert ci.low.shape == (n_samples,)
        assert ci.high.shape == (n_samples,)
| 17,826
| 43.017284
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_multivariate.py
|
"""
Test functions for multivariate normal distributions.
"""
import pickle
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_almost_equal, assert_equal,
assert_array_less, assert_)
import pytest
from pytest import raises as assert_raises
from .test_continuous_basic import check_distribution_rvs
import numpy
import numpy as np
import scipy.linalg
from scipy.stats._multivariate import (_PSD,
_lnB,
_cho_inv_batch,
multivariate_normal_frozen)
from scipy.stats import (multivariate_normal, multivariate_hypergeom,
matrix_normal, special_ortho_group, ortho_group,
random_correlation, unitary_group, dirichlet,
beta, wishart, multinomial, invwishart, chi2,
invgamma, norm, uniform, ks_2samp, kstest, binom,
hypergeom, multivariate_t, cauchy, normaltest,
random_table, uniform_direction, vonmises_fisher,
dirichlet_multinomial, vonmises)
from scipy.stats import _covariance, Covariance
from scipy import stats
from scipy.integrate import romb, qmc_quad, tplquad
from scipy.special import multigammaln
from scipy._lib._pep440 import Version
from .common_tests import check_random_state_property
from .data._mvt import _qsimvtv
from unittest.mock import patch
def assert_close(res, ref, *args, **kwargs):
    """Like ``assert_allclose`` but additionally require matching shapes.

    Extra positional/keyword arguments are forwarded to ``assert_allclose``
    (e.g. ``rtol``, ``atol``).
    """
    res = np.asarray(res)
    ref = np.asarray(ref)
    assert_allclose(res, ref, *args, **kwargs)
    assert_equal(res.shape, ref.shape)
class TestCovariance:
def test_input_validation(self):
message = "The input `precision` must be a square, two-dimensional..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaPrecision(np.ones(2))
message = "`precision.shape` must equal `covariance.shape`."
with pytest.raises(ValueError, match=message):
_covariance.CovViaPrecision(np.eye(3), covariance=np.eye(2))
message = "The input `diagonal` must be a one-dimensional array..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaDiagonal("alpaca")
message = "The input `cholesky` must be a square, two-dimensional..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaCholesky(np.ones(2))
message = "The input `eigenvalues` must be a one-dimensional..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaEigendecomposition(("alpaca", np.eye(2)))
message = "The input `eigenvectors` must be a square..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaEigendecomposition((np.ones(2), "alpaca"))
message = "The shapes of `eigenvalues` and `eigenvectors` must be..."
with pytest.raises(ValueError, match=message):
_covariance.CovViaEigendecomposition(([1, 2, 3], np.eye(2)))
_covariance_preprocessing = {"Diagonal": np.diag,
"Precision": np.linalg.inv,
"Cholesky": np.linalg.cholesky,
"Eigendecomposition": np.linalg.eigh,
"PSD": lambda x:
_PSD(x, allow_singular=True)}
_all_covariance_types = np.array(list(_covariance_preprocessing))
_matrices = {"diagonal full rank": np.diag([1, 2, 3]),
"general full rank": [[5, 1, 3], [1, 6, 4], [3, 4, 7]],
"diagonal singular": np.diag([1, 0, 3]),
"general singular": [[5, -1, 0], [-1, 5, 0], [0, 0, 0]]}
_cov_types = {"diagonal full rank": _all_covariance_types,
"general full rank": _all_covariance_types[1:],
"diagonal singular": _all_covariance_types[[0, -2, -1]],
"general singular": _all_covariance_types[-2:]}
@pytest.mark.parametrize("cov_type_name", _all_covariance_types[:-1])
def test_factories(self, cov_type_name):
A = np.diag([1, 2, 3])
x = [-4, 2, 5]
cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
preprocessing = self._covariance_preprocessing[cov_type_name]
factory = getattr(Covariance, f"from_{cov_type_name.lower()}")
res = factory(preprocessing(A))
ref = cov_type(preprocessing(A))
assert type(res) == type(ref)
assert_allclose(res.whiten(x), ref.whiten(x))
@pytest.mark.parametrize("matrix_type", list(_matrices))
@pytest.mark.parametrize("cov_type_name", _all_covariance_types)
def test_covariance(self, matrix_type, cov_type_name):
message = (f"CovVia{cov_type_name} does not support {matrix_type} "
"matrices")
if cov_type_name not in self._cov_types[matrix_type]:
pytest.skip(message)
A = self._matrices[matrix_type]
cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
preprocessing = self._covariance_preprocessing[cov_type_name]
psd = _PSD(A, allow_singular=True)
# test properties
cov_object = cov_type(preprocessing(A))
assert_close(cov_object.log_pdet, psd.log_pdet)
assert_equal(cov_object.rank, psd.rank)
assert_equal(cov_object.shape, np.asarray(A).shape)
assert_close(cov_object.covariance, np.asarray(A))
# test whitening/coloring 1D x
rng = np.random.default_rng(5292808890472453840)
x = rng.random(size=3)
res = cov_object.whiten(x)
ref = x @ psd.U
# res != ref in general; but res @ res == ref @ ref
assert_close(res @ res, ref @ ref)
if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
# CovViaPSD does not have _colorize
assert_close(cov_object.colorize(res), x)
# test whitening/coloring 3D x
x = rng.random(size=(2, 4, 3))
res = cov_object.whiten(x)
ref = x @ psd.U
assert_close((res**2).sum(axis=-1), (ref**2).sum(axis=-1))
if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
assert_close(cov_object.colorize(res), x)
@pytest.mark.parametrize("size", [None, tuple(), 1, (2, 4, 3)])
@pytest.mark.parametrize("matrix_type", list(_matrices))
@pytest.mark.parametrize("cov_type_name", _all_covariance_types)
def test_mvn_with_covariance(self, size, matrix_type, cov_type_name):
message = (f"CovVia{cov_type_name} does not support {matrix_type} "
"matrices")
if cov_type_name not in self._cov_types[matrix_type]:
pytest.skip(message)
A = self._matrices[matrix_type]
cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
preprocessing = self._covariance_preprocessing[cov_type_name]
mean = [0.1, 0.2, 0.3]
cov_object = cov_type(preprocessing(A))
mvn = multivariate_normal
dist0 = multivariate_normal(mean, A, allow_singular=True)
dist1 = multivariate_normal(mean, cov_object, allow_singular=True)
rng = np.random.default_rng(5292808890472453840)
x = rng.multivariate_normal(mean, A, size=size)
rng = np.random.default_rng(5292808890472453840)
x1 = mvn.rvs(mean, cov_object, size=size, random_state=rng)
rng = np.random.default_rng(5292808890472453840)
x2 = mvn(mean, cov_object, seed=rng).rvs(size=size)
if isinstance(cov_object, _covariance.CovViaPSD):
assert_close(x1, np.squeeze(x)) # for backward compatibility
assert_close(x2, np.squeeze(x))
else:
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
assert_close(x2, x1)
assert_close(mvn.pdf(x, mean, cov_object), dist0.pdf(x))
assert_close(dist1.pdf(x), dist0.pdf(x))
assert_close(mvn.logpdf(x, mean, cov_object), dist0.logpdf(x))
assert_close(dist1.logpdf(x), dist0.logpdf(x))
assert_close(mvn.entropy(mean, cov_object), dist0.entropy())
assert_close(dist1.entropy(), dist0.entropy())
    @pytest.mark.parametrize("size", [tuple(), (2, 4, 3)])
    @pytest.mark.parametrize("cov_type_name", _all_covariance_types)
    def test_mvn_with_covariance_cdf(self, size, cov_type_name):
        """CDF/log-CDF with a `Covariance` object must match the raw-matrix
        distribution."""
        # This is split from the test above because the cdf is slow to run
        # with all matrix types, and there's no need: _mvn.mvnun does the
        # calculation, so all a Covariance object needs to do is provide
        # the `covariance` attribute.
        matrix_type = "diagonal full rank"
        A = self._matrices[matrix_type]
        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
        preprocessing = self._covariance_preprocessing[cov_type_name]
        mean = [0.1, 0.2, 0.3]
        cov_object = cov_type(preprocessing(A))
        mvn = multivariate_normal
        dist0 = multivariate_normal(mean, A, allow_singular=True)
        dist1 = multivariate_normal(mean, cov_object, allow_singular=True)
        rng = np.random.default_rng(5292808890472453840)
        x = rng.multivariate_normal(mean, A, size=size)
        # Compare both the functional and frozen interfaces against the
        # raw-matrix reference distribution.
        assert_close(mvn.cdf(x, mean, cov_object), dist0.cdf(x))
        assert_close(dist1.cdf(x), dist0.cdf(x))
        assert_close(mvn.logcdf(x, mean, cov_object), dist0.logcdf(x))
        assert_close(dist1.logcdf(x), dist0.logcdf(x))
def test_covariance_instantiation(self):
message = "The `Covariance` class cannot be instantiated directly."
with pytest.raises(NotImplementedError, match=message):
Covariance()
@pytest.mark.filterwarnings("ignore::RuntimeWarning") # matrix not PSD
def test_gh9942(self):
# Originally there was a mistake in the `multivariate_normal_frozen`
# `rvs` method that caused all covariance objects to be processed as
# a `_CovViaPSD`. Ensure that this is resolved.
A = np.diag([1, 2, -1e-8])
n = A.shape[0]
mean = np.zeros(n)
# Error if the matrix is processed as a `_CovViaPSD`
with pytest.raises(ValueError, match="The input matrix must be..."):
multivariate_normal(mean, A).rvs()
# No error if it is provided as a `CovViaEigendecomposition`
seed = 3562050283508273023
rng1 = np.random.default_rng(seed)
rng2 = np.random.default_rng(seed)
cov = Covariance.from_eigendecomposition(np.linalg.eigh(A))
rv = multivariate_normal(mean, cov)
res = rv.rvs(random_state=rng1)
ref = multivariate_normal.rvs(mean, cov, random_state=rng2)
assert_equal(res, ref)
def _random_covariance(dim, evals, rng, singular=False):
# Generates random covariance matrix with dimensionality `dim` and
# eigenvalues `evals` using provided Generator `rng`. Randomly sets
# some evals to zero if `singular` is True.
A = rng.random((dim, dim))
A = A @ A.T
_, v = np.linalg.eigh(A)
if singular:
zero_eigs = rng.normal(size=dim) > 0
evals[zero_eigs] = 0
cov = v @ np.diag(evals) @ v.T
return cov
def _sample_orthonormal_matrix(n):
M = np.random.randn(n, n)
u, s, v = scipy.linalg.svd(M)
return u
class TestMultivariateNormal:
    """Tests for ``scipy.stats.multivariate_normal``: pdf/cdf and their
    logs, degenerate (singular) covariances, broadcasting, rvs, entropy,
    fitting, and interaction with `Covariance` objects."""
    def test_input_shape(self):
        # Mismatched x / mean / cov dimensions must raise ValueError.
        mu = np.arange(3)
        cov = np.identity(2)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
        assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov)
        assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov)
    def test_scalar_values(self):
        np.random.seed(1234)
        # When evaluated on scalar data, the pdf should return a scalar
        x, mean, cov = 1.5, 1.7, 2.5
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
        # When evaluated on a single vector, the pdf should return a scalar
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
        # When evaluated on scalar data, the cdf should return a scalar
        x, mean, cov = 1.5, 1.7, 2.5
        cdf = multivariate_normal.cdf(x, mean, cov)
        assert_equal(cdf.ndim, 0)
        # When evaluated on a single vector, the cdf should return a scalar
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
        cdf = multivariate_normal.cdf(x, mean, cov)
        assert_equal(cdf.ndim, 0)
    def test_logpdf(self):
        # Check that the log of the pdf is in fact the logpdf
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        d1 = multivariate_normal.logpdf(x, mean, cov)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, np.log(d2))
    def test_logpdf_default_values(self):
        # Check that the log of the pdf is in fact the logpdf
        # with default parameters Mean=None and cov = 1
        np.random.seed(1234)
        x = np.random.randn(5)
        d1 = multivariate_normal.logpdf(x)
        d2 = multivariate_normal.pdf(x)
        # check whether default values are being used
        d3 = multivariate_normal.logpdf(x, None, 1)
        d4 = multivariate_normal.pdf(x, None, 1)
        assert_allclose(d1, np.log(d2))
        assert_allclose(d3, np.log(d4))
    def test_logcdf(self):
        # Check that the log of the cdf is in fact the logcdf
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        d1 = multivariate_normal.logcdf(x, mean, cov)
        d2 = multivariate_normal.cdf(x, mean, cov)
        assert_allclose(d1, np.log(d2))
    def test_logcdf_default_values(self):
        # Check that the log of the cdf is in fact the logcdf
        # with default parameters Mean=None and cov = 1
        np.random.seed(1234)
        x = np.random.randn(5)
        d1 = multivariate_normal.logcdf(x)
        d2 = multivariate_normal.cdf(x)
        # check whether default values are being used
        d3 = multivariate_normal.logcdf(x, None, 1)
        d4 = multivariate_normal.cdf(x, None, 1)
        assert_allclose(d1, np.log(d2))
        assert_allclose(d3, np.log(d4))
    def test_rank(self):
        # Check that the rank is detected correctly.
        np.random.seed(1234)
        n = 4
        mean = np.random.randn(n)
        for expected_rank in range(1, n + 1):
            # s @ s.T has rank equal to the number of columns of s.
            s = np.random.randn(n, expected_rank)
            cov = np.dot(s, s.T)
            distn = multivariate_normal(mean, cov, allow_singular=True)
            assert_equal(distn.cov_object.rank, expected_rank)
    def test_degenerate_distributions(self):
        # A rank-k distribution embedded (and rotated) in n dimensions must
        # agree with the underlying k-dimensional distribution.
        for n in range(1, 5):
            z = np.random.randn(n)
            for k in range(1, n):
                # Sample a small covariance matrix.
                s = np.random.randn(k, k)
                cov_kk = np.dot(s, s.T)
                # Embed the small covariance matrix into a larger singular one.
                cov_nn = np.zeros((n, n))
                cov_nn[:k, :k] = cov_kk
                # Embed part of the vector in the same way
                x = np.zeros(n)
                x[:k] = z[:k]
                # Define a rotation of the larger low rank matrix.
                u = _sample_orthonormal_matrix(n)
                cov_rr = np.dot(u, np.dot(cov_nn, u.T))
                y = np.dot(u, x)
                # Check some identities.
                distn_kk = multivariate_normal(np.zeros(k), cov_kk,
                                               allow_singular=True)
                distn_nn = multivariate_normal(np.zeros(n), cov_nn,
                                               allow_singular=True)
                distn_rr = multivariate_normal(np.zeros(n), cov_rr,
                                               allow_singular=True)
                assert_equal(distn_kk.cov_object.rank, k)
                assert_equal(distn_nn.cov_object.rank, k)
                assert_equal(distn_rr.cov_object.rank, k)
                pdf_kk = distn_kk.pdf(x[:k])
                pdf_nn = distn_nn.pdf(x)
                pdf_rr = distn_rr.pdf(y)
                assert_allclose(pdf_kk, pdf_nn)
                assert_allclose(pdf_kk, pdf_rr)
                logpdf_kk = distn_kk.logpdf(x[:k])
                logpdf_nn = distn_nn.logpdf(x)
                logpdf_rr = distn_rr.logpdf(y)
                assert_allclose(logpdf_kk, logpdf_nn)
                assert_allclose(logpdf_kk, logpdf_rr)
                # Add an orthogonal component and find the density
                y_orth = y + u[:, -1]
                pdf_rr_orth = distn_rr.pdf(y_orth)
                logpdf_rr_orth = distn_rr.logpdf(y_orth)
                # Ensure that this has zero probability
                assert_equal(pdf_rr_orth, 0.0)
                assert_equal(logpdf_rr_orth, -np.inf)
    def test_degenerate_array(self):
        # Test that we can generate arrays of random variate from a degenerate
        # multivariate normal, and that the pdf for these samples is non-zero
        # (i.e. samples from the distribution lie on the subspace)
        k = 10
        for n in range(2, 6):
            for r in range(1, n):
                mn = np.zeros(n)
                # u @ u.T is a rank-r projection used as the covariance.
                u = _sample_orthonormal_matrix(n)[:, :r]
                vr = np.dot(u, u.T)
                X = multivariate_normal.rvs(mean=mn, cov=vr, size=k)
                pdf = multivariate_normal.pdf(X, mean=mn, cov=vr,
                                              allow_singular=True)
                assert_equal(pdf.size, k)
                assert np.all(pdf > 0.0)
                logpdf = multivariate_normal.logpdf(X, mean=mn, cov=vr,
                                                    allow_singular=True)
                assert_equal(logpdf.size, k)
                assert np.all(logpdf > -np.inf)
    def test_large_pseudo_determinant(self):
        # Check that large pseudo-determinants are handled appropriately.
        # Construct a singular diagonal covariance matrix
        # whose pseudo determinant overflows double precision.
        large_total_log = 1000.0
        npos = 100
        nzero = 2
        large_entry = np.exp(large_total_log / npos)
        n = npos + nzero
        cov = np.zeros((n, n), dtype=float)
        np.fill_diagonal(cov, large_entry)
        cov[-nzero:, -nzero:] = 0
        # Check some determinants.
        assert_equal(scipy.linalg.det(cov), 0)
        assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
        assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
                        (1, large_total_log))
        # Check the pseudo-determinant.
        psd = _PSD(cov)
        assert_allclose(psd.log_pdet, large_total_log)
    def test_broadcasting(self):
        np.random.seed(1234)
        n = 4
        # Construct a random covariance matrix.
        data = np.random.randn(n, n)
        cov = np.dot(data, data.T)
        mean = np.random.randn(n)
        # Construct an ndarray which can be interpreted as
        # a 2x3 array whose elements are random data vectors.
        X = np.random.randn(2, 3, n)
        # Check that multiple data points can be evaluated at once.
        desired_pdf = multivariate_normal.pdf(X, mean, cov)
        desired_cdf = multivariate_normal.cdf(X, mean, cov)
        for i in range(2):
            for j in range(3):
                actual = multivariate_normal.pdf(X[i, j], mean, cov)
                assert_allclose(actual, desired_pdf[i,j])
                # Repeat for cdf
                actual = multivariate_normal.cdf(X[i, j], mean, cov)
                assert_allclose(actual, desired_cdf[i,j], rtol=1e-3)
    def test_normal_1D(self):
        # The probability density function for a 1D normal variable should
        # agree with the standard normal distribution in scipy.stats.distributions
        x = np.linspace(0, 2, 10)
        mean, cov = 1.2, 0.9
        scale = cov**0.5
        d1 = norm.pdf(x, mean, scale)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, d2)
        # The same should hold for the cumulative distribution function
        d1 = norm.cdf(x, mean, scale)
        d2 = multivariate_normal.cdf(x, mean, cov)
        assert_allclose(d1, d2)
    def test_marginalization(self):
        # Integrating out one of the variables of a 2D Gaussian should
        # yield a 1D Gaussian
        mean = np.array([2.5, 3.5])
        cov = np.array([[.5, 0.2], [0.2, .6]])
        n = 2 ** 8 + 1  # Number of samples
        delta = 6 / (n - 1)  # Grid spacing
        v = np.linspace(0, 6, n)
        xv, yv = np.meshgrid(v, v)
        pos = np.empty((n, n, 2))
        pos[:, :, 0] = xv
        pos[:, :, 1] = yv
        pdf = multivariate_normal.pdf(pos, mean, cov)
        # Marginalize over x and y axis
        margin_x = romb(pdf, delta, axis=0)
        margin_y = romb(pdf, delta, axis=1)
        # Compare with standard normal distribution
        gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
        gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
        assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
        assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
    def test_frozen(self):
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        norm_frozen = multivariate_normal(mean, cov)
        assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
        assert_allclose(norm_frozen.logpdf(x),
                        multivariate_normal.logpdf(x, mean, cov))
        assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov))
        assert_allclose(norm_frozen.logcdf(x),
                        multivariate_normal.logcdf(x, mean, cov))
    @pytest.mark.parametrize(
        'covariance',
        [
            np.eye(2),
            Covariance.from_diagonal([1, 1]),
        ]
    )
    def test_frozen_multivariate_normal_exposes_attributes(self, covariance):
        # `mean` and `cov` attributes must be exposed whether the frozen
        # distribution was built from a raw matrix or a Covariance object.
        mean = np.ones((2,))
        cov_should_be = np.eye(2)
        norm_frozen = multivariate_normal(mean, covariance)
        assert np.allclose(norm_frozen.mean, mean)
        assert np.allclose(norm_frozen.cov, cov_should_be)
    def test_pseudodet_pinv(self):
        # Make sure that pseudo-inverse and pseudo-det agree on cutoff
        # Assemble random covariance matrix with large and small eigenvalues
        np.random.seed(1234)
        n = 7
        x = np.random.randn(n, n)
        cov = np.dot(x, x.T)
        s, u = scipy.linalg.eigh(cov)
        # Keep the random eigenvectors but impose a controlled spectrum.
        s = np.full(n, 0.5)
        s[0] = 1.0
        s[-1] = 1e-7
        cov = np.dot(u, np.dot(np.diag(s), u.T))
        # Set cond so that the lowest eigenvalue is below the cutoff
        cond = 1e-5
        psd = _PSD(cov, cond=cond)
        psd_pinv = _PSD(psd.pinv, cond=cond)
        # Check that the log pseudo-determinant agrees with the sum
        # of the logs of all but the smallest eigenvalue
        assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
        # Check that the pseudo-determinant of the pseudo-inverse
        # agrees with 1 / pseudo-determinant
        assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
    def test_exception_nonsquare_cov(self):
        cov = [[1, 2, 3], [4, 5, 6]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_nonfinite_cov(self):
        cov_nan = [[1, 0], [0, np.nan]]
        assert_raises(ValueError, _PSD, cov_nan)
        cov_inf = [[1, 0], [0, np.inf]]
        assert_raises(ValueError, _PSD, cov_inf)
    def test_exception_non_psd_cov(self):
        cov = [[1, 0], [0, -1]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_singular_cov(self):
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        # All-ones matrix is singular; without allow_singular this raises.
        cov = np.ones((5, 5))
        e = np.linalg.LinAlgError
        assert_raises(e, multivariate_normal, mean, cov)
        assert_raises(e, multivariate_normal.pdf, x, mean, cov)
        assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
        assert_raises(e, multivariate_normal.cdf, x, mean, cov)
        assert_raises(e, multivariate_normal.logcdf, x, mean, cov)
        # Message used to be "singular matrix", but this is more accurate.
        # See gh-15508
        cov = [[1., 0.], [1., 1.]]
        msg = "When `allow_singular is False`, the input matrix"
        with pytest.raises(np.linalg.LinAlgError, match=msg):
            multivariate_normal(cov=cov)
    def test_R_values(self):
        # Compare the multivariate pdf with some values precomputed
        # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
        # The values below were generated by the following R-script:
        # > library(mnormt)
        # > x <- seq(0, 2, length=5)
        # > y <- 3*x - 2
        # > z <- x + cos(y)
        # > mu <- c(1, 3, 2)
        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
        # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
        r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
                          0.0103803050, 0.0140250800])
        x = np.linspace(0, 2, 5)
        y = 3 * x - 2
        z = x + np.cos(y)
        r = np.array([x, y, z]).T
        mean = np.array([1, 3, 2], 'd')
        cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
        pdf = multivariate_normal.pdf(r, mean, cov)
        assert_allclose(pdf, r_pdf, atol=1e-10)
        # Compare the multivariate cdf with some values precomputed
        # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
        # The values below were generated by the following R-script:
        # > library(mnormt)
        # > x <- seq(0, 2, length=5)
        # > y <- 3*x - 2
        # > z <- x + cos(y)
        # > mu <- c(1, 3, 2)
        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
        # > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma)
        r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761,
                          0.1063242573, 0.2501068509])
        cdf = multivariate_normal.cdf(r, mean, cov)
        assert_allclose(cdf, r_cdf, atol=2e-5)
        # Also test bivariate cdf with some values precomputed
        # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
        # The values below were generated by the following R-script:
        # > library(mnormt)
        # > x <- seq(0, 2, length=5)
        # > y <- 3*x - 2
        # > mu <- c(1, 3)
        # > Sigma <- matrix(c(1,2,2,5), 2, 2)
        # > r_cdf2 <- pmnorm(cbind(x,y), mu, Sigma)
        r_cdf2 = np.array([0.01262147, 0.05838989, 0.18389571,
                           0.40696599, 0.66470577])
        r2 = np.array([x, y]).T
        mean2 = np.array([1, 3], 'd')
        cov2 = np.array([[1, 2], [2, 5]], 'd')
        cdf2 = multivariate_normal.cdf(r2, mean2, cov2)
        assert_allclose(cdf2, r_cdf2, atol=1e-5)
    def test_multivariate_normal_rvs_zero_covariance(self):
        # A zero covariance collapses the distribution onto its mean.
        mean = np.zeros(2)
        covariance = np.zeros((2, 2))
        model = multivariate_normal(mean, covariance, allow_singular=True)
        sample = model.rvs()
        assert_equal(sample, [0, 0])
    def test_rvs_shape(self):
        # Check that rvs parses the mean and covariance correctly, and returns
        # an array of the right shape
        N = 300
        d = 4
        sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
        assert_equal(sample.shape, (N, d))
        sample = multivariate_normal.rvs(mean=None,
                                         cov=np.array([[2, .1], [.1, 1]]),
                                         size=N)
        assert_equal(sample.shape, (N, 2))
        u = multivariate_normal(mean=0, cov=1)
        sample = u.rvs(N)
        assert_equal(sample.shape, (N, ))
    def test_large_sample(self):
        # Generate large sample and compare sample mean and sample covariance
        # with mean and covariance matrix.
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        size = 5000
        sample = multivariate_normal.rvs(mean, cov, size)
        assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1)
        assert_allclose(sample.mean(0), mean, rtol=1e-1)
    def test_entropy(self):
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        rv = multivariate_normal(mean, cov)
        # Check that frozen distribution agrees with entropy function
        assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
        # Compare entropy with manually computed expression involving
        # the sum of the logs of the eigenvalues of the covariance matrix
        eigs = np.linalg.eig(cov)[0]
        desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
        assert_almost_equal(desired, rv.entropy())
    def test_lnB(self):
        # `_lnB` is presumably the log of the multivariate beta function
        # used as the Dirichlet normalization constant — TODO confirm
        # against its definition in scipy.stats._multivariate.
        alpha = np.array([1, 1, 1])
        desired = .5  # e^lnB = 1/2 for [1, 1, 1]
        assert_almost_equal(np.exp(_lnB(alpha)), desired)
    def test_cdf_with_lower_limit_arrays(self):
        # test CDF with lower limit in several dimensions
        rng = np.random.default_rng(2408071309372769818)
        mean = [0, 0]
        cov = np.eye(2)
        a = rng.random((4, 3, 2))*6 - 3
        b = rng.random((4, 3, 2))*6 - 3
        cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
        # Reference via inclusion-exclusion over the rectangle corners.
        cdf2a = multivariate_normal.cdf(b, mean, cov)
        cdf2b = multivariate_normal.cdf(a, mean, cov)
        ab1 = np.concatenate((a[..., 0:1], b[..., 1:2]), axis=-1)
        ab2 = np.concatenate((a[..., 1:2], b[..., 0:1]), axis=-1)
        cdf2ab1 = multivariate_normal.cdf(ab1, mean, cov)
        cdf2ab2 = multivariate_normal.cdf(ab2, mean, cov)
        cdf2 = cdf2a + cdf2b - cdf2ab1 - cdf2ab2
        assert_allclose(cdf1, cdf2)
    def test_cdf_with_lower_limit_consistency(self):
        # check that multivariate normal CDF functions are consistent
        rng = np.random.default_rng(2408071309372769818)
        mean = rng.random(3)
        cov = rng.random((3, 3))
        cov = cov @ cov.T
        a = rng.random((2, 3))*6 - 3
        b = rng.random((2, 3))*6 - 3
        cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
        cdf2 = multivariate_normal(mean, cov).cdf(b, lower_limit=a)
        cdf3 = np.exp(multivariate_normal.logcdf(b, mean, cov, lower_limit=a))
        cdf4 = np.exp(multivariate_normal(mean, cov).logcdf(b, lower_limit=a))
        assert_allclose(cdf2, cdf1, rtol=1e-4)
        assert_allclose(cdf3, cdf1, rtol=1e-4)
        assert_allclose(cdf4, cdf1, rtol=1e-4)
    def test_cdf_signs(self):
        # check that sign of output is correct when np.any(lower > x)
        mean = np.zeros(3)
        cov = np.eye(3)
        b = [[1, 1, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0]]
        a = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]
        # when odd number of elements of b < a, output is negative
        expected_signs = np.array([1, -1, -1, 1])
        cdf = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
        assert_allclose(cdf, cdf[0]*expected_signs)
    def test_mean_cov(self):
        # test the interaction between a Covariance object and mean
        P = np.diag(1 / np.array([1, 2, 3]))
        cov_object = _covariance.CovViaPrecision(P)
        # Dimension mismatch between mean and the Covariance object raises.
        message = "`cov` represents a covariance matrix in 3 dimensions..."
        with pytest.raises(ValueError, match=message):
            multivariate_normal.entropy([0, 0], cov_object)
        with pytest.raises(ValueError, match=message):
            multivariate_normal([0, 0], cov_object)
        # Default mean is the zero vector; scalar mean is broadcast.
        x = [0.5, 0.5, 0.5]
        ref = multivariate_normal.pdf(x, [0, 0, 0], cov_object)
        assert_equal(multivariate_normal.pdf(x, cov=cov_object), ref)
        ref = multivariate_normal.pdf(x, [1, 1, 1], cov_object)
        assert_equal(multivariate_normal.pdf(x, 1, cov=cov_object), ref)
    def test_fit_error(self):
        data = [1, 3]
        error_msg = "`x` must be two-dimensional."
        with pytest.raises(ValueError, match=error_msg):
            multivariate_normal.fit(data)
    @pytest.mark.parametrize('dim', (3, 5))
    def test_fit_correctness(self, dim):
        # MLE fit must match the sample mean and the biased (ddof=0)
        # sample covariance.
        rng = np.random.default_rng(4385269356937404)
        x = rng.random((100, dim))
        mean_est, cov_est = multivariate_normal.fit(x)
        mean_ref, cov_ref = np.mean(x, axis=0), np.cov(x.T, ddof=0)
        assert_allclose(mean_est, mean_ref, atol=1e-15)
        assert_allclose(cov_est, cov_ref, rtol=1e-15)
class TestMatrixNormal:
    """Tests for ``scipy.stats.matrix_normal``: input validation, default
    and expanded covariance arguments, the frozen interface, equivalence
    with the vectorized multivariate normal, and sample moments."""
    def test_bad_input(self):
        # Check that bad inputs raise errors
        num_rows = 4
        num_cols = 3
        M = np.full((num_rows,num_cols), 0.3)
        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
        # Incorrect dimensions
        assert_raises(ValueError, matrix_normal, np.zeros((5,4,3)))
        assert_raises(ValueError, matrix_normal, M, np.zeros(10), V)
        assert_raises(ValueError, matrix_normal, M, U, np.zeros(10))
        assert_raises(ValueError, matrix_normal, M, U, U)
        assert_raises(ValueError, matrix_normal, M, V, V)
        assert_raises(ValueError, matrix_normal, M.T, U, V)
        e = np.linalg.LinAlgError
        # Singular covariance for the rvs method of a non-frozen instance
        assert_raises(e, matrix_normal.rvs,
                      M, U, np.ones((num_cols, num_cols)))
        assert_raises(e, matrix_normal.rvs,
                      M, np.ones((num_rows, num_rows)), V)
        # Singular covariance for a frozen instance
        assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols)))
        assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V)
    def test_default_inputs(self):
        # Check that default argument handling works
        num_rows = 4
        num_cols = 3
        M = np.full((num_rows,num_cols), 0.3)
        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
        Z = np.zeros((num_rows, num_cols))
        Zr = np.zeros((num_rows, 1))
        Zc = np.zeros((1, num_cols))
        Ir = np.identity(num_rows)
        Ic = np.identity(num_cols)
        I1 = np.identity(1)
        # Shapes are inferred from whichever parameters are supplied.
        assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U).shape,
                     (num_rows, 1))
        assert_equal(matrix_normal.rvs(colcov=V).shape,
                     (1, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))
        # Omitted mean defaults to zeros; omitted covariances to identity.
        assert_equal(matrix_normal(mean=M).rowcov, Ir)
        assert_equal(matrix_normal(mean=M).colcov, Ic)
        assert_equal(matrix_normal(rowcov=U).mean, Zr)
        assert_equal(matrix_normal(rowcov=U).colcov, I1)
        assert_equal(matrix_normal(colcov=V).mean, Zc)
        assert_equal(matrix_normal(colcov=V).rowcov, I1)
        assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
        assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
        assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
    def test_covariance_expansion(self):
        # Check that covariance can be specified with scalar or vector
        num_rows = 4
        num_cols = 3
        M = np.full((num_rows, num_cols), 0.3)
        Uv = np.full(num_rows, 0.2)
        Us = 0.2
        Vv = np.full(num_cols, 0.1)
        Vs = 0.1
        Ir = np.identity(num_rows)
        Ic = np.identity(num_cols)
        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov,
                     0.2*Ir)
        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov,
                     0.1*Ic)
        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov,
                     0.2*Ir)
        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov,
                     0.1*Ic)
    def test_frozen_matrix_normal(self):
        # Frozen and non-frozen interfaces must agree for a common seed.
        for i in range(1,5):
            for j in range(1,5):
                M = np.full((i,j), 0.3)
                U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
                V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
                rvs1 = frozen.rvs(random_state=1234)
                rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V,
                                         random_state=1234)
                assert_equal(rvs1, rvs2)
                X = frozen.rvs(random_state=1234)
                pdf1 = frozen.pdf(X)
                pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(pdf1, pdf2)
                logpdf1 = frozen.logpdf(X)
                logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(logpdf1, logpdf2)
    def test_matches_multivariate(self):
        # Check that the pdfs match those obtained by vectorising and
        # treating as a multivariate normal.
        for i in range(1,5):
            for j in range(1,5):
                M = np.full((i,j), 0.3)
                U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
                V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
                X = frozen.rvs(random_state=1234)
                pdf1 = frozen.pdf(X)
                logpdf1 = frozen.logpdf(X)
                entropy1 = frozen.entropy()
                # vec(X) ~ N(vec(M), kron(V, U)) for column-major vec.
                vecX = X.T.flatten()
                vecM = M.T.flatten()
                cov = np.kron(V,U)
                pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov)
                logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
                entropy2 = multivariate_normal.entropy(mean=vecM, cov=cov)
                assert_allclose(pdf1, pdf2, rtol=1E-10)
                assert_allclose(logpdf1, logpdf2, rtol=1E-10)
                assert_allclose(entropy1, entropy2)
    def test_array_input(self):
        # Check array of inputs has the same output as the separate entries.
        num_rows = 4
        num_cols = 3
        M = np.full((num_rows,num_cols), 0.3)
        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
        N = 10
        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
        X1 = frozen.rvs(size=N, random_state=1234)
        X2 = frozen.rvs(size=N, random_state=4321)
        X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0)
        assert_equal(X.shape, (2, N, num_rows, num_cols))
        array_logpdf = frozen.logpdf(X)
        assert_equal(array_logpdf.shape, (2, N))
        for i in range(2):
            for j in range(N):
                separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M,
                                                       rowcov=U, colcov=V)
                assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10)
    def test_moments(self):
        # Check that the sample moments match the parameters
        num_rows = 4
        num_cols = 3
        M = np.full((num_rows,num_cols), 0.3)
        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
        N = 1000
        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
        X = frozen.rvs(size=N, random_state=1234)
        sample_mean = np.mean(X,axis=0)
        assert_allclose(sample_mean, M, atol=0.1)
        sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T)
        assert_allclose(sample_colcov, V, atol=0.1)
        sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape(
                                                    N*num_cols,num_rows).T)
        assert_allclose(sample_rowcov, U, atol=0.1)
    def test_samples(self):
        # Regression test to ensure that we always generate the same stream of
        # random variates.
        actual = matrix_normal.rvs(
            mean=np.array([[1, 2], [3, 4]]),
            rowcov=np.array([[4, -1], [-1, 2]]),
            colcov=np.array([[5, 1], [1, 10]]),
            random_state=np.random.default_rng(0),
            size=2
        )
        expected = np.array(
            [[[1.56228264238181, -1.24136424071189],
              [2.46865788392114, 6.22964440489445]],
             [[3.86405716144353, 10.73714311429529],
              [2.59428444080606, 5.79987854490876]]]
        )
        assert_allclose(actual, expected)
class TestDirichlet:
    """Tests for ``scipy.stats.dirichlet``: the frozen interface,
    validation of alpha and of points on the simplex, moments, and
    consistency with the beta distribution in two dimensions."""
    def test_frozen_dirichlet(self):
        # Frozen distribution must agree with the functional interface.
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        assert_equal(d.var(), dirichlet.var(alpha))
        assert_equal(d.mean(), dirichlet.mean(alpha))
        assert_equal(d.entropy(), dirichlet.entropy(alpha))
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
    def test_numpy_rvs_shape_compatibility(self):
        # Samples are accepted with components along the first axis
        # (transposed relative to np.random.dirichlet output).
        np.random.seed(2846)
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.random.dirichlet(alpha, size=7)
        assert_equal(x.shape, (7, 3))
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
        dirichlet.pdf(x.T, alpha)
        dirichlet.pdf(x.T[:-1], alpha)
        dirichlet.logpdf(x.T, alpha)
        dirichlet.logpdf(x.T[:-1], alpha)
    def test_alpha_with_zeros(self):
        np.random.seed(2846)
        alpha = [1.0, 0.0, 3.0]
        # don't pass invalid alpha to np.random.dirichlet
        x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_with_negative_entries(self):
        np.random.seed(2846)
        alpha = [1.0, -2.0, 3.0]
        # don't pass invalid alpha to np.random.dirichlet
        x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_zeros(self):
        # Zero components are allowed when the matching alpha > 1;
        # for the flat Dirichlet the density is (K-1)! = 6.
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 0.0, 0.2, 0.7])
        dirichlet.pdf(x, alpha)
        dirichlet.logpdf(x, alpha)
        alpha = np.array([1.0, 1.0, 1.0, 1.0])
        assert_almost_equal(dirichlet.pdf(x, alpha), 6)
        assert_almost_equal(dirichlet.logpdf(x, alpha), np.log(6))
    def test_data_with_zeros_and_small_alpha(self):
        # A zero component where alpha < 1 makes the density infinite.
        alpha = np.array([1.0, 0.5, 3.0, 4.0])
        x = np.array([0.1, 0.0, 0.2, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_negative_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, -0.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_too_large_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 1.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_too_deep_c(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.full((2, 7, 7), 1 / 14)
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_too_deep(self):
        alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
        x = np.full((2, 2, 7), 1 / 4)
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_correct_depth(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.full((3, 7), 1 / 3)
        dirichlet.pdf(x, alpha)
        dirichlet.logpdf(x, alpha)
    def test_non_simplex_data(self):
        # Points whose components do not sum to 1 are rejected.
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.full((3, 7), 1 / 2)
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_short(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.full((2, 7), 1 / 2)
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_long(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.full((5, 7), 1 / 5)
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_mean_var_cov(self):
        # Reference values calculated by hand and confirmed with Mathematica, e.g.
        # `Covariance[DirichletDistribution[{ 1, 0.8, 0.2, 10^-300}]]`
        alpha = np.array([1., 0.8, 0.2])
        d = dirichlet(alpha)
        expected_mean = [0.5, 0.4, 0.1]
        expected_var = [1. / 12., 0.08, 0.03]
        expected_cov = [
                [ 1. / 12, -1. / 15, -1. / 60],
                [-1. / 15,  2. / 25, -1. / 75],
                [-1. / 60, -1. / 75,  3. / 100],
        ]
        assert_array_almost_equal(d.mean(), expected_mean)
        assert_array_almost_equal(d.var(), expected_var)
        assert_array_almost_equal(d.cov(), expected_cov)
    def test_scalar_values(self):
        alpha = np.array([0.2])
        d = dirichlet(alpha)
        # For alpha of length 1, mean and var should be scalar instead of array
        assert_equal(d.mean().ndim, 0)
        assert_equal(d.var().ndim, 0)
        assert_equal(d.pdf([1.]).ndim, 0)
        assert_equal(d.logpdf([1.]).ndim, 0)
    def test_K_and_K_minus_1_calls_equal(self):
        # Test that calls with K and K-1 entries yield the same results.
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
    def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors as matrix work
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        num_multiple = 5
        xm = None
        for i in range(num_tests):
            for m in range(num_multiple):
                x = np.random.uniform(10e-10, 100, n)
                x /= np.sum(x)
                if xm is not None:
                    xm = np.vstack((xm, x))
                else:
                    xm = x
            rm = d.pdf(xm.T)
            rs = None
            for xs in xm:
                r = d.pdf(xs)
                if rs is not None:
                    rs = np.append(rs, r)
                else:
                    rs = r
            assert_array_almost_equal(rm, rs)
    def test_2D_dirichlet_is_beta(self):
        # A two-component Dirichlet is a beta distribution.
        np.random.seed(2846)
        alpha = np.random.uniform(10e-10, 100, 2)
        d = dirichlet(alpha)
        b = beta(alpha[0], alpha[1])
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, 2)
            x /= np.sum(x)
            assert_almost_equal(b.pdf(x), d.pdf([x]))
        assert_almost_equal(b.mean(), d.mean()[0])
        assert_almost_equal(b.var(), d.var()[0])
def test_multivariate_normal_dimensions_mismatch():
    """Mean/covariance size mismatch must raise an informative ValueError.

    Regression test for GH #3493: a mean of length M with an (N, N)
    covariance, M != N, raises ValueError with a "Dimension mismatch"
    message.
    """
    mu = np.array([0.0, 0.0])
    sigma = np.array([[1.0]])

    assert_raises(ValueError, multivariate_normal, mu, sigma)

    # A simple check that the right error message was passed along. Checking
    # that the entire message is there, word for word, would be somewhat
    # fragile, so we just check for the leading part.
    msg = "Dimension mismatch"
    try:
        multivariate_normal(mu, sigma)
    except ValueError as e:
        assert_equal(str(e)[:len(msg)], msg)
    else:
        # Previously a silent pass: if no exception was raised, the message
        # check above was simply skipped. Fail explicitly instead.
        raise AssertionError("ValueError was not raised")
class TestWishart:
    """Tests for the Wishart distribution (`scipy.stats.wishart`)."""

    def test_scale_dimensions(self):
        """Scalar, iterable, 0-d, 1-d and 2-d scale inputs are accepted."""
        # Test that we can call the Wishart with various scale dimensions

        # Test case: dim=1, scale=1
        true_scale = np.array(1, ndmin=2)
        scales = [
            1,                     # scalar
            [1],                   # iterable
            np.array(1),           # 0-dim
            np.r_[1],              # 1-dim
            np.array(1, ndmin=2)   # 2-dim
        ]
        for scale in scales:
            w = wishart(1, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)

        # Test case: dim=2, scale=[[1,0]
        #                          [0,2]
        true_scale = np.array([[1,0],
                               [0,2]])
        scales = [
            [1,2],             # iterable
            np.r_[1,2],        # 1-dim
            np.array([[1,0],   # 2-dim
                      [0,2]])
        ]
        for scale in scales:
            w = wishart(2, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)

        # We cannot call with a df < dim - 1
        assert_raises(ValueError, wishart, 1, np.eye(2))

        # But we can call with dim - 1 < df < dim
        wishart(1.1, np.eye(2))  # no error

        # see gh-5562
        # We cannot call with a 3-dimension array
        scale = np.array(1, ndmin=3)
        assert_raises(ValueError, wishart, 1, scale)

    def test_quantile_dimensions(self):
        """Equivalent quantile representations give identical pdf values."""
        # Test that we can call the Wishart rvs with various quantile dimensions

        # If dim == 1, consider x.shape = [1,1,1]
        X = [
            1,                      # scalar
            [1],                    # iterable
            np.array(1),            # 0-dim
            np.r_[1],               # 1-dim
            np.array(1, ndmin=2),   # 2-dim
            np.array([1], ndmin=3)  # 3-dim
        ]

        w = wishart(1,1)
        density = w.pdf(np.array(1, ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)

        # If dim == 1, consider x.shape = [1,1,*]
        X = [
            [1,2,3],                    # iterable
            np.r_[1,2,3],               # 1-dim
            np.array([1,2,3], ndmin=3)  # 3-dim
        ]

        w = wishart(1,1)
        density = w.pdf(np.array([1,2,3], ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)

        # If dim == 2, consider x.shape = [2,2,1]
        # where x[:,:,*] = np.eye(1)*2
        X = [
            2,                  # scalar
            [2,2],              # iterable
            np.array(2),        # 0-dim
            np.r_[2,2],         # 1-dim
            np.array([[2,0],
                      [0,2]]),  # 2-dim
            np.array([[2,0],
                      [0,2]])[:,:,np.newaxis]  # 3-dim
        ]

        w = wishart(2,np.eye(2))
        density = w.pdf(np.array([[2,0],
                                  [0,2]])[:,:,np.newaxis])
        for x in X:
            assert_equal(w.pdf(x), density)

    def test_frozen(self):
        """Frozen and non-frozen Wishart must give identical answers."""
        # Test that the frozen and non-frozen Wishart gives the same answers

        # Construct an arbitrary positive definite scale matrix
        dim = 4
        scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
        scale = np.dot(scale.T, scale)

        # Construct a collection of positive definite matrices to test the PDF
        X = []
        for i in range(5):
            x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
            x = np.dot(x.T, x)
            X.append(x)
        X = np.array(X).T

        # Construct a 1D and 2D set of parameters
        parameters = [
            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
            (10, scale, X)
        ]

        for (df, scale, x) in parameters:
            w = wishart(df, scale)
            assert_equal(w.var(), wishart.var(df, scale))
            assert_equal(w.mean(), wishart.mean(df, scale))
            assert_equal(w.mode(), wishart.mode(df, scale))
            assert_equal(w.entropy(), wishart.entropy(df, scale))
            assert_equal(w.pdf(x), wishart.pdf(x, df, scale))

    def test_1D_is_chisquared(self):
        # The 1-dimensional Wishart with an identity scale matrix is just a
        # chi-squared distribution.
        # Test variance, mean, entropy, pdf
        # Kolgomorov-Smirnov test for rvs
        np.random.seed(482974)

        sn = 500
        dim = 1
        scale = np.eye(dim)

        df_range = np.arange(1, 10, 2, dtype=float)
        X = np.linspace(0.1,10,num=10)
        for df in df_range:
            w = wishart(df, scale)
            c = chi2(df)

            # Statistics
            assert_allclose(w.var(), c.var())
            assert_allclose(w.mean(), c.mean())
            assert_allclose(w.entropy(), c.entropy())

            # PDF
            assert_allclose(w.pdf(X), c.pdf(X))

            # rvs
            rvs = w.rvs(size=sn)
            args = (df,)
            alpha = 0.01
            check_distribution_rvs('chi2', args, alpha, rvs)

    def test_is_scaled_chisquared(self):
        # The 2-dimensional Wishart with an arbitrary scale matrix can be
        # transformed to a scaled chi-squared distribution.
        # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
        # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
        np.random.seed(482974)

        sn = 500
        df = 10
        dim = 4
        # Construct an arbitrary positive definite matrix
        scale = np.diag(np.arange(4)+1)
        scale[np.tril_indices(4, k=-1)] = np.arange(6)
        scale = np.dot(scale.T, scale)
        # Use :math:`\lambda = [1, \dots, 1]'`
        lamda = np.ones((dim,1))
        sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
        w = wishart(df, sigma_lamda)
        c = chi2(df, scale=sigma_lamda)

        # Statistics
        assert_allclose(w.var(), c.var())
        assert_allclose(w.mean(), c.mean())
        assert_allclose(w.entropy(), c.entropy())

        # PDF
        X = np.linspace(0.1,10,num=10)
        assert_allclose(w.pdf(X), c.pdf(X))

        # rvs
        rvs = w.rvs(size=sn)
        args = (df,0,sigma_lamda)
        alpha = 0.01
        check_distribution_rvs('chi2', args, alpha, rvs)
class TestMultinomial:
    """Tests for the multinomial distribution (`scipy.stats.multinomial`)."""

    def test_logpmf(self):
        """Spot-check logpmf, including the degenerate n=0 cases."""
        vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7))
        assert_allclose(vals1, -1.483270127243324, rtol=1e-8)

        vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
        assert vals2 == -np.inf

        vals3 = multinomial.logpmf([0, 0], 0, [.3, .7])
        assert vals3 == 0

        # Invalid probabilities produce nan.
        vals4 = multinomial.logpmf([3, 4], 0, [-2, 3])
        assert_allclose(vals4, np.nan, rtol=1e-8)

    def test_reduces_binomial(self):
        # test that the multinomial pmf reduces to the binomial pmf in the 2d
        # case
        val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
        val2 = binom.logpmf(3, 7, 0.3)
        assert_allclose(val1, val2, rtol=1e-8)

        val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9))
        val2 = binom.pmf(6, 14, 0.1)
        assert_allclose(val1, val2, rtol=1e-8)

    def test_R(self):
        # test against the values produced by this R code
        # (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html)
        # X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
        # X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
        # X
        # apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5)))
        n, p = 3, [1./8, 2./8, 5./8]
        r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375,
                  (2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125,
                  (0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500,
                  (2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500,
                  (1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000}
        for x in r_vals:
            assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14)

    @pytest.mark.parametrize("n", [0, 3])
    def test_rvs_np(self, n):
        # test that .rvs agrees w/numpy
        sc_rvs = multinomial.rvs(n, [1/4.]*3, size=7, random_state=123)
        rndm = np.random.RandomState(123)
        np_rvs = rndm.multinomial(n, [1/4.]*3, size=7)
        assert_equal(sc_rvs, np_rvs)

    def test_pmf(self):
        """Spot-check pmf over scalars, batches and edge cases."""
        vals0 = multinomial.pmf((5,), 5, (1,))
        assert_allclose(vals0, 1, rtol=1e-8)

        vals1 = multinomial.pmf((3,4), 7, (.3, .7))
        assert_allclose(vals1, .22689449999999994, rtol=1e-8)

        vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8,
                                (.1, .9))
        assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8)

        # NOTE(review): comparison against an uninitialized 0-d np.empty([]);
        # passes because the result is empty — np.empty(0) would be clearer.
        x = np.empty((0,2), dtype=np.float64)
        vals3 = multinomial.pmf(x, 4, (.3, .7))
        assert_equal(vals3, np.empty([], dtype=np.float64))

        vals4 = multinomial.pmf([1,2], 4, (.3, .7))
        assert_allclose(vals4, 0, rtol=1e-8)

        vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0])
        assert_allclose(vals5, 0.219478737997, rtol=1e-8)

        vals5 = multinomial.pmf([0, 0, 0], 0, [2/3.0, 1/3.0, 0])
        assert vals5 == 1

        vals6 = multinomial.pmf([2, 1, 0], 0, [2/3.0, 1/3.0, 0])
        assert vals6 == 0

    def test_pmf_broadcasting(self):
        """pmf must broadcast x, n and p against one another."""
        vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]])
        assert_allclose(vals0, [.243, .384], rtol=1e-8)

        vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9])
        assert_allclose(vals1, [.243, 0], rtol=1e-8)

        vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9])
        assert_allclose(vals2, [[.243, 0]], rtol=1e-8)

        vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9])
        assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8)

        vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9])
        assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8)

    @pytest.mark.parametrize("n", [0, 5])
    def test_cov(self, n):
        """Covariance matches n*(diag(p) - p p')."""
        cov1 = multinomial.cov(n, (.2, .3, .5))
        cov2 = [[n*.2*.8, -n*.2*.3, -n*.2*.5],
                [-n*.3*.2, n*.3*.7, -n*.3*.5],
                [-n*.5*.2, -n*.5*.3, n*.5*.5]]
        assert_allclose(cov1, cov2, rtol=1e-8)

    def test_cov_broadcasting(self):
        """cov must broadcast over stacked n and p inputs."""
        cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]])
        cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]]
        assert_allclose(cov1, cov2, rtol=1e-8)

        cov3 = multinomial.cov([4, 5], [.1, .9])
        cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]]
        assert_allclose(cov3, cov4, rtol=1e-8)

        cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
        cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]],
                [[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]]
        assert_allclose(cov5, cov6, rtol=1e-8)

    @pytest.mark.parametrize("n", [0, 2])
    def test_entropy(self, n):
        # this is equivalent to a binomial distribution with n=2, so the
        # entropy .77899774929 is easily computed "by hand"
        ent0 = multinomial.entropy(n, [.2, .8])
        assert_allclose(ent0, binom.entropy(n, .2), rtol=1e-8)

    def test_entropy_broadcasting(self):
        """entropy must broadcast over stacked n and p inputs."""
        ent0 = multinomial.entropy([2, 3], [.2, .3])
        assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)],
                        rtol=1e-8)

        ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]])
        assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)],
                        rtol=1e-8)

        ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]])
        assert_allclose(ent2,
                        [[binom.entropy(7, .3), binom.entropy(7, .4)],
                         [binom.entropy(8, .3), binom.entropy(8, .4)]],
                        rtol=1e-8)

    @pytest.mark.parametrize("n", [0, 5])
    def test_mean(self, n):
        """Mean equals n * p."""
        mean1 = multinomial.mean(n, [.2, .8])
        assert_allclose(mean1, [n*.2, n*.8], rtol=1e-8)

    def test_mean_broadcasting(self):
        """mean must broadcast over stacked n inputs."""
        mean1 = multinomial.mean([5, 6], [.2, .8])
        assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8)

    def test_frozen(self):
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        n = 12
        pvals = (.1, .2, .3, .4)
        x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]]
        x = np.asarray(x, dtype=np.float64)
        mn_frozen = multinomial(n, pvals)
        assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals))
        assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals))
        assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals))

    def test_gh_11860(self):
        # gh-11860 reported cases in which the adjustments made by multinomial
        # to the last element of `p` can cause `nan`s even when the input is
        # essentially valid. Check that a pathological case returns a finite,
        # nonzero result. (This would fail in main before the PR.)
        n = 88
        rng = np.random.default_rng(8879715917488330089)
        p = rng.random(n)
        p[-1] = 1e-30
        p /= np.sum(p)
        x = np.ones(n)
        logpmf = multinomial.logpmf(x, n, p)
        assert np.isfinite(logpmf)
class TestInvwishart:
    """Tests for the inverse Wishart distribution (`scipy.stats.invwishart`)."""

    def test_frozen(self):
        # Test that the frozen and non-frozen inverse Wishart gives the same
        # answers

        # Construct an arbitrary positive definite scale matrix
        dim = 4
        scale = np.diag(np.arange(dim)+1)
        # NOTE(review): float-valued arange length (dim*(dim-1)/2) — the
        # analogous TestWishart.test_frozen uses integer `//`; values agree.
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
        scale = np.dot(scale.T, scale)

        # Construct a collection of positive definite matrices to test the PDF
        X = []
        for i in range(5):
            x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
            x = np.dot(x.T, x)
            X.append(x)
        X = np.array(X).T

        # Construct a 1D and 2D set of parameters
        parameters = [
            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
            (10, scale, X)
        ]

        for (df, scale, x) in parameters:
            iw = invwishart(df, scale)
            assert_equal(iw.var(), invwishart.var(df, scale))
            assert_equal(iw.mean(), invwishart.mean(df, scale))
            assert_equal(iw.mode(), invwishart.mode(df, scale))
            assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))

    def test_1D_is_invgamma(self):
        # The 1-dimensional inverse Wishart with an identity scale matrix is
        # just an inverse gamma distribution.
        # Test variance, mean, pdf, entropy
        # Kolgomorov-Smirnov test for rvs
        np.random.seed(482974)

        sn = 500
        dim = 1
        scale = np.eye(dim)

        df_range = np.arange(5, 20, 2, dtype=float)
        X = np.linspace(0.1,10,num=10)
        for df in df_range:
            iw = invwishart(df, scale)
            ig = invgamma(df/2, scale=1./2)

            # Statistics
            assert_allclose(iw.var(), ig.var())
            assert_allclose(iw.mean(), ig.mean())

            # PDF
            assert_allclose(iw.pdf(X), ig.pdf(X))

            # rvs
            rvs = iw.rvs(size=sn)
            args = (df/2, 0, 1./2)
            alpha = 0.01
            check_distribution_rvs('invgamma', args, alpha, rvs)

            # entropy
            assert_allclose(iw.entropy(), ig.entropy())

    def test_wishart_invwishart_2D_rvs(self):
        """rvs must reproduce the Bartlett (1933) decomposition exactly."""
        dim = 3
        df = 10

        # Construct a simple non-diagonal positive definite matrix
        scale = np.eye(dim)
        scale[0,1] = 0.5
        scale[1,0] = 0.5

        # Construct frozen Wishart and inverse Wishart random variables
        w = wishart(df, scale)
        iw = invwishart(df, scale)

        # Get the generated random variables from a known seed
        np.random.seed(248042)
        w_rvs = wishart.rvs(df, scale)
        np.random.seed(248042)
        frozen_w_rvs = w.rvs()
        np.random.seed(248042)
        iw_rvs = invwishart.rvs(df, scale)
        np.random.seed(248042)
        frozen_iw_rvs = iw.rvs()

        # Manually calculate what it should be, based on the Bartlett (1933)
        # decomposition of a Wishart into D A A' D', where D is the Cholesky
        # factorization of the scale matrix and A is the lower triangular matrix
        # with the square root of chi^2 variates on the diagonal and N(0,1)
        # variates in the lower triangle.
        np.random.seed(248042)
        covariances = np.random.normal(size=3)
        variances = np.r_[
            np.random.chisquare(df),
            np.random.chisquare(df-1),
            np.random.chisquare(df-2),
        ]**0.5

        # Construct the lower-triangular A matrix
        A = np.diag(variances)
        A[np.tril_indices(dim, k=-1)] = covariances

        # Wishart random variate
        D = np.linalg.cholesky(scale)
        DA = D.dot(A)
        manual_w_rvs = np.dot(DA, DA.T)

        # inverse Wishart random variate
        # Supposing that the inverse wishart has scale matrix `scale`, then the
        # random variate is the inverse of a random variate drawn from a Wishart
        # distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
        iD = np.linalg.cholesky(np.linalg.inv(scale))
        iDA = iD.dot(A)
        manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))

        # Test for equality
        assert_allclose(w_rvs, manual_w_rvs)
        assert_allclose(frozen_w_rvs, manual_w_rvs)
        assert_allclose(iw_rvs, manual_iw_rvs)
        assert_allclose(frozen_iw_rvs, manual_iw_rvs)

    def test_cho_inv_batch(self):
        """Regression test for gh-8844: batched in-place inverse via Cholesky."""
        a0 = np.array([[2, 1, 0, 0.5],
                       [1, 2, 0.5, 0.5],
                       [0, 0.5, 3, 1],
                       [0.5, 0.5, 1, 2]])
        a1 = np.array([[2, -1, 0, 0.5],
                       [-1, 2, 0.5, 0.5],
                       [0, 0.5, 3, 1],
                       [0.5, 0.5, 1, 4]])
        a = np.array([a0, a1])
        ainv = a.copy()
        _cho_inv_batch(ainv)
        ident = np.eye(4)
        assert_allclose(a[0].dot(ainv[0]), ident, atol=1e-15)
        assert_allclose(a[1].dot(ainv[1]), ident, atol=1e-15)

    def test_logpdf_4x4(self):
        """Regression test for gh-8844: logpdf of a 4x4 quantile matrix."""
        X = np.array([[2, 1, 0, 0.5],
                      [1, 2, 0.5, 0.5],
                      [0, 0.5, 3, 1],
                      [0.5, 0.5, 1, 2]])
        Psi = np.array([[9, 7, 3, 1],
                        [7, 9, 5, 1],
                        [3, 5, 8, 2],
                        [1, 1, 2, 9]])
        nu = 6
        prob = invwishart.logpdf(X, nu, Psi)
        # Explicit calculation from the formula on wikipedia.
        p = X.shape[0]
        sig, logdetX = np.linalg.slogdet(X)
        sig, logdetPsi = np.linalg.slogdet(Psi)
        M = np.linalg.solve(X, Psi)
        expected = ((nu/2)*logdetPsi
                    - (nu*p/2)*np.log(2)
                    - multigammaln(nu/2, p)
                    - (nu + p + 1)/2*logdetX
                    - 0.5*M.trace())
        assert_allclose(prob, expected)
class TestSpecialOrthoGroup:
    """Tests for SO(N) sampling (`scipy.stats.special_ortho_group`)."""

    def test_reproducibility(self):
        """Same seed (global or explicit RandomState) -> same matrix."""
        np.random.seed(514)
        x = special_ortho_group.rvs(3)
        expected = np.array([[-0.99394515, -0.04527879, 0.10011432],
                             [0.04821555, -0.99846897, 0.02711042],
                             [0.09873351, 0.03177334, 0.99460653]])
        assert_array_almost_equal(x, expected)

        random_state = np.random.RandomState(seed=514)
        x = special_ortho_group.rvs(3, random_state=random_state)
        assert_array_almost_equal(x, expected)

    def test_invalid_dim(self):
        """Non-scalar, non-integral or too-small dimensions must raise."""
        assert_raises(ValueError, special_ortho_group.rvs, None)
        assert_raises(ValueError, special_ortho_group.rvs, (2, 2))
        assert_raises(ValueError, special_ortho_group.rvs, 1)
        assert_raises(ValueError, special_ortho_group.rvs, 2.5)

    def test_frozen_matrix(self):
        """Frozen distribution must match the direct rvs call."""
        dim = 7
        frozen = special_ortho_group(dim)

        rvs1 = frozen.rvs(random_state=1234)
        rvs2 = special_ortho_group.rvs(dim, random_state=1234)

        assert_equal(rvs1, rvs2)

    def test_det_and_ortho(self):
        """Samples have determinant +1 and are orthogonal."""
        xs = [special_ortho_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]

        # Test that determinants are always +1
        dets = [np.linalg.det(x) for x in xs]
        assert_allclose(dets, [1.]*30, rtol=1e-13)

        # Test that these are orthogonal matrices
        for x in xs:
            assert_array_almost_equal(np.dot(x, x.T),
                                      np.eye(x.shape[0]))

    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        ks_prob = .05
        np.random.seed(514)
        xs = special_ortho_group.rvs(dim, size=samples)

        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        # effectively picking off entries in the matrices of xs.
        # These projections should all have the same disribution,
        # establishing rotational invariance. We use the two-sided
        # KS test to confirm this.
        # We could instead test that angles between random vectors
        # are uniformly distributed, but the below is sufficient.
        # It is not feasible to consider all pairs, so pick a few.
        els = ((0,0), (0,2), (1,4), (2,3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els}
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob]*len(pairs), ks_tests)
class TestOrthoGroup:
    """Tests for O(N) sampling (`scipy.stats.ortho_group`)."""

    def test_reproducibility(self):
        """Same seed (global or passed directly) -> same matrix."""
        seed = 514
        np.random.seed(seed)
        x = ortho_group.rvs(3)
        x2 = ortho_group.rvs(3, random_state=seed)
        # Note this matrix has det -1, distinguishing O(N) from SO(N)
        assert_almost_equal(np.linalg.det(x), -1)

        expected = np.array([[0.381686, -0.090374, 0.919863],
                             [0.905794, -0.161537, -0.391718],
                             [-0.183993, -0.98272, -0.020204]])
        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)

    def test_invalid_dim(self):
        """Non-scalar, non-integral or too-small dimensions must raise."""
        assert_raises(ValueError, ortho_group.rvs, None)
        assert_raises(ValueError, ortho_group.rvs, (2, 2))
        assert_raises(ValueError, ortho_group.rvs, 1)
        assert_raises(ValueError, ortho_group.rvs, 2.5)

    def test_frozen_matrix(self):
        """Frozen (with or without seed) must match the direct rvs call."""
        dim = 7
        frozen = ortho_group(dim)
        frozen_seed = ortho_group(dim, seed=1234)

        rvs1 = frozen.rvs(random_state=1234)
        rvs2 = ortho_group.rvs(dim, random_state=1234)
        rvs3 = frozen_seed.rvs(size=1)

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    def test_det_and_ortho(self):
        """Samples have |det| = 1 and are orthogonal."""
        xs = [[ortho_group.rvs(dim)
               for i in range(10)]
              for dim in range(2,12)]

        # Test that abs determinants are always +1
        dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs])
        assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13)

        # Test that these are orthogonal matrices
        for xx in xs:
            for x in xx:
                assert_array_almost_equal(np.dot(x, x.T),
                                          np.eye(x.shape[0]))

    @pytest.mark.parametrize("dim", [2, 5, 10, 20])
    def test_det_distribution_gh18272(self, dim):
        # Test that positive and negative determinants are equally likely.
        rng = np.random.default_rng(6796248956179332344)
        dist = ortho_group(dim=dim)
        rvs = dist.rvs(size=5000, random_state=rng)
        dets = scipy.linalg.det(rvs)
        k = np.sum(dets > 0)
        n = len(dets)
        res = stats.binomtest(k, n)
        low, high = res.proportion_ci(confidence_level=0.95)
        assert low < 0.5 < high

    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        ks_prob = .05
        np.random.seed(518)  # Note that the test is sensitive to seed too
        xs = ortho_group.rvs(dim, size=samples)

        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        # effectively picking off entries in the matrices of xs.
        # These projections should all have the same disribution,
        # establishing rotational invariance. We use the two-sided
        # KS test to confirm this.
        # We could instead test that angles between random vectors
        # are uniformly distributed, but the below is sufficient.
        # It is not feasible to consider all pairs, so pick a few.
        els = ((0,0), (0,2), (1,4), (2,3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els}
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob]*len(pairs), ks_tests)

    @pytest.mark.slow
    def test_pairwise_distances(self):
        # Test that the distribution of pairwise distances is close to correct.
        np.random.seed(514)

        def random_ortho(dim):
            # Reference sampler: orthogonal factor of a Gaussian matrix's SVD.
            u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim)))
            return np.dot(u, v)

        for dim in range(2, 6):
            def generate_test_statistics(rvs, N=1000, eps=1e-10):
                # NOTE: local name `stats` shadows the module-level scipy.stats
                # alias inside this helper only.
                stats = np.array([
                    np.sum((rvs(dim=dim) - rvs(dim=dim))**2)
                    for _ in range(N)
                ])
                # Add a bit of noise to account for numeric accuracy.
                stats += np.random.uniform(-eps, eps, size=stats.shape)
                return stats

            expected = generate_test_statistics(random_ortho)
            actual = generate_test_statistics(scipy.stats.ortho_group.rvs)

            _D, p = scipy.stats.ks_2samp(expected, actual)

            assert_array_less(.05, p)
class TestRandomCorrelation:
    """Tests for random correlation matrices (`scipy.stats.random_correlation`)."""

    def test_reproducibility(self):
        """Same seed (global or passed directly) -> same matrix."""
        np.random.seed(514)
        eigs = (.5, .8, 1.2, 1.5)
        x = random_correlation.rvs(eigs)
        x2 = random_correlation.rvs(eigs, random_state=514)
        expected = np.array([[1., -0.184851, 0.109017, -0.227494],
                             [-0.184851, 1., 0.231236, 0.326669],
                             [0.109017, 0.231236, 1., -0.178912],
                             [-0.227494, 0.326669, -0.178912, 1.]])
        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)

    def test_invalid_eigs(self):
        """Eigenvalues must be a 1-d nonnegative vector summing to dim."""
        assert_raises(ValueError, random_correlation.rvs, None)
        assert_raises(ValueError, random_correlation.rvs, 'test')
        assert_raises(ValueError, random_correlation.rvs, 2.5)
        assert_raises(ValueError, random_correlation.rvs, [2.5])
        assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]])
        assert_raises(ValueError, random_correlation.rvs, [2.5, -.5])
        assert_raises(ValueError, random_correlation.rvs, [1, 2, .1])

    def test_frozen_matrix(self):
        """Frozen (with or without seed) must match the direct rvs call."""
        eigs = (.5, .8, 1.2, 1.5)
        frozen = random_correlation(eigs)
        frozen_seed = random_correlation(eigs, seed=514)

        rvs1 = random_correlation.rvs(eigs, random_state=514)
        rvs2 = frozen.rvs(random_state=514)
        rvs3 = frozen_seed.rvs()

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    def test_definition(self):
        # Test the definition of a correlation matrix in several dimensions:
        #
        # 1. Det is product of eigenvalues (and positive by construction
        #    in examples)
        # 2. 1's on diagonal
        # 3. Matrix is symmetric

        def norm(i, e):
            # Rescale eigenvalues `e` so they sum to the dimension `i`.
            return i*e/sum(e)

        np.random.seed(123)

        eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)]
        eigs.append([4,0,0,0])
        ones = [[1.]*len(e) for e in eigs]
        xs = [random_correlation.rvs(e) for e in eigs]

        # Test that determinants are products of eigenvalues
        # These are positive by construction
        # Could also test that the eigenvalues themselves are correct,
        # but this seems sufficient.
        dets = [np.fabs(np.linalg.det(x)) for x in xs]
        dets_known = [np.prod(e) for e in eigs]
        assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13)

        # Test for 1's on the diagonal
        diags = [np.diag(x) for x in xs]
        for a, b in zip(diags, ones):
            assert_allclose(a, b, rtol=1e-13)

        # Correlation matrices are symmetric
        for x in xs:
            assert_allclose(x, x.T, rtol=1e-13)

    def test_to_corr(self):
        # Check some corner cases in to_corr

        # ajj == 1
        m = np.array([[0.1, 0], [0, 1]], dtype=float)
        m = random_correlation._to_corr(m)
        assert_allclose(m, np.array([[1, 0], [0, 0.1]]))

        # Floating point overflow; fails to compute the correct
        # rotation, but should still produce some valid rotation
        # rather than infs/nans
        with np.errstate(over='ignore'):
            g = np.array([[0, 1], [-1, 0]])

            m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float)
            m = random_correlation._to_corr(m0.copy())
            assert_allclose(m, g.T.dot(m0).dot(g))

            m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float)
            m = random_correlation._to_corr(m0.copy())
            assert_allclose(m, g.T.dot(m0).dot(g))

        # Zero discriminant; should set the first diag entry to 1
        m0 = np.array([[2, 1], [1, 2]], dtype=float)
        m = random_correlation._to_corr(m0.copy())
        assert_allclose(m[0,0], 1)

        # Slightly negative discriminant; should be approx correct still
        m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float)
        m = random_correlation._to_corr(m0.copy())
        assert_allclose(m[0,0], 1)
class TestUniformDirection:
    """Tests for uniform unit vectors (`scipy.stats.uniform_direction`)."""

    @pytest.mark.parametrize("dim", [1, 3])
    @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)])
    def test_samples(self, dim, size):
        # test that samples have correct shape and norm 1
        rng = np.random.default_rng(2777937887058094419)
        uniform_direction_dist = uniform_direction(dim, seed=rng)
        samples = uniform_direction_dist.rvs(size)
        mean, cov = np.zeros(dim), np.eye(dim)
        # Shape convention mirrors multivariate_normal's rvs output.
        expected_shape = rng.multivariate_normal(mean, cov, size=size).shape
        assert samples.shape == expected_shape
        norms = np.linalg.norm(samples, axis=-1)
        assert_allclose(norms, 1.)

    @pytest.mark.parametrize("dim", [None, 0, (2, 2), 2.5])
    def test_invalid_dim(self, dim):
        """Dimension must be a positive integer scalar."""
        message = ("Dimension of vector must be specified, "
                   "and must be an integer greater than 0.")
        with pytest.raises(ValueError, match=message):
            uniform_direction.rvs(dim)

    def test_frozen_distribution(self):
        """Frozen (with or without seed) must match the direct rvs call."""
        dim = 5
        frozen = uniform_direction(dim)
        frozen_seed = uniform_direction(dim, seed=514)

        rvs1 = frozen.rvs(random_state=514)
        rvs2 = uniform_direction.rvs(dim, random_state=514)
        rvs3 = frozen_seed.rvs()

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    @pytest.mark.parametrize("dim", [2, 5, 8])
    def test_uniform(self, dim):
        """Projections onto an orthogonal pair give uniformly distributed angles."""
        rng = np.random.default_rng(1036978481269651776)
        spherical_dist = uniform_direction(dim, seed=rng)
        # generate random, orthogonal vectors
        v1, v2 = spherical_dist.rvs(size=2)
        v2 -= v1 @ v2 * v1
        v2 /= np.linalg.norm(v2)
        assert_allclose(v1 @ v2, 0, atol=1e-14)  # orthogonal
        # generate data and project onto orthogonal vectors
        samples = spherical_dist.rvs(size=10000)
        s1 = samples @ v1
        s2 = samples @ v2
        angles = np.arctan2(s1, s2)
        # test that angles follow a uniform distribution
        # normalize angles to range [0, 1]
        angles += np.pi
        angles /= 2*np.pi
        # perform KS test
        uniform_dist = uniform()
        kstest_result = kstest(angles, uniform_dist.cdf)
        assert kstest_result.pvalue > 0.05
class TestUnitaryGroup:
    """Tests for U(N) sampling (`scipy.stats.unitary_group`)."""

    def test_reproducibility(self):
        """Same seed (global or passed directly) -> same matrix."""
        np.random.seed(514)
        x = unitary_group.rvs(3)
        x2 = unitary_group.rvs(3, random_state=514)

        expected = np.array([[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j],
                             [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j],
                             [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]])

        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)

    def test_invalid_dim(self):
        """Non-scalar, non-integral or too-small dimensions must raise."""
        assert_raises(ValueError, unitary_group.rvs, None)
        assert_raises(ValueError, unitary_group.rvs, (2, 2))
        assert_raises(ValueError, unitary_group.rvs, 1)
        assert_raises(ValueError, unitary_group.rvs, 2.5)

    def test_frozen_matrix(self):
        """Frozen (with or without seed) must match the direct rvs call."""
        dim = 7
        frozen = unitary_group(dim)
        frozen_seed = unitary_group(dim, seed=514)

        rvs1 = frozen.rvs(random_state=514)
        rvs2 = unitary_group.rvs(dim, random_state=514)
        rvs3 = frozen_seed.rvs(size=1)

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    def test_unitarity(self):
        """x @ x.conj().T must be the identity for every sample."""
        xs = [unitary_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]

        # Test that these are unitary matrices
        for x in xs:
            assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15)

    def test_haar(self):
        # Test that the eigenvalues, which lie on the unit circle in
        # the complex plane, are uncorrelated.

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        np.random.seed(514)  # Note that the test is sensitive to seed too
        xs = unitary_group.rvs(dim, size=samples)

        # The angles "x" of the eigenvalues should be uniformly distributed
        # Overall this seems to be a necessary but weak test of the distribution.
        eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
        x = np.arctan2(eigs.imag, eigs.real)
        res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
        # NOTE(review): numpy's legacy `assert_`; a bare `assert` would be
        # more idiomatic here.
        assert_(res.pvalue > 0.05)
class TestMultivariateT:
# These tests were created by running vpa(mvtpdf(...)) in MATLAB. The
# function takes no `mu` parameter. The tests were run as
#
# >> ans = vpa(mvtpdf(x - mu, shape, df));
#
PDF_TESTS = [(
# x
[
[1, 2],
[4, 1],
[2, 1],
[2, 4],
[1, 4],
[4, 1],
[3, 2],
[3, 3],
[4, 4],
[5, 1],
],
# loc
[0, 0],
# shape
[
[1, 0],
[0, 1]
],
# df
4,
# ans
[
0.013972450422333741737457302178882,
0.0010998721906793330026219646100571,
0.013972450422333741737457302178882,
0.00073682844024025606101402363634634,
0.0010998721906793330026219646100571,
0.0010998721906793330026219646100571,
0.0020732579600816823488240725481546,
0.00095660371505271429414668515889275,
0.00021831953784896498569831346792114,
0.00037725616140301147447000396084604
]
), (
# x
[
[0.9718, 0.1298, 0.8134],
[0.4922, 0.5522, 0.7185],
[0.3010, 0.1491, 0.5008],
[0.5971, 0.2585, 0.8940],
[0.5434, 0.5287, 0.9507],
],
# loc
[-1, 1, 50],
# shape
[
[1.0000, 0.5000, 0.2500],
[0.5000, 1.0000, -0.1000],
[0.2500, -0.1000, 1.0000],
],
# df
8,
# ans
[
0.00000000000000069609279697467772867405511133763,
0.00000000000000073700739052207366474839369535934,
0.00000000000000069522909962669171512174435447027,
0.00000000000000074212293557998314091880208889767,
0.00000000000000077039675154022118593323030449058,
]
)]
@pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS)
def test_pdf_correctness(self, x, loc, shape, df, ans):
    """pdf values must match MATLAB's vpa(mvtpdf(x - mu, shape, df))."""
    dist = multivariate_t(loc, shape, df, seed=0)
    val = dist.pdf(x)
    assert_array_almost_equal(val, ans)
@pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS)
def test_logpdf_correct(self, x, loc, shape, df, ans):
    """logpdf must equal log(pdf) for the same inputs."""
    dist = multivariate_t(loc, shape, df, seed=0)
    val1 = dist.pdf(x)
    val2 = dist.logpdf(x)
    assert_array_almost_equal(np.log(val1), val2)
# https://github.com/scipy/scipy/issues/10042#issuecomment-576795195
def test_mvt_with_df_one_is_cauchy(self):
x = [9, 7, 4, 1, -3, 9, 0, -3, -1, 3]
val = multivariate_t.pdf(x, df=1)
ans = cauchy.pdf(x)
assert_array_almost_equal(val, ans)
def test_mvt_with_high_df_is_approx_normal(self):
    """Very large df: samples should pass a normality test."""
    # `normaltest` returns the chi-squared statistic and the associated
    # p-value. The null hypothesis is that `x` came from a normal
    # distribution, so a low p-value represents rejecting the null, i.e.
    # that it is unlikely that `x` came a normal distribution.
    P_VAL_MIN = 0.1

    dist = multivariate_t(0, 1, df=100000, seed=1)
    samples = dist.rvs(size=100000)
    _, p = normaltest(samples)
    assert (p > P_VAL_MIN)

    dist = multivariate_t([-2, 3], [[10, -1], [-1, 10]], df=100000,
                          seed=42)
    samples = dist.rvs(size=100000)
    _, p = normaltest(samples)
    assert ((p > P_VAL_MIN).all())
@patch('scipy.stats.multivariate_normal._logpdf')
def test_mvt_with_inf_df_calls_normal(self, mock):
    """df=inf must delegate to the multivariate normal implementation."""
    # Freezing with df=inf returns a frozen multivariate normal instance.
    dist = multivariate_t(0, 1, df=np.inf, seed=7)
    assert isinstance(dist, multivariate_normal_frozen)
    # Both pdf and logpdf route through multivariate_normal._logpdf.
    multivariate_t.pdf(0, df=np.inf)
    assert mock.call_count == 1
    multivariate_t.logpdf(0, df=np.inf)
    assert mock.call_count == 2
    def test_shape_correctness(self):
        """pdf/logpdf/rvs return scalars or arrays of the documented shape."""
        # pdf and logpdf should return scalar when the
        # number of samples in x is one.
        dim = 4
        loc = np.zeros(dim)
        shape = np.eye(dim)
        df = 4.5
        x = np.zeros(dim)
        res = multivariate_t(loc, shape, df).pdf(x)
        assert np.isscalar(res)
        res = multivariate_t(loc, shape, df).logpdf(x)
        assert np.isscalar(res)
        # pdf() and logpdf() should return probabilities of shape
        # (n_samples,) when x has n_samples.
        n_samples = 7
        x = np.random.random((n_samples, dim))
        res = multivariate_t(loc, shape, df).pdf(x)
        assert (res.shape == (n_samples,))
        res = multivariate_t(loc, shape, df).logpdf(x)
        assert (res.shape == (n_samples,))
        # rvs() should return scalar unless a size argument is applied.
        res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs()
        assert np.isscalar(res)
        # rvs() should return vector of shape (size,) if size argument
        # is applied.
        size = 7
        res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs(size=size)
        assert (res.shape == (size,))
def test_default_arguments(self):
dist = multivariate_t()
assert_equal(dist.loc, [0])
assert_equal(dist.shape, [[1]])
assert (dist.df == 1)
DEFAULT_ARGS_TESTS = [
(None, None, None, 0, 1, 1),
(None, None, 7, 0, 1, 7),
(None, [[7, 0], [0, 7]], None, [0, 0], [[7, 0], [0, 7]], 1),
(None, [[7, 0], [0, 7]], 7, [0, 0], [[7, 0], [0, 7]], 7),
([7, 7], None, None, [7, 7], [[1, 0], [0, 1]], 1),
([7, 7], None, 7, [7, 7], [[1, 0], [0, 1]], 7),
([7, 7], [[7, 0], [0, 7]], None, [7, 7], [[7, 0], [0, 7]], 1),
([7, 7], [[7, 0], [0, 7]], 7, [7, 7], [[7, 0], [0, 7]], 7)
]
    @pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans", DEFAULT_ARGS_TESTS)
    def test_default_args(self, loc, shape, df, loc_ans, shape_ans, df_ans):
        """Omitted constructor arguments fall back to the defaults."""
        dist = multivariate_t(loc=loc, shape=shape, df=df)
        assert_equal(dist.loc, loc_ans)
        assert_equal(dist.shape, shape_ans)
        assert (dist.df == df_ans)
ARGS_SHAPES_TESTS = [
(-1, 2, 3, [-1], [[2]], 3),
([-1], [2], 3, [-1], [[2]], 3),
(np.array([-1]), np.array([2]), 3, [-1], [[2]], 3)
]
    @pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans", ARGS_SHAPES_TESTS)
    def test_scalar_list_and_ndarray_arguments(self, loc, shape, df, loc_ans, shape_ans, df_ans):
        """Scalar, list and ndarray inputs canonicalize to the same shapes."""
        dist = multivariate_t(loc, shape, df)
        assert_equal(dist.loc, loc_ans)
        assert_equal(dist.shape, shape_ans)
        assert_equal(dist.df, df_ans)
def test_argument_error_handling(self):
# `loc` should be a one-dimensional vector.
loc = [[1, 1]]
assert_raises(ValueError,
multivariate_t,
**dict(loc=loc))
# `shape` should be scalar or square matrix.
shape = [[1, 1], [2, 2], [3, 3]]
assert_raises(ValueError,
multivariate_t,
**dict(loc=loc, shape=shape))
# `df` should be greater than zero.
loc = np.zeros(2)
shape = np.eye(2)
df = -1
assert_raises(ValueError,
multivariate_t,
**dict(loc=loc, shape=shape, df=df))
df = 0
assert_raises(ValueError,
multivariate_t,
**dict(loc=loc, shape=shape, df=df))
def test_reproducibility(self):
rng = np.random.RandomState(4)
loc = rng.uniform(size=3)
shape = np.eye(3)
dist1 = multivariate_t(loc, shape, df=3, seed=2)
dist2 = multivariate_t(loc, shape, df=3, seed=2)
samples1 = dist1.rvs(size=10)
samples2 = dist2.rvs(size=10)
assert_equal(samples1, samples2)
def test_allow_singular(self):
# Make shape singular and verify error was raised.
args = dict(loc=[0,0], shape=[[0,0],[0,1]], df=1, allow_singular=False)
assert_raises(np.linalg.LinAlgError, multivariate_t, **args)
    @pytest.mark.parametrize("size", [(10, 3), (5, 6, 4, 3)])
    @pytest.mark.parametrize("dim", [2, 3, 4, 5])
    @pytest.mark.parametrize("df", [1., 2., np.inf])
    def test_rvs(self, size, dim, df):
        """rvs output shape is the requested size plus the event dimension."""
        dist = multivariate_t(np.zeros(dim), np.eye(dim), df)
        rvs = dist.rvs(size=size)
        assert rvs.shape == size + (dim, )
def test_cdf_signs(self):
# check that sign of output is correct when np.any(lower > x)
mean = np.zeros(3)
cov = np.eye(3)
df = 10
b = [[1, 1, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0]]
a = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]
# when odd number of elements of b < a, output is negative
expected_signs = np.array([1, -1, -1, 1])
cdf = multivariate_normal.cdf(b, mean, cov, df, lower_limit=a)
assert_allclose(cdf, cdf[0]*expected_signs)
    @pytest.mark.parametrize('dim', [1, 2, 5, 10])
    def test_cdf_against_multivariate_normal(self, dim):
        """CDF approaches the MVN CDF for very large df (regular case)."""
        # Check accuracy against MVN randomly-generated cases
        self.cdf_against_mvn_test(dim)
    @pytest.mark.parametrize('dim', [3, 6, 9])
    def test_cdf_against_multivariate_normal_singular(self, dim):
        # Check accuracy against MVN for randomly-generated singular cases
        # NOTE(review): `dim` is parametrized but unused below — the call is
        # hard-coded to dimension 3, so all three cases run identically.
        # Confirm whether `dim` was intended to be passed here.
        self.cdf_against_mvn_test(3, True)
    def cdf_against_mvn_test(self, dim, singular=False):
        """Helper: compare the MVT CDF at df=10000 against the MVN CDF.

        Parameters
        ----------
        dim : int
            Dimensionality of the randomly generated test case.
        singular : bool, optional
            Whether to generate a singular covariance matrix.
        """
        # Check for accuracy in the limit that df -> oo and MVT -> MVN
        rng = np.random.default_rng(413722918996573)
        n = 3
        w = 10**rng.uniform(-2, 1, size=dim)
        cov = _random_covariance(dim, w, rng, singular)
        mean = 10**rng.uniform(-1, 2, size=dim) * np.sign(rng.normal(size=dim))
        a = -10**rng.uniform(-1, 2, size=(n, dim)) + mean
        b = 10**rng.uniform(-1, 2, size=(n, dim)) + mean
        res = stats.multivariate_t.cdf(b, mean, cov, df=10000, lower_limit=a,
                                       allow_singular=True, random_state=rng)
        ref = stats.multivariate_normal.cdf(b, mean, cov, allow_singular=True,
                                            lower_limit=a)
        assert_allclose(res, ref, atol=5e-4)
def test_cdf_against_univariate_t(self):
rng = np.random.default_rng(413722918996573)
cov = 2
mean = 0
x = rng.normal(size=10, scale=np.sqrt(cov))
df = 3
res = stats.multivariate_t.cdf(x, mean, cov, df, lower_limit=-np.inf,
random_state=rng)
ref = stats.t.cdf(x, df, mean, np.sqrt(cov))
incorrect = stats.norm.cdf(x, mean, np.sqrt(cov))
assert_allclose(res, ref, atol=5e-4) # close to t
assert np.all(np.abs(res - incorrect) > 1e-3) # not close to normal
    @pytest.mark.parametrize("dim", [2, 3, 5, 10])
    @pytest.mark.parametrize("seed", [3363958638, 7891119608, 3887698049,
                                      5013150848, 1495033423, 6170824608])
    @pytest.mark.parametrize("singular", [False, True])
    def test_cdf_against_qsimvtv(self, dim, seed, singular):
        """CDF agrees with the `_qsimvtv` reference implementation."""
        if singular and seed != 3363958638:
            pytest.skip('Agreement with qsimvtv is not great in singular case')
        rng = np.random.default_rng(seed)
        w = 10**rng.uniform(-2, 2, size=dim)
        cov = _random_covariance(dim, w, rng, singular)
        mean = rng.random(dim)
        a = -rng.random(dim)
        b = rng.random(dim)
        df = rng.random() * 5
        # no lower limit
        res = stats.multivariate_t.cdf(b, mean, cov, df, random_state=rng,
                                       allow_singular=True)
        with np.errstate(invalid='ignore'):
            ref = _qsimvtv(20000, df, cov, np.inf*a, b - mean, rng)[0]
        assert_allclose(res, ref, atol=2e-4, rtol=1e-3)
        # with lower limit
        res = stats.multivariate_t.cdf(b, mean, cov, df, lower_limit=a,
                                       random_state=rng, allow_singular=True)
        with np.errstate(invalid='ignore'):
            ref = _qsimvtv(20000, df, cov, a - mean, b - mean, rng)[0]
        assert_allclose(res, ref, atol=1e-4, rtol=1e-3)
    def test_cdf_against_generic_integrators(self):
        """CDF agrees with qmc_quad and tplquad integration of the pdf."""
        # Compare result against generic numerical integrators
        dim = 3
        rng = np.random.default_rng(41372291899657)
        w = 10 ** rng.uniform(-1, 1, size=dim)
        cov = _random_covariance(dim, w, rng, singular=True)
        mean = rng.random(dim)
        a = -rng.random(dim)
        b = rng.random(dim)
        df = rng.random() * 5
        res = stats.multivariate_t.cdf(b, mean, cov, df, random_state=rng,
                                       lower_limit=a)
        # Reference 1: quasi-Monte-Carlo integration of the pdf.
        def integrand(x):
            return stats.multivariate_t.pdf(x.T, mean, cov, df)
        ref = qmc_quad(integrand, a, b, qrng=stats.qmc.Halton(d=dim, seed=rng))
        assert_allclose(res, ref.integral, rtol=1e-3)
        # Reference 2: adaptive triple quadrature (tplquad passes z, y, x).
        def integrand(*zyx):
            return stats.multivariate_t.pdf(zyx[::-1], mean, cov, df)
        ref = tplquad(integrand, a[0], b[0], a[1], b[1], a[2], b[2])
        assert_allclose(res, ref[0], rtol=1e-3)
def test_against_matlab(self):
# Test against matlab mvtcdf:
# C = [6.21786909 0.2333667 7.95506077;
# 0.2333667 29.67390923 16.53946426;
# 7.95506077 16.53946426 19.17725252]
# df = 1.9559939787727658
# mvtcdf([0, 0, 0], C, df) % 0.2523
rng = np.random.default_rng(2967390923)
cov = np.array([[ 6.21786909, 0.2333667 , 7.95506077],
[ 0.2333667 , 29.67390923, 16.53946426],
[ 7.95506077, 16.53946426, 19.17725252]])
df = 1.9559939787727658
dist = stats.multivariate_t(shape=cov, df=df)
res = dist.cdf([0, 0, 0], random_state=rng)
ref = 0.2523
assert_allclose(res, ref, rtol=1e-3)
    def test_frozen(self):
        """Frozen-distribution cdf must match the functional interface."""
        seed = 4137229573
        rng = np.random.default_rng(seed)
        loc = rng.uniform(size=3)
        x = rng.uniform(size=3) + loc
        shape = np.eye(3)
        df = rng.random()
        args = (loc, shape, df)
        # Identical generator states make the Monte Carlo CDFs comparable.
        rng_frozen = np.random.default_rng(seed)
        rng_unfrozen = np.random.default_rng(seed)
        dist = stats.multivariate_t(*args, seed=rng_frozen)
        assert_equal(dist.cdf(x),
                     multivariate_t.cdf(x, *args, random_state=rng_unfrozen))
    def test_vectorized(self):
        """Vectorized cdf over an (2, 3, dim) batch matches per-point
        `_qsimvtv` evaluation."""
        dim = 4
        n = (2, 3)
        rng = np.random.default_rng(413722918996573)
        A = rng.random(size=(dim, dim))
        cov = A @ A.T
        mean = rng.random(dim)
        x = rng.random(n + (dim,))
        df = rng.random() * 5
        res = stats.multivariate_t.cdf(x, mean, cov, df, random_state=rng)
        def _cdf_1d(x):
            return _qsimvtv(10000, df, cov, -np.inf*x, x-mean, rng)[0]
        ref = np.apply_along_axis(_cdf_1d, -1, x)
        assert_allclose(res, ref, atol=1e-4, rtol=1e-3)
    @pytest.mark.parametrize("dim", (3, 7))
    def test_against_analytical(self, dim):
        """CDF at the origin with equicorrelation 1/2 equals 1/(dim+1)."""
        rng = np.random.default_rng(413722918996573)
        A = scipy.linalg.toeplitz(c=[1] + [0.5] * (dim - 1))
        res = stats.multivariate_t(shape=A).cdf([0] * dim, random_state=rng)
        ref = 1 / (dim + 1)
        assert_allclose(res, ref, rtol=5e-5)
def test_entropy_inf_df(self):
cov = np.eye(3, 3)
df = np.inf
mvt_entropy = stats.multivariate_t.entropy(shape=cov, df=df)
mvn_entropy = stats.multivariate_normal.entropy(None, cov)
assert mvt_entropy == mvn_entropy
    @pytest.mark.parametrize("df", [1, 10, 100])
    def test_entropy_1d(self, df):
        """1-D MVT entropy matches the univariate Student t entropy."""
        mvt_entropy = stats.multivariate_t.entropy(shape=1., df=df)
        t_entropy = stats.t.entropy(df=df)
        assert_allclose(mvt_entropy, t_entropy, rtol=1e-13)
# entropy reference values were computed via numerical integration
#
# def integrand(x, y, mvt):
# vec = np.array([x, y])
# return mvt.logpdf(vec) * mvt.pdf(vec)
# def multivariate_t_entropy_quad_2d(df, cov):
# dim = cov.shape[0]
# loc = np.zeros((dim, ))
# mvt = stats.multivariate_t(loc, cov, df)
# limit = 100
# return -integrate.dblquad(integrand, -limit, limit, -limit, limit,
# args=(mvt, ))[0]
    @pytest.mark.parametrize("df, cov, ref, tol",
                             [(10, np.eye(2, 2), 3.0378770664093313, 1e-14),
                              (100, np.array([[0.5, 1], [1, 10]]),
                               3.55102424550609, 1e-8)])
    def test_entropy_vs_numerical_integration(self, df, cov, ref, tol):
        """Entropy matches reference values from 2-D numerical integration
        (see the commented integration code above)."""
        loc = np.zeros((2, ))
        mvt = stats.multivariate_t(loc, cov, df)
        assert_allclose(mvt.entropy(), ref, rtol=tol)
    @pytest.mark.parametrize(
        "df, dim, ref, tol",
        [
            (10, 1, 1.5212624929756808, 1e-15),
            (100, 1, 1.4289633653182439, 1e-13),
            (500, 1, 1.420939531869349, 1e-14),
            (1e20, 1, 1.4189385332046727, 1e-15),
            (1e100, 1, 1.4189385332046727, 1e-15),
            (10, 10, 15.069150450832911, 1e-15),
            (1000, 10, 14.19936546446673, 1e-13),
            (1e20, 10, 14.189385332046728, 1e-15),
            (1e100, 10, 14.189385332046728, 1e-15),
            (10, 100, 148.28902883192654, 1e-15),
            (1000, 100, 141.99155538003762, 1e-14),
            (1e20, 100, 141.8938533204673, 1e-15),
            (1e100, 100, 141.8938533204673, 1e-15),
        ]
    )
    def test_extreme_entropy(self, df, dim, ref, tol):
        """Entropy stays accurate for extreme df and dimension values."""
        # Reference values were calculated with mpmath:
        # from mpmath import mp
        # mp.dps = 500
        #
        # def mul_t_mpmath_entropy(dim, df=1):
        #     dim = mp.mpf(dim)
        #     df = mp.mpf(df)
        #     halfsum = (dim + df)/2
        #     half_df = df/2
        #
        #     return float(
        #         -mp.loggamma(halfsum) + mp.loggamma(half_df)
        #         + dim / 2 * mp.log(df * mp.pi)
        #         + halfsum * (mp.digamma(halfsum) - mp.digamma(half_df))
        #         + 0.0
        #     )
        mvt = stats.multivariate_t(shape=np.eye(dim), df=df)
        assert_allclose(mvt.entropy(), ref, rtol=tol)
    def test_entropy_with_covariance(self):
        """Entropy with a general covariance: asymptotic and regular regimes."""
        # Generated using np.randn(5, 5) and then rounding
        # to two decimal places
        _A = np.array([
            [1.42, 0.09, -0.49, 0.17, 0.74],
            [-1.13, -0.01, 0.71, 0.4, -0.56],
            [1.07, 0.44, -0.28, -0.44, 0.29],
            [-1.5, -0.94, -0.67, 0.73, -1.1],
            [0.17, -0.08, 1.46, -0.32, 1.36]
        ])
        # Set cov to be a symmetric positive semi-definite matrix
        cov = _A @ _A.T
        # Test the asymptotic case. For large degrees of freedom
        # the entropy approaches the multivariate normal entropy.
        df = 1e20
        mul_t_entropy = stats.multivariate_t.entropy(shape=cov, df=df)
        mul_norm_entropy = multivariate_normal(None, cov=cov).entropy()
        assert_allclose(mul_t_entropy, mul_norm_entropy, rtol=1e-15)
        # Test the regular case. For a dim of 5 the threshold comes out
        # to be approximately 766.45. So using slightly
        # different dfs on each site of the threshold, the entropies
        # are being compared.
        df1 = 765
        df2 = 768
        _entropy1 = stats.multivariate_t.entropy(shape=cov, df=df1)
        _entropy2 = stats.multivariate_t.entropy(shape=cov, df=df2)
        assert_allclose(_entropy1, _entropy2, rtol=1e-5)
class TestMultivariateHypergeom:
    """Tests for `scipy.stats.multivariate_hypergeom`.

    Reference values labelled "from R dmvhyper" come from the R
    ``extraDistr`` package.
    """

    @pytest.mark.parametrize(
        "x, m, n, expected",
        [
            # Ground truth value from R dmvhyper
            ([3, 4], [5, 10], 7, -1.119814),
            # test for `n=0`
            ([3, 4], [5, 10], 0, -np.inf),
            # test for `x < 0`
            ([-3, 4], [5, 10], 7, -np.inf),
            # test for `m < 0` (RuntimeWarning issue)
            ([3, 4], [-5, 10], 7, np.nan),
            # test for all `m < 0` and `x.sum() != n`
            ([[1, 2], [3, 4]], [[-4, -6], [-5, -10]],
             [3, 7], [np.nan, np.nan]),
            # test for `x < 0` and `m < 0` (RuntimeWarning issue)
            ([-3, 4], [-5, 10], 1, np.nan),
            # test for `x > m`
            ([1, 11], [10, 1], 12, np.nan),
            # test for `m < 0` (RuntimeWarning issue)
            ([1, 11], [10, -1], 12, np.nan),
            # test for `n < 0`
            ([3, 4], [5, 10], -7, np.nan),
            # test for `x.sum() != n`
            ([3, 3], [5, 10], 7, -np.inf)
        ]
    )
    def test_logpmf(self, x, m, n, expected):
        """logpmf handles valid, invalid and edge-case arguments."""
        vals = multivariate_hypergeom.logpmf(x, m, n)
        assert_allclose(vals, expected, rtol=1e-6)

    def test_reduces_hypergeom(self):
        # test that the multivariate_hypergeom pmf reduces to the
        # hypergeom pmf in the 2d case.
        val1 = multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
        val2 = hypergeom.pmf(k=3, M=15, n=4, N=10)
        assert_allclose(val1, val2, rtol=1e-8)
        val1 = multivariate_hypergeom.pmf(x=[7, 3], m=[15, 10], n=10)
        val2 = hypergeom.pmf(k=7, M=25, n=10, N=15)
        assert_allclose(val1, val2, rtol=1e-8)

    def test_rvs(self):
        # test if `rvs` is unbiased and large sample size converges
        # to the true mean.
        rv = multivariate_hypergeom(m=[3, 5], n=4)
        rvs = rv.rvs(size=1000, random_state=123)
        assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)

    def test_rvs_broadcasting(self):
        """Sampling broadcasts over stacked (m, n) parameter sets."""
        rv = multivariate_hypergeom(m=[[3, 5], [5, 10]], n=[4, 9])
        rvs = rv.rvs(size=(1000, 2), random_state=123)
        assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)

    @pytest.mark.parametrize('m, n', (
        ([0, 0, 20, 0, 0], 5), ([0, 0, 0, 0, 0], 0),
        ([0, 0], 0), ([0], 0)
    ))
    def test_rvs_gh16171(self, m, n):
        # Regression test for gh-16171: rvs with zero counts in `m`.
        res = multivariate_hypergeom.rvs(m, n)
        m = np.asarray(m)
        res_ex = m.copy()
        res_ex[m != 0] = n
        assert_equal(res, res_ex)

    @pytest.mark.parametrize(
        "x, m, n, expected",
        [
            ([5], [5], 5, 1),
            ([3, 4], [5, 10], 7, 0.3263403),
            # Ground truth value from R dmvhyper
            ([[[3, 5], [0, 8]], [[-1, 9], [1, 1]]],
             [5, 10], [[8, 8], [8, 2]],
             [[0.3916084, 0.006993007], [0, 0.4761905]]),
            # test with empty arrays.
            (np.array([], np.int_), np.array([], np.int_), 0, []),
            ([1, 2], [4, 5], 5, 0),
            # Ground truth value from R dmvhyper
            ([3, 3, 0], [5, 6, 7], 6, 0.01077354)
        ]
    )
    def test_pmf(self, x, m, n, expected):
        """pmf matches reference values and handles degenerate inputs."""
        vals = multivariate_hypergeom.pmf(x, m, n)
        assert_allclose(vals, expected, rtol=1e-7)

    @pytest.mark.parametrize(
        "x, m, n, expected",
        [
            ([3, 4], [[5, 10], [10, 15]], 7, [0.3263403, 0.3407531]),
            ([[1], [2]], [[3], [4]], [1, 3], [1., 0.]),
            ([[[1], [2]]], [[3], [4]], [1, 3], [[1., 0.]]),
            ([[1], [2]], [[[[3]]]], [1, 3], [[[1., 0.]]])
        ]
    )
    def test_pmf_broadcasting(self, x, m, n, expected):
        """pmf broadcasts x, m and n against each other."""
        vals = multivariate_hypergeom.pmf(x, m, n)
        assert_allclose(vals, expected, rtol=1e-7)

    def test_cov(self):
        """Covariance matrix matches precomputed values."""
        cov1 = multivariate_hypergeom.cov(m=[3, 7, 10], n=12)
        cov2 = [[0.64421053, -0.26526316, -0.37894737],
                [-0.26526316, 1.14947368, -0.88421053],
                [-0.37894737, -0.88421053, 1.26315789]]
        assert_allclose(cov1, cov2, rtol=1e-8)

    def test_cov_broadcasting(self):
        """cov broadcasts over stacked (m, n) parameter sets."""
        cov1 = multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
        cov2 = [[[1.05, -1.05], [-1.05, 1.05]],
                [[1.56, -1.56], [-1.56, 1.56]]]
        assert_allclose(cov1, cov2, rtol=1e-8)
        cov3 = multivariate_hypergeom.cov(m=[[4], [5]], n=[4, 5])
        cov4 = [[[0.]], [[0.]]]
        assert_allclose(cov3, cov4, rtol=1e-8)
        cov5 = multivariate_hypergeom.cov(m=[7, 9], n=[8, 12])
        cov6 = [[[1.05, -1.05], [-1.05, 1.05]],
                [[0.7875, -0.7875], [-0.7875, 0.7875]]]
        assert_allclose(cov5, cov6, rtol=1e-8)

    def test_var(self):
        # test with hypergeom
        var0 = multivariate_hypergeom.var(m=[10, 5], n=4)
        var1 = hypergeom.var(M=15, n=4, N=10)
        assert_allclose(var0, var1, rtol=1e-8)

    def test_var_broadcasting(self):
        """var broadcasts over stacked (m, n) parameter sets."""
        var0 = multivariate_hypergeom.var(m=[10, 5], n=[4, 8])
        var1 = multivariate_hypergeom.var(m=[10, 5], n=4)
        var2 = multivariate_hypergeom.var(m=[10, 5], n=8)
        assert_allclose(var0[0], var1, rtol=1e-8)
        assert_allclose(var0[1], var2, rtol=1e-8)
        var3 = multivariate_hypergeom.var(m=[[10, 5], [10, 14]], n=[4, 8])
        var4 = [[0.6984127, 0.6984127], [1.352657, 1.352657]]
        assert_allclose(var3, var4, rtol=1e-8)
        var5 = multivariate_hypergeom.var(m=[[5], [10]], n=[5, 10])
        var6 = [[0.], [0.]]
        assert_allclose(var5, var6, rtol=1e-8)

    def test_mean(self):
        # test with hypergeom
        mean0 = multivariate_hypergeom.mean(m=[10, 5], n=4)
        mean1 = hypergeom.mean(M=15, n=4, N=10)
        assert_allclose(mean0[0], mean1, rtol=1e-8)
        mean2 = multivariate_hypergeom.mean(m=[12, 8], n=10)
        mean3 = [12.*10./20., 8.*10./20.]
        assert_allclose(mean2, mean3, rtol=1e-8)

    def test_mean_broadcasting(self):
        """mean broadcasts over stacked (m, n) parameter sets."""
        mean0 = multivariate_hypergeom.mean(m=[[3, 5], [10, 5]], n=[4, 8])
        mean1 = [[3.*4./8., 5.*4./8.], [10.*8./15., 5.*8./15.]]
        assert_allclose(mean0, mean1, rtol=1e-8)

    def test_mean_edge_cases(self):
        """mean handles zero/empty `m` and infeasible `n` (nan output)."""
        mean0 = multivariate_hypergeom.mean(m=[0, 0, 0], n=0)
        assert_equal(mean0, [0., 0., 0.])
        mean1 = multivariate_hypergeom.mean(m=[1, 0, 0], n=2)
        assert_equal(mean1, [np.nan, np.nan, np.nan])
        mean2 = multivariate_hypergeom.mean(m=[[1, 0, 0], [1, 0, 1]], n=2)
        assert_allclose(mean2, [[np.nan, np.nan, np.nan], [1., 0., 1.]],
                        rtol=1e-17)
        mean3 = multivariate_hypergeom.mean(m=np.array([], np.int_), n=0)
        assert_equal(mean3, [])
        assert_(mean3.shape == (0, ))

    def test_var_edge_cases(self):
        """var handles zero/empty `m` and infeasible `n` (nan output)."""
        var0 = multivariate_hypergeom.var(m=[0, 0, 0], n=0)
        assert_allclose(var0, [0., 0., 0.], rtol=1e-16)
        var1 = multivariate_hypergeom.var(m=[1, 0, 0], n=2)
        assert_equal(var1, [np.nan, np.nan, np.nan])
        var2 = multivariate_hypergeom.var(m=[[1, 0, 0], [1, 0, 1]], n=2)
        assert_allclose(var2, [[np.nan, np.nan, np.nan], [0., 0., 0.]],
                        rtol=1e-17)
        var3 = multivariate_hypergeom.var(m=np.array([], np.int_), n=0)
        assert_equal(var3, [])
        assert_(var3.shape == (0, ))

    def test_cov_edge_cases(self):
        """cov handles zero/empty `m` gracefully."""
        cov0 = multivariate_hypergeom.cov(m=[1, 0, 0], n=1)
        cov1 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
        assert_allclose(cov0, cov1, rtol=1e-17)
        cov3 = multivariate_hypergeom.cov(m=[0, 0, 0], n=0)
        cov4 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
        assert_equal(cov3, cov4)
        cov5 = multivariate_hypergeom.cov(m=np.array([], np.int_), n=0)
        # np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
        cov6 = np.array([], dtype=np.float64).reshape(0, 0)
        assert_allclose(cov5, cov6, rtol=1e-17)
        assert_(cov5.shape == (0, 0))

    def test_frozen(self):
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        n = 12
        m = [7, 9, 11, 13]
        x = [[0, 0, 0, 12], [0, 0, 1, 11], [0, 1, 1, 10],
             [1, 1, 1, 9], [1, 1, 2, 8]]
        x = np.asarray(x, dtype=np.int_)
        mhg_frozen = multivariate_hypergeom(m, n)
        assert_allclose(mhg_frozen.pmf(x),
                        multivariate_hypergeom.pmf(x, m, n))
        assert_allclose(mhg_frozen.logpmf(x),
                        multivariate_hypergeom.logpmf(x, m, n))
        assert_allclose(mhg_frozen.var(), multivariate_hypergeom.var(m, n))
        assert_allclose(mhg_frozen.cov(), multivariate_hypergeom.cov(m, n))

    def test_invalid_params(self):
        """Scalar, mismatched-shape and float arguments must raise."""
        assert_raises(ValueError, multivariate_hypergeom.pmf, 5, 10, 5)
        assert_raises(ValueError, multivariate_hypergeom.pmf, 5, [10], 5)
        assert_raises(ValueError, multivariate_hypergeom.pmf, [5, 4], [10], 5)
        assert_raises(TypeError, multivariate_hypergeom.pmf, [5.5, 4.5],
                      [10, 15], 5)
        assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
                      [10.5, 15.5], 5)
        assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
                      [10, 15], 5.5)
class TestRandomTable:
    """Tests for `scipy.stats.random_table` (random contingency tables
    with fixed row and column marginals)."""
    def get_rng(self):
        """Return a deterministically seeded Generator for the tests."""
        return np.random.default_rng(628174795866951638)
    def test_process_parameters(self):
        """Invalid marginals raise; valid ones round-trip unchanged."""
        message = "`row` must be one-dimensional"
        with pytest.raises(ValueError, match=message):
            random_table([[1, 2]], [1, 2])
        message = "`col` must be one-dimensional"
        with pytest.raises(ValueError, match=message):
            random_table([1, 2], [[1, 2]])
        message = "each element of `row` must be non-negative"
        with pytest.raises(ValueError, match=message):
            random_table([1, -1], [1, 2])
        message = "each element of `col` must be non-negative"
        with pytest.raises(ValueError, match=message):
            random_table([1, 2], [1, -2])
        message = "sums over `row` and `col` must be equal"
        with pytest.raises(ValueError, match=message):
            random_table([1, 2], [1, 0])
        message = "each element of `row` must be an integer"
        with pytest.raises(ValueError, match=message):
            random_table([2.1, 2.1], [1, 1, 2])
        message = "each element of `col` must be an integer"
        with pytest.raises(ValueError, match=message):
            random_table([1, 2], [1.1, 1.1, 1])
        row = [1, 3]
        col = [2, 1, 1]
        r, c, n = random_table._process_parameters([1, 3], [2, 1, 1])
        assert_equal(row, r)
        assert_equal(col, c)
        assert n == np.sum(row)
    @pytest.mark.parametrize("scale,method",
                             ((1, "boyett"), (100, "patefield")))
    def test_process_rvs_method_on_None(self, scale, method):
        """method=None must dispatch to the expected default algorithm."""
        row = np.array([1, 3]) * scale
        col = np.array([2, 1, 1]) * scale
        ct = random_table
        expected = ct.rvs(row, col, method=method, random_state=1)
        got = ct.rvs(row, col, method=None, random_state=1)
        assert_equal(expected, got)
    def test_process_rvs_method_bad_argument(self):
        """An unrecognized method name raises ValueError."""
        row = [1, 3]
        col = [2, 1, 1]
        # order of items in set is random, so cannot check that
        message = "'foo' not recognized, must be one of"
        with pytest.raises(ValueError, match=message):
            random_table.rvs(row, col, method="foo")
    @pytest.mark.parametrize('frozen', (True, False))
    @pytest.mark.parametrize('log', (True, False))
    def test_pmf_logpmf(self, frozen, log):
        """pmf/logpmf agree with empirical frequencies and validate input."""
        # The pmf is tested through random sample generation
        # with Boyett's algorithm, whose implementation is simple
        # enough to verify manually for correctness.
        rng = self.get_rng()
        row = [2, 6]
        col = [1, 3, 4]
        rvs = random_table.rvs(row, col, size=1000,
                               method="boyett", random_state=rng)
        obj = random_table(row, col) if frozen else random_table
        method = getattr(obj, "logpmf" if log else "pmf")
        if not frozen:
            original_method = method
            def method(x):
                return original_method(x, row, col)
        pmf = (lambda x: np.exp(method(x))) if log else method
        unique_rvs, counts = np.unique(rvs, axis=0, return_counts=True)
        # rough accuracy check
        p = pmf(unique_rvs)
        assert_allclose(p * len(rvs), counts, rtol=0.1)
        # accept any iterable
        p2 = pmf(list(unique_rvs[0]))
        assert_equal(p2, p[0])
        # accept high-dimensional input and 2d input
        rvs_nd = rvs.reshape((10, 100) + rvs.shape[1:])
        p = pmf(rvs_nd)
        assert p.shape == (10, 100)
        for i in range(p.shape[0]):
            for j in range(p.shape[1]):
                pij = p[i, j]
                rvij = rvs_nd[i, j]
                qij = pmf(rvij)
                assert_equal(pij, qij)
        # probability is zero if column marginal does not match
        x = [[0, 1, 1], [2, 1, 3]]
        assert_equal(np.sum(x, axis=-1), row)
        p = pmf(x)
        assert p == 0
        # probability is zero if row marginal does not match
        x = [[0, 1, 2], [1, 2, 2]]
        assert_equal(np.sum(x, axis=-2), col)
        p = pmf(x)
        assert p == 0
        # response to invalid inputs
        message = "`x` must be at least two-dimensional"
        with pytest.raises(ValueError, match=message):
            pmf([1])
        message = "`x` must contain only integral values"
        with pytest.raises(ValueError, match=message):
            pmf([[1.1]])
        message = "`x` must contain only integral values"
        with pytest.raises(ValueError, match=message):
            pmf([[np.nan]])
        message = "`x` must contain only non-negative values"
        with pytest.raises(ValueError, match=message):
            pmf([[-1]])
        message = "shape of `x` must agree with `row`"
        with pytest.raises(ValueError, match=message):
            pmf([[1, 2, 3]])
        message = "shape of `x` must agree with `col`"
        with pytest.raises(ValueError, match=message):
            pmf([[1, 2],
                 [3, 4]])
    @pytest.mark.parametrize("method", ("boyett", "patefield"))
    def test_rvs_mean(self, method):
        """Samples are unbiased and satisfy both marginals exactly."""
        # test if `rvs` is unbiased and large sample size converges
        # to the true mean.
        rng = self.get_rng()
        row = [2, 6]
        col = [1, 3, 4]
        rvs = random_table.rvs(row, col, size=1000, method=method,
                               random_state=rng)
        mean = random_table.mean(row, col)
        assert_equal(np.sum(mean), np.sum(row))
        assert_allclose(rvs.mean(0), mean, atol=0.05)
        assert_equal(rvs.sum(axis=-1), np.broadcast_to(row, (1000, 2)))
        assert_equal(rvs.sum(axis=-2), np.broadcast_to(col, (1000, 3)))
    def test_rvs_cov(self):
        """Boyett and Patefield sampling give matching cell variances."""
        # test if `rvs` generated with patefield and boyett algorithms
        # produce approximately the same covariance matrix
        rng = self.get_rng()
        row = [2, 6]
        col = [1, 3, 4]
        rvs1 = random_table.rvs(row, col, size=10000, method="boyett",
                                random_state=rng)
        rvs2 = random_table.rvs(row, col, size=10000, method="patefield",
                                random_state=rng)
        cov1 = np.var(rvs1, axis=0)
        cov2 = np.var(rvs2, axis=0)
        assert_allclose(cov1, cov2, atol=0.02)
    @pytest.mark.parametrize("method", ("boyett", "patefield"))
    def test_rvs_size(self, method):
        """`size` handling: None, scalars, tuples and invalid values."""
        row = [2, 6]
        col = [1, 3, 4]
        # test size `None`
        rv = random_table.rvs(row, col, method=method,
                              random_state=self.get_rng())
        assert rv.shape == (2, 3)
        # test size 1
        rv2 = random_table.rvs(row, col, size=1, method=method,
                               random_state=self.get_rng())
        assert rv2.shape == (1, 2, 3)
        assert_equal(rv, rv2[0])
        # test size 0
        rv3 = random_table.rvs(row, col, size=0, method=method,
                               random_state=self.get_rng())
        assert rv3.shape == (0, 2, 3)
        # test other valid size
        rv4 = random_table.rvs(row, col, size=20, method=method,
                               random_state=self.get_rng())
        assert rv4.shape == (20, 2, 3)
        rv5 = random_table.rvs(row, col, size=(4, 5), method=method,
                               random_state=self.get_rng())
        assert rv5.shape == (4, 5, 2, 3)
        assert_allclose(rv5.reshape(20, 2, 3), rv4, rtol=1e-15)
        # test invalid size
        message = "`size` must be a non-negative integer or `None`"
        with pytest.raises(ValueError, match=message):
            random_table.rvs(row, col, size=-1, method=method,
                             random_state=self.get_rng())
        with pytest.raises(ValueError, match=message):
            random_table.rvs(row, col, size=np.nan, method=method,
                             random_state=self.get_rng())
    @pytest.mark.parametrize("method", ("boyett", "patefield"))
    def test_rvs_method(self, method):
        """Sample frequencies of both algorithms follow the pmf."""
        # This test assumes that pmf is correct and checks that random samples
        # follow this probability distribution. This seems like a circular
        # argument, since pmf is checked in test_pmf_logpmf with random samples
        # generated with the rvs method. This test is not redundant, because
        # test_pmf_logpmf intentionally uses rvs generation with Boyett only,
        # but here we test both Boyett and Patefield.
        row = [2, 6]
        col = [1, 3, 4]
        ct = random_table
        rvs = ct.rvs(row, col, size=100000, method=method,
                     random_state=self.get_rng())
        unique_rvs, counts = np.unique(rvs, axis=0, return_counts=True)
        # generated frequencies should match expected frequencies
        p = ct.pmf(unique_rvs, row, col)
        assert_allclose(p * len(rvs), counts, rtol=0.02)
    @pytest.mark.parametrize("method", ("boyett", "patefield"))
    def test_rvs_with_zeros_in_col_row(self, method):
        """Zero marginals force the corresponding table cells to zero."""
        row = [0, 1, 0]
        col = [1, 0, 0, 0]
        d = random_table(row, col)
        rv = d.rvs(1000, method=method, random_state=self.get_rng())
        expected = np.zeros((1000, len(row), len(col)))
        expected[...] = [[0, 0, 0, 0],
                         [1, 0, 0, 0],
                         [0, 0, 0, 0]]
        assert_equal(rv, expected)
    @pytest.mark.parametrize("method", (None, "boyett", "patefield"))
    @pytest.mark.parametrize("col", ([], [0]))
    @pytest.mark.parametrize("row", ([], [0]))
    def test_rvs_with_edge_cases(self, method, row, col):
        """Empty or all-zero marginals yield empty/zero tables."""
        d = random_table(row, col)
        rv = d.rvs(10, method=method, random_state=self.get_rng())
        expected = np.zeros((10, len(row), len(col)))
        assert_equal(rv, expected)
    @pytest.mark.parametrize('v', (1, 2))
    def test_rvs_rcont(self, v):
        """Exercise the low-level `_rcont.rvs_rcont{1,2}` routines directly."""
        # This test checks the internal low-level interface.
        # It is implicitly also checked by the other test_rvs* calls.
        import scipy.stats._rcont as _rcont
        row = np.array([1, 3], dtype=np.int64)
        col = np.array([2, 1, 1], dtype=np.int64)
        rvs = getattr(_rcont, f"rvs_rcont{v}")
        ntot = np.sum(row)
        result = rvs(row, col, ntot, 1, self.get_rng())
        assert result.shape == (1, len(row), len(col))
        assert np.sum(result) == ntot
    def test_frozen(self):
        """Frozen distribution matches the functional interface."""
        row = [2, 6]
        col = [1, 3, 4]
        d = random_table(row, col, seed=self.get_rng())
        sample = d.rvs()
        expected = random_table.mean(row, col)
        assert_equal(expected, d.mean())
        expected = random_table.pmf(sample, row, col)
        assert_equal(expected, d.pmf(sample))
        expected = random_table.logpmf(sample, row, col)
        assert_equal(expected, d.logpmf(sample))
    @pytest.mark.parametrize("method", ("boyett", "patefield"))
    def test_rvs_frozen(self, method):
        """Frozen rvs matches functional rvs given identical seeding."""
        row = [2, 6]
        col = [1, 3, 4]
        d = random_table(row, col, seed=self.get_rng())
        expected = random_table.rvs(row, col, size=10, method=method,
                                    random_state=self.get_rng())
        got = d.rvs(size=10, method=method)
        assert_equal(expected, got)
def check_pickling(distfn, args):
    """Check that a distribution round-trips through pickle.

    ``random_state`` is fixed before pickling, so the unpickled copy must
    produce exactly the same variates as the original.

    Parameters
    ----------
    distfn : distribution object
        Object exposing ``random_state`` and ``rvs``.
    args : tuple
        Positional shape arguments forwarded to ``rvs``.
    """
    # save the random_state (restore later)
    rndm = distfn.random_state
    try:
        distfn.random_state = 1234
        distfn.rvs(*args, size=8)
        s = pickle.dumps(distfn)
        r0 = distfn.rvs(*args, size=8)
        unpickled = pickle.loads(s)
        r1 = unpickled.rvs(*args, size=8)
        assert_equal(r0, r1)
    finally:
        # Restore even if the assertion fails, so a broken distribution
        # does not leak a fixed random_state into subsequent tests.
        distfn.random_state = rndm
def test_random_state_property():
    """Each multivariate distribution honors the random_state property and
    survives pickling (via check_random_state_property / check_pickling)."""
    scale = np.eye(3)
    scale[0, 1] = 0.5
    scale[1, 0] = 0.5
    # (distribution, shape arguments) pairs to check
    dists = [
        [multivariate_normal, ()],
        [dirichlet, (np.array([1.]), )],
        [wishart, (10, scale)],
        [invwishart, (10, scale)],
        [multinomial, (5, [0.5, 0.4, 0.1])],
        [ortho_group, (2,)],
        [special_ortho_group, (2,)]
    ]
    for distfn, args in dists:
        check_random_state_property(distfn, args)
        check_pickling(distfn, args)
class TestVonMises_Fisher:
    @pytest.mark.parametrize("dim", [2, 3, 4, 6])
    @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)])
    def test_samples(self, dim, size):
        """Samples have MVN-rvs-compatible shape and unit Euclidean norm."""
        # test that samples have correct shape and norm 1
        rng = np.random.default_rng(2777937887058094419)
        mu = np.full((dim, ), 1/np.sqrt(dim))
        vmf_dist = vonmises_fisher(mu, 1, seed=rng)
        samples = vmf_dist.rvs(size)
        mean, cov = np.zeros(dim), np.eye(dim)
        expected_shape = rng.multivariate_normal(mean, cov, size=size).shape
        assert samples.shape == expected_shape
        norms = np.linalg.norm(samples, axis=-1)
        assert_allclose(norms, 1.)
    @pytest.mark.parametrize("dim", [5, 8])
    @pytest.mark.parametrize("kappa", [1e15, 1e20, 1e30])
    def test_sampling_high_concentration(self, dim, kappa):
        """Sampling must not warn or fail for extremely large kappa."""
        # test that no warnings are encountered for high values
        rng = np.random.default_rng(2777937887058094419)
        mu = np.full((dim, ), 1/np.sqrt(dim))
        vmf_dist = vonmises_fisher(mu, kappa, seed=rng)
        vmf_dist.rvs(10)
def test_two_dimensional_mu(self):
mu = np.ones((2, 2))
msg = "'mu' must have one-dimensional shape."
with pytest.raises(ValueError, match=msg):
vonmises_fisher(mu, 1)
    def test_wrong_norm_mu(self):
        """A mean direction that is not unit length is rejected."""
        mu = np.ones((2, ))
        msg = "'mu' must be a unit vector of norm 1."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher(mu, 1)
    def test_one_entry_mu(self):
        """A one-entry mean direction is rejected."""
        mu = np.ones((1, ))
        msg = "'mu' must have at least two entries."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher(mu, 1)
    @pytest.mark.parametrize("kappa", [-1, (5, 3)])
    def test_kappa_validation(self, kappa):
        """Negative or non-scalar concentration values are rejected."""
        msg = "'kappa' must be a positive scalar."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher([1, 0], kappa)
    @pytest.mark.parametrize("kappa", [0, 0.])
    def test_kappa_zero(self, kappa):
        """kappa=0 is rejected with a pointer to uniform_direction."""
        msg = ("For 'kappa=0' the von Mises-Fisher distribution "
               "becomes the uniform distribution on the sphere "
               "surface. Consider using 'scipy.stats.uniform_direction' "
               "instead.")
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher([1, 0], kappa)
    @pytest.mark.parametrize("method", [vonmises_fisher.pdf,
                                        vonmises_fisher.logpdf])
    def test_invalid_shapes_pdf_logpdf(self, method):
        """pdf/logpdf reject x whose last axis mismatches the distribution."""
        x = np.array([1., 0., 0])
        msg = ("The dimensionality of the last axis of 'x' must "
               "match the dimensionality of the von Mises Fisher "
               "distribution.")
        with pytest.raises(ValueError, match=msg):
            method(x, [1, 0], 1)
@pytest.mark.parametrize("method", [vonmises_fisher.pdf,
vonmises_fisher.logpdf])
def test_unnormalized_input(self, method):
x = np.array([0.5, 0.])
msg = "'x' must be unit vectors of norm 1 along last dimension."
with pytest.raises(ValueError, match=msg):
method(x, [1, 0], 1)
    # Expected values of the vonmises-fisher PDF were computed via mpmath
    # (note: the helper below returns the PDF, not the logPDF):
    # from mpmath import mp
    # import numpy as np
    # mp.dps = 50
    # def pdf_mpmath(x, mu, kappa):
    #     dim = mu.size
    #     halfdim = mp.mpf(0.5 * dim)
    #     kappa = mp.mpf(kappa)
    #     const = (kappa**(halfdim - mp.one)/((2*mp.pi)**halfdim * \
    #              mp.besseli(halfdim -mp.one, kappa)))
    #     return float(const * mp.exp(kappa*mp.fdot(x, mu)))
    @pytest.mark.parametrize('x, mu, kappa, reference',
                             [(np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               1e-4, 0.0795854295583605),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               1e-4, 0.07957747141331854),
                              (np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               100, 15.915494309189533),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               100, 5.920684802611232e-43),
                              (np.array([1., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]),
                               2000, 5.930499050746588e-07),
                              (np.array([1., 0., 0]), np.array([1., 0., 0.]),
                               2000, 318.3098861837907),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([1., 0., 0., 0., 0.]),
                               2000, 101371.86957712633),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.,
                                         0, 0.]),
                               2000, 0.00018886808182653578),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.8), np.sqrt(0.2), 0.,
                                         0, 0.]),
                               2000, 2.0255393314603194e-87)])
    def test_pdf_accuracy(self, x, mu, kappa, reference):
        """PDF must match the mpmath reference to ~13 significant digits."""
        pdf = vonmises_fisher(mu, kappa).pdf(x)
        assert_allclose(pdf, reference, rtol=1e-13)
    # Expected values of the vonmises-fisher logPDF were computed via mpmath
    # from mpmath import mp
    # import numpy as np
    # mp.dps = 50
    # def logpdf_mpmath(x, mu, kappa):
    #     dim = mu.size
    #     halfdim = mp.mpf(0.5 * dim)
    #     kappa = mp.mpf(kappa)
    #     two = mp.mpf(2.)
    #     const = (kappa**(halfdim - mp.one)/((two*mp.pi)**halfdim * \
    #              mp.besseli(halfdim - mp.one, kappa)))
    #     return float(mp.log(const * mp.exp(kappa*mp.fdot(x, mu))))
    @pytest.mark.parametrize('x, mu, kappa, reference',
                             [(np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               1e-4, -2.5309242486359573),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               1e-4, -2.5310242486359575),
                              (np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               100, 2.767293119578746),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               100, -97.23270688042125),
                              (np.array([1., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]),
                               2000, -14.337987284534103),
                              (np.array([1., 0., 0]), np.array([1., 0., 0.]),
                               2000, 5.763025393132737),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([1., 0., 0., 0., 0.]),
                               2000, 11.526550911307156),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.,
                                         0, 0.]),
                               2000, -8.574461766359684),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.8), np.sqrt(0.2), 0.,
                                         0, 0.]),
                               2000, -199.61906708886113)])
    def test_logpdf_accuracy(self, x, mu, kappa, reference):
        """logPDF must match the mpmath reference to ~14 significant digits."""
        logpdf = vonmises_fisher(mu, kappa).logpdf(x)
        assert_allclose(logpdf, reference, rtol=1e-14)
    # Expected values of the vonmises-fisher entropy were computed via mpmath
    # from mpmath import mp
    # import numpy as np
    # mp.dps = 50
    # def entropy_mpmath(dim, kappa):
    #     mu = np.full((dim, ), 1/np.sqrt(dim))
    #     kappa = mp.mpf(kappa)
    #     halfdim = mp.mpf(0.5 * dim)
    #     logconstant = (mp.log(kappa**(halfdim - mp.one)
    #                    /((2*mp.pi)**halfdim
    #                    * mp.besseli(halfdim -mp.one, kappa)))
    #     return float(-logconstant - kappa * mp.besseli(halfdim, kappa)/
    #                  mp.besseli(halfdim -1, kappa))
    @pytest.mark.parametrize('dim, kappa, reference',
                             [(3, 1e-4, 2.531024245302624),
                              (3, 100, -1.7672931195787458),
                              (5, 5000, -11.359032310024453),
                              (8, 1, 3.4189526482545527)])
    def test_entropy_accuracy(self, dim, kappa, reference):
        """Entropy must match the mpmath reference to ~14 significant digits."""
        mu = np.full((dim, ), 1/np.sqrt(dim))
        entropy = vonmises_fisher(mu, kappa).entropy()
        assert_allclose(entropy, reference, rtol=2e-14)
@pytest.mark.parametrize("method", [vonmises_fisher.pdf,
vonmises_fisher.logpdf])
def test_broadcasting(self, method):
# test that pdf and logpdf values are correctly broadcasted
testshape = (2, 2)
rng = np.random.default_rng(2777937887058094419)
x = uniform_direction(3).rvs(testshape, random_state=rng)
mu = np.full((3, ), 1/np.sqrt(3))
kappa = 5
result_all = method(x, mu, kappa)
assert result_all.shape == testshape
for i in range(testshape[0]):
for j in range(testshape[1]):
current_val = method(x[i, j, :], mu, kappa)
assert_allclose(current_val, result_all[i, j], rtol=1e-15)
def test_vs_vonmises_2d(self):
# test that in 2D, von Mises-Fisher yields the same results
# as the von Mises distribution
rng = np.random.default_rng(2777937887058094419)
mu = np.array([0, 1])
mu_angle = np.arctan2(mu[1], mu[0])
kappa = 20
vmf = vonmises_fisher(mu, kappa)
vonmises_dist = vonmises(loc=mu_angle, kappa=kappa)
vectors = uniform_direction(2).rvs(10, random_state=rng)
angles = np.arctan2(vectors[:, 1], vectors[:, 0])
assert_allclose(vonmises_dist.entropy(), vmf.entropy())
assert_allclose(vonmises_dist.pdf(angles), vmf.pdf(vectors))
assert_allclose(vonmises_dist.logpdf(angles), vmf.logpdf(vectors))
@pytest.mark.parametrize("dim", [2, 3, 6])
@pytest.mark.parametrize("kappa, mu_tol, kappa_tol",
[(1, 5e-2, 5e-2),
(10, 1e-2, 1e-2),
(100, 5e-3, 2e-2),
(1000, 1e-3, 2e-2)])
def test_fit_accuracy(self, dim, kappa, mu_tol, kappa_tol):
mu = np.full((dim, ), 1/np.sqrt(dim))
vmf_dist = vonmises_fisher(mu, kappa)
rng = np.random.default_rng(2777937887058094419)
n_samples = 10000
samples = vmf_dist.rvs(n_samples, random_state=rng)
mu_fit, kappa_fit = vonmises_fisher.fit(samples)
angular_error = np.arccos(mu.dot(mu_fit))
assert_allclose(angular_error, 0., atol=mu_tol, rtol=0)
assert_allclose(kappa, kappa_fit, rtol=kappa_tol)
def test_fit_error_one_dimensional_data(self):
x = np.zeros((3, ))
msg = "'x' must be two dimensional."
with pytest.raises(ValueError, match=msg):
vonmises_fisher.fit(x)
def test_fit_error_unnormalized_data(self):
x = np.ones((3, 3))
msg = "'x' must be unit vectors of norm 1 along last dimension."
with pytest.raises(ValueError, match=msg):
vonmises_fisher.fit(x)
def test_frozen_distribution(self):
mu = np.array([0, 0, 1])
kappa = 5
frozen = vonmises_fisher(mu, kappa)
frozen_seed = vonmises_fisher(mu, kappa, seed=514)
rvs1 = frozen.rvs(random_state=514)
rvs2 = vonmises_fisher.rvs(mu, kappa, random_state=514)
rvs3 = frozen_seed.rvs()
assert_equal(rvs1, rvs2)
assert_equal(rvs1, rvs3)
class TestDirichletMultinomial:
@classmethod
def get_params(self, m):
rng = np.random.default_rng(28469824356873456)
alpha = rng.uniform(0, 100, size=2)
x = rng.integers(1, 20, size=(m, 2))
n = x.sum(axis=-1)
return rng, m, alpha, n, x
def test_frozen(self):
rng = np.random.default_rng(28469824356873456)
alpha = rng.uniform(0, 100, 10)
x = rng.integers(0, 10, 10)
n = np.sum(x, axis=-1)
d = dirichlet_multinomial(alpha, n)
assert_equal(d.logpmf(x), dirichlet_multinomial.logpmf(x, alpha, n))
assert_equal(d.pmf(x), dirichlet_multinomial.pmf(x, alpha, n))
assert_equal(d.mean(), dirichlet_multinomial.mean(alpha, n))
assert_equal(d.var(), dirichlet_multinomial.var(alpha, n))
assert_equal(d.cov(), dirichlet_multinomial.cov(alpha, n))
def test_pmf_logpmf_against_R(self):
# # Compare PMF against R's extraDistr ddirmnon
# # library(extraDistr)
# # options(digits=16)
# ddirmnom(c(1, 2, 3), 6, c(3, 4, 5))
x = np.array([1, 2, 3])
n = np.sum(x)
alpha = np.array([3, 4, 5])
res = dirichlet_multinomial.pmf(x, alpha, n)
logres = dirichlet_multinomial.logpmf(x, alpha, n)
ref = 0.08484162895927638
assert_allclose(res, ref)
assert_allclose(logres, np.log(ref))
assert res.shape == logres.shape == ()
# library(extraDistr)
# options(digits=16)
# ddirmnom(c(4, 3, 2, 0, 2, 3, 5, 7, 4, 7), 37,
# c(45.01025314, 21.98739582, 15.14851365, 80.21588671,
# 52.84935481, 25.20905262, 53.85373737, 4.88568118,
# 89.06440654, 20.11359466))
rng = np.random.default_rng(28469824356873456)
alpha = rng.uniform(0, 100, 10)
x = rng.integers(0, 10, 10)
n = np.sum(x, axis=-1)
res = dirichlet_multinomial(alpha, n).pmf(x)
logres = dirichlet_multinomial.logpmf(x, alpha, n)
ref = 3.65409306285992e-16
assert_allclose(res, ref)
assert_allclose(logres, np.log(ref))
def test_pmf_logpmf_support(self):
# when the sum of the category counts does not equal the number of
# trials, the PMF is zero
rng, m, alpha, n, x = self.get_params(1)
n += 1
assert_equal(dirichlet_multinomial(alpha, n).pmf(x), 0)
assert_equal(dirichlet_multinomial(alpha, n).logpmf(x), -np.inf)
rng, m, alpha, n, x = self.get_params(10)
i = rng.random(size=10) > 0.5
x[i] = np.round(x[i] * 2) # sum of these x does not equal n
assert_equal(dirichlet_multinomial(alpha, n).pmf(x)[i], 0)
assert_equal(dirichlet_multinomial(alpha, n).logpmf(x)[i], -np.inf)
assert np.all(dirichlet_multinomial(alpha, n).pmf(x)[~i] > 0)
assert np.all(dirichlet_multinomial(alpha, n).logpmf(x)[~i] > -np.inf)
def test_dimensionality_one(self):
# if the dimensionality is one, there is only one possible outcome
n = 6 # number of trials
alpha = [10] # concentration parameters
x = np.asarray([n]) # counts
dist = dirichlet_multinomial(alpha, n)
assert_equal(dist.pmf(x), 1)
assert_equal(dist.pmf(x+1), 0)
assert_equal(dist.logpmf(x), 0)
assert_equal(dist.logpmf(x+1), -np.inf)
assert_equal(dist.mean(), n)
assert_equal(dist.var(), 0)
assert_equal(dist.cov(), 0)
@pytest.mark.parametrize('method_name', ['pmf', 'logpmf'])
def test_against_betabinom_pmf(self, method_name):
rng, m, alpha, n, x = self.get_params(100)
method = getattr(dirichlet_multinomial(alpha, n), method_name)
ref_method = getattr(stats.betabinom(n, *alpha.T), method_name)
res = method(x)
ref = ref_method(x.T[0])
assert_allclose(res, ref)
@pytest.mark.parametrize('method_name', ['mean', 'var'])
def test_against_betabinom_moments(self, method_name):
rng, m, alpha, n, x = self.get_params(100)
method = getattr(dirichlet_multinomial(alpha, n), method_name)
ref_method = getattr(stats.betabinom(n, *alpha.T), method_name)
res = method()[:, 0]
ref = ref_method()
assert_allclose(res, ref)
def test_moments(self):
message = 'Needs NumPy 1.22.0 for multinomial broadcasting'
if Version(np.__version__) < Version("1.22.0"):
pytest.skip(reason=message)
rng = np.random.default_rng(28469824356873456)
dim = 5
n = rng.integers(1, 100)
alpha = rng.random(size=dim) * 10
dist = dirichlet_multinomial(alpha, n)
# Generate a random sample from the distribution using NumPy
m = 100000
p = rng.dirichlet(alpha, size=m)
x = rng.multinomial(n, p, size=m)
assert_allclose(dist.mean(), np.mean(x, axis=0), rtol=5e-3)
assert_allclose(dist.var(), np.var(x, axis=0), rtol=1e-2)
assert dist.mean().shape == dist.var().shape == (dim,)
cov = dist.cov()
assert cov.shape == (dim, dim)
assert_allclose(cov, np.cov(x.T), rtol=2e-2)
assert_equal(np.diag(cov), dist.var())
assert np.all(scipy.linalg.eigh(cov)[0] > 0) # positive definite
def test_input_validation(self):
# valid inputs
x0 = np.array([1, 2, 3])
n0 = np.sum(x0)
alpha0 = np.array([3, 4, 5])
text = "`x` must contain only non-negative integers."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf([1, -1, 3], alpha0, n0)
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf([1, 2.1, 3], alpha0, n0)
text = "`alpha` must contain only positive values."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, [3, 0, 4], n0)
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, [3, -1, 4], n0)
text = "`n` must be a positive integer."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, alpha0, 49.1)
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, alpha0, 0)
x = np.array([1, 2, 3, 4])
alpha = np.array([3, 4, 5])
text = "`x` and `alpha` must be broadcastable."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x, alpha, x.sum())
@pytest.mark.parametrize('method', ['pmf', 'logpmf'])
def test_broadcasting_pmf(self, method):
alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
n = np.array([[6], [7], [8]])
x = np.array([[1, 2, 3], [2, 2, 3]]).reshape((2, 1, 1, 3))
method = getattr(dirichlet_multinomial, method)
res = method(x, alpha, n)
assert res.shape == (2, 3, 4)
for i in range(len(x)):
for j in range(len(n)):
for k in range(len(alpha)):
res_ijk = res[i, j, k]
ref = method(x[i].squeeze(), alpha[k].squeeze(), n[j].squeeze())
assert_allclose(res_ijk, ref)
@pytest.mark.parametrize('method_name', ['mean', 'var', 'cov'])
def test_broadcasting_moments(self, method_name):
alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
n = np.array([[6], [7], [8]])
method = getattr(dirichlet_multinomial, method_name)
res = method(alpha, n)
assert res.shape == (3, 4, 3) if method_name != 'cov' else (3, 4, 3, 3)
for j in range(len(n)):
for k in range(len(alpha)):
res_ijk = res[j, k]
ref = method(alpha[k].squeeze(), n[j].squeeze())
assert_allclose(res_ijk, ref)
| 146,088
| 38.494188
| 97
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_hypotests.py
|
from itertools import product
import numpy as np
import random
import functools
import pytest
from numpy.testing import (assert_, assert_equal, assert_allclose,
assert_almost_equal) # avoid new uses
from pytest import raises as assert_raises
import scipy.stats as stats
from scipy.stats import distributions
from scipy.stats._hypotests import (epps_singleton_2samp, cramervonmises,
_cdf_cvm, cramervonmises_2samp,
_pval_cvm_2samp_exact, barnard_exact,
boschloo_exact)
from scipy.stats._mannwhitneyu import mannwhitneyu, _mwu_state
from .common_tests import check_named_results
from scipy._lib._testutils import _TestPythranFunc
class TestEppsSingleton:
def test_statistic_1(self):
# first example in Goerg & Kaiser, also in original paper of
# Epps & Singleton. Note: values do not match exactly, the
# value of the interquartile range varies depending on how
# quantiles are computed
x = np.array([-0.35, 2.55, 1.73, 0.73, 0.35,
2.69, 0.46, -0.94, -0.37, 12.07])
y = np.array([-1.15, -0.15, 2.48, 3.25, 3.71,
4.29, 5.00, 7.74, 8.38, 8.60])
w, p = epps_singleton_2samp(x, y)
assert_almost_equal(w, 15.14, decimal=1)
assert_almost_equal(p, 0.00442, decimal=3)
def test_statistic_2(self):
# second example in Goerg & Kaiser, again not a perfect match
x = np.array((0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 10,
10, 10, 10))
y = np.array((10, 4, 0, 5, 10, 10, 0, 5, 6, 7, 10, 3, 1, 7, 0, 8, 1,
5, 8, 10))
w, p = epps_singleton_2samp(x, y)
assert_allclose(w, 8.900, atol=0.001)
assert_almost_equal(p, 0.06364, decimal=3)
def test_epps_singleton_array_like(self):
np.random.seed(1234)
x, y = np.arange(30), np.arange(28)
w1, p1 = epps_singleton_2samp(list(x), list(y))
w2, p2 = epps_singleton_2samp(tuple(x), tuple(y))
w3, p3 = epps_singleton_2samp(x, y)
assert_(w1 == w2 == w3)
assert_(p1 == p2 == p3)
def test_epps_singleton_size(self):
# raise error if less than 5 elements
x, y = (1, 2, 3, 4), np.arange(10)
assert_raises(ValueError, epps_singleton_2samp, x, y)
def test_epps_singleton_nonfinite(self):
# raise error if there are non-finite values
x, y = (1, 2, 3, 4, 5, np.inf), np.arange(10)
assert_raises(ValueError, epps_singleton_2samp, x, y)
x, y = np.arange(10), (1, 2, 3, 4, 5, np.nan)
assert_raises(ValueError, epps_singleton_2samp, x, y)
def test_epps_singleton_1d_input(self):
x = np.arange(100).reshape(-1, 1)
assert_raises(ValueError, epps_singleton_2samp, x, x)
def test_names(self):
x, y = np.arange(20), np.arange(30)
res = epps_singleton_2samp(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestCvm:
    """Tests for the one-sample Cramér-von Mises test and its CDF helper."""
    # the expected values of the cdfs are taken from Table 1 in
    # Csorgo / Faraway: The Exact and Asymptotic Distribution of
    # Cramér-von Mises Statistics, 1996.
    def test_cdf_4(self):
        """Finite-sample CDF for n=4 against tabulated values."""
        assert_allclose(
                _cdf_cvm([0.02983, 0.04111, 0.12331, 0.94251], 4),
                [0.01, 0.05, 0.5, 0.999],
                atol=1e-4)
    def test_cdf_10(self):
        """Finite-sample CDF for n=10 against tabulated values."""
        assert_allclose(
                _cdf_cvm([0.02657, 0.03830, 0.12068, 0.56643], 10),
                [0.01, 0.05, 0.5, 0.975],
                atol=1e-4)
    def test_cdf_1000(self):
        """Finite-sample CDF for n=1000 against tabulated values."""
        assert_allclose(
                _cdf_cvm([0.02481, 0.03658, 0.11889, 1.16120], 1000),
                [0.01, 0.05, 0.5, 0.999],
                atol=1e-4)
    def test_cdf_inf(self):
        """Asymptotic CDF (n omitted) against tabulated values."""
        assert_allclose(
                _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204]),
                [0.01, 0.05, 0.5, 0.999],
                atol=1e-4)
    def test_cdf_support(self):
        """CDF must be 0 and 1 at the support boundaries."""
        # cdf has support on [1/(12*n), n/3]
        assert_equal(_cdf_cvm([1/(12*533), 533/3], 533), [0, 1])
        assert_equal(_cdf_cvm([1/(12*(27 + 1)), (27 + 1)/3], 27), [0, 1])
    def test_cdf_large_n(self):
        """Finite-sample CDF must approach the asymptotic CDF for large n."""
        # test that asymptotic cdf and cdf for large samples are close
        assert_allclose(
                _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100], 10000),
                _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100]),
                atol=1e-4)
    def test_large_x(self):
        """CDF must stay strictly below 1 for large statistic values."""
        # for large values of x and n, the series used to compute the cdf
        # converges slowly.
        # this leads to bug in R package goftest and MAPLE code that is
        # the basis of the implementation in scipy
        # note: cdf = 1 for x >= 1000/3 and n = 1000
        assert_(0.99999 < _cdf_cvm(333.3, 1000) < 1.0)
        assert_(0.99999 < _cdf_cvm(333.3) < 1.0)
    def test_low_p(self):
        """p-value must be clipped to 0 when the CDF overshoots 1."""
        # _cdf_cvm can return values larger than 1. In that case, we just
        # return a p-value of zero.
        n = 12
        res = cramervonmises(np.ones(n)*0.8, 'norm')
        assert_(_cdf_cvm(res.statistic, n) > 1.0)
        assert_equal(res.pvalue, 0)
    def test_invalid_input(self):
        """Non-1d, too-short, or empty samples must be rejected."""
        x = np.arange(10).reshape((2, 5))
        assert_raises(ValueError, cramervonmises, x, "norm")
        assert_raises(ValueError, cramervonmises, [1.5], "norm")
        assert_raises(ValueError, cramervonmises, (), "norm")
    def test_values_R(self):
        """Statistic and p-value must match R's goftest::cvm.test."""
        # compared against R package goftest, version 1.1.1
        # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6), "pnorm")
        res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm")
        assert_allclose(res.statistic, 0.288156, atol=1e-6)
        assert_allclose(res.pvalue, 0.1453465, atol=1e-6)
        # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6),
        #                   "pnorm", mean = 3, sd = 1.5)
        res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm", (3, 1.5))
        assert_allclose(res.statistic, 0.9426685, atol=1e-6)
        assert_allclose(res.pvalue, 0.002026417, atol=1e-6)
        # goftest::cvm.test(c(1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5), "pexp")
        res = cramervonmises([1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5], "expon")
        assert_allclose(res.statistic, 0.8421854, atol=1e-6)
        assert_allclose(res.pvalue, 0.004433406, atol=1e-6)
    def test_callable_cdf(self):
        """Passing a callable CDF must match passing the distribution name."""
        x, args = np.arange(5), (1.4, 0.7)
        r1 = cramervonmises(x, distributions.expon.cdf)
        r2 = cramervonmises(x, "expon")
        assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
        r1 = cramervonmises(x, distributions.beta.cdf, args)
        r2 = cramervonmises(x, "beta", args)
        assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
class TestMannWhitneyU:
    def setup_method(self):
        # Opt in to the recursive implementation of the exact Mann-Whitney U
        # null distribution for every test in this class.
        _mwu_state._recursive = True
    # All magic numbers are from R wilcox.test unless otherwise specified
    # https://rdrr.io/r/stats/wilcox.test.html
    # --- Test Input Validation ---
def test_input_validation(self):
x = np.array([1, 2]) # generic, valid inputs
y = np.array([3, 4])
with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
mannwhitneyu([], y)
with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
mannwhitneyu(x, [])
with assert_raises(ValueError, match="`use_continuity` must be one"):
mannwhitneyu(x, y, use_continuity='ekki')
with assert_raises(ValueError, match="`alternative` must be one of"):
mannwhitneyu(x, y, alternative='ekki')
with assert_raises(ValueError, match="`axis` must be an integer"):
mannwhitneyu(x, y, axis=1.5)
with assert_raises(ValueError, match="`method` must be one of"):
mannwhitneyu(x, y, method='ekki')
def test_auto(self):
# Test that default method ('auto') chooses intended method
np.random.seed(1)
n = 8 # threshold to switch from exact to asymptotic
# both inputs are smaller than threshold; should use exact
x = np.random.rand(n-1)
y = np.random.rand(n-1)
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# one input is smaller than threshold; should use exact
x = np.random.rand(n-1)
y = np.random.rand(n+1)
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# other input is smaller than threshold; should use exact
auto = mannwhitneyu(y, x)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# both inputs are larger than threshold; should use asymptotic
x = np.random.rand(n+1)
y = np.random.rand(n+1)
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue != exact.pvalue
assert auto.pvalue == asymptotic.pvalue
# both inputs are smaller than threshold, but there is a tie
# should use asymptotic
x = np.random.rand(n-1)
y = np.random.rand(n-1)
y[3] = x[3]
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue != exact.pvalue
assert auto.pvalue == asymptotic.pvalue
    # --- Test Basic Functionality ---
    # Fixed samples whose reference values below come from R's wilcox.test.
    x = [210.052110, 110.190630, 307.918612]
    y = [436.08811482466416, 416.37397329768191, 179.96975939463582,
         197.8118754228619, 34.038757281225756, 138.54220550921517,
         128.7769351470246, 265.92721427951852, 275.6617533155341,
         592.34083395416258, 448.73177590617018, 300.61495185038905,
         187.97508449019588]
    # This test was written for mann_whitney_u in gh-4933.
    # Originally, the p-values for alternatives were swapped;
    # this has been corrected and the tests have been refactored for
    # compactness, but otherwise the tests are unchanged.
    # R code for comparison, e.g.:
    # options(digits = 16)
    # x = c(210.052110, 110.190630, 307.918612)
    # y = c(436.08811482466416, 416.37397329768191, 179.96975939463582,
    #       197.8118754228619, 34.038757281225756, 138.54220550921517,
    #       128.7769351470246, 265.92721427951852, 275.6617533155341,
    #       592.34083395416258, 448.73177590617018, 300.61495185038905,
    #       187.97508449019588)
    # wilcox.test(x, y, alternative="g", exact=TRUE)
    cases_basic = [[{"alternative": 'two-sided', "method": "asymptotic"},
                    (16, 0.6865041817876)],
                   [{"alternative": 'less', "method": "asymptotic"},
                    (16, 0.3432520908938)],
                   [{"alternative": 'greater', "method": "asymptotic"},
                    (16, 0.7047591913255)],
                   [{"alternative": 'two-sided', "method": "exact"},
                    (16, 0.7035714285714)],
                   [{"alternative": 'less', "method": "exact"},
                    (16, 0.3517857142857)],
                   [{"alternative": 'greater', "method": "exact"},
                    (16, 0.6946428571429)]]
    @pytest.mark.parametrize(("kwds", "expected"), cases_basic)
    def test_basic(self, kwds, expected):
        """Statistic and p-value of mannwhitneyu(x, y) against R reference."""
        res = mannwhitneyu(self.x, self.y, **kwds)
        assert_allclose(res, expected)
    # Reference values for test_continuity; see the R calls in its comments.
    cases_continuity = [[{"alternative": 'two-sided', "use_continuity": True},
                         (23, 0.6865041817876)],
                        [{"alternative": 'less', "use_continuity": True},
                         (23, 0.7047591913255)],
                        [{"alternative": 'greater', "use_continuity": True},
                         (23, 0.3432520908938)],
                        [{"alternative": 'two-sided', "use_continuity": False},
                         (23, 0.6377328900502)],
                        [{"alternative": 'less', "use_continuity": False},
                         (23, 0.6811335549749)],
                        [{"alternative": 'greater', "use_continuity": False},
                         (23, 0.3188664450251)]]
    @pytest.mark.parametrize(("kwds", "expected"), cases_continuity)
    def test_continuity(self, kwds, expected):
        """Continuity correction must be applied in the correct direction."""
        # When x and y are interchanged, less and greater p-values should
        # swap (compare to above). This wouldn't happen if the continuity
        # correction were applied in the wrong direction. Note that less and
        # greater p-values do not sum to 1 when continuity correction is on,
        # which is what we'd expect. Also check that results match R when
        # continuity correction is turned off.
        # Note that method='asymptotic' -> exact=FALSE
        # and use_continuity=False -> correct=FALSE, e.g.:
        # wilcox.test(x, y, alternative="t", exact=FALSE, correct=FALSE)
        res = mannwhitneyu(self.y, self.x, method='asymptotic', **kwds)
        assert_allclose(res, expected)
def test_tie_correct(self):
# Test tie correction against R's wilcox.test
# options(digits = 16)
# x = c(1, 2, 3, 4)
# y = c(1, 2, 3, 4, 5)
# wilcox.test(x, y, exact=FALSE)
x = [1, 2, 3, 4]
y0 = np.array([1, 2, 3, 4, 5])
dy = np.array([0, 1, 0, 1, 0])*0.01
dy2 = np.array([0, 0, 1, 0, 0])*0.01
y = [y0-0.01, y0-dy, y0-dy2, y0, y0+dy2, y0+dy, y0+0.01]
res = mannwhitneyu(x, y, axis=-1, method="asymptotic")
U_expected = [10, 9, 8.5, 8, 7.5, 7, 6]
p_expected = [1, 0.9017048037317, 0.804080657472, 0.7086240584439,
0.6197963884941, 0.5368784563079, 0.3912672792826]
assert_equal(res.statistic, U_expected)
assert_allclose(res.pvalue, p_expected)
    # --- Test Exact Distribution of U ---
    # These are tabulated values of the CDF of the exact distribution of
    # the test statistic from pg 52 of reference [1] (Mann-Whitney Original)
    pn3 = {1: [0.25, 0.5, 0.75], 2: [0.1, 0.2, 0.4, 0.6],
           3: [0.05, .1, 0.2, 0.35, 0.5, 0.65]}
    pn4 = {1: [0.2, 0.4, 0.6], 2: [0.067, 0.133, 0.267, 0.4, 0.6],
           3: [0.028, 0.057, 0.114, 0.2, .314, 0.429, 0.571],
           4: [0.014, 0.029, 0.057, 0.1, 0.171, 0.243, 0.343, 0.443, 0.557]}
    pm5 = {1: [0.167, 0.333, 0.5, 0.667],
           2: [0.047, 0.095, 0.19, 0.286, 0.429, 0.571],
           3: [0.018, 0.036, 0.071, 0.125, 0.196, 0.286, 0.393, 0.5, 0.607],
           4: [0.008, 0.016, 0.032, 0.056, 0.095, 0.143,
               0.206, 0.278, 0.365, 0.452, 0.548],
           5: [0.004, 0.008, 0.016, 0.028, 0.048, 0.075, 0.111,
               0.155, 0.21, 0.274, 0.345, .421, 0.5, 0.579]}
    pm6 = {1: [0.143, 0.286, 0.428, 0.571],
           2: [0.036, 0.071, 0.143, 0.214, 0.321, 0.429, 0.571],
           3: [0.012, 0.024, 0.048, 0.083, 0.131,
               0.19, 0.274, 0.357, 0.452, 0.548],
           4: [0.005, 0.01, 0.019, 0.033, 0.057, 0.086, 0.129,
               0.176, 0.238, 0.305, 0.381, 0.457, 0.543],  # the last element
           # of the previous list, 0.543, has been modified from 0.545;
           # I assume it was a typo
           5: [0.002, 0.004, 0.009, 0.015, 0.026, 0.041, 0.063, 0.089,
               0.123, 0.165, 0.214, 0.268, 0.331, 0.396, 0.465, 0.535],
           6: [0.001, 0.002, 0.004, 0.008, 0.013, 0.021, 0.032, 0.047,
               0.066, 0.09, 0.12, 0.155, 0.197, 0.242, 0.294, 0.350,
               0.409, 0.469, 0.531]}
    def test_exact_distribution(self):
        """Exact CDF of U against the tables above, plus internal
        consistency (CDF/SF/PMF identity) and symmetry checks."""
        # I considered parametrize. I decided against it.
        p_tables = {3: self.pn3, 4: self.pn4, 5: self.pm5, 6: self.pm6}
        for n, table in p_tables.items():
            for m, p in table.items():
                # check p-value against table
                u = np.arange(0, len(p))
                assert_allclose(_mwu_state.cdf(k=u, m=m, n=n), p, atol=1e-3)
                # check identity CDF + SF - PMF = 1
                # ( In this implementation, SF(U) includes PMF(U) )
                u2 = np.arange(0, m*n+1)
                assert_allclose(_mwu_state.cdf(k=u2, m=m, n=n)
                                + _mwu_state.sf(k=u2, m=m, n=n)
                                - _mwu_state.pmf(k=u2, m=m, n=n), 1)
                # check symmetry about mean of U, i.e. pmf(U) = pmf(m*n-U)
                pmf = _mwu_state.pmf(k=u2, m=m, n=n)
                assert_allclose(pmf, pmf[::-1])
                # check symmetry w.r.t. interchange of m, n
                pmf2 = _mwu_state.pmf(k=u2, m=n, n=m)
                assert_allclose(pmf, pmf2)
def test_asymptotic_behavior(self):
np.random.seed(0)
# for small samples, the asymptotic test is not very accurate
x = np.random.rand(5)
y = np.random.rand(5)
res1 = mannwhitneyu(x, y, method="exact")
res2 = mannwhitneyu(x, y, method="asymptotic")
assert res1.statistic == res2.statistic
assert np.abs(res1.pvalue - res2.pvalue) > 1e-2
# for large samples, they agree reasonably well
x = np.random.rand(40)
y = np.random.rand(40)
res1 = mannwhitneyu(x, y, method="exact")
res2 = mannwhitneyu(x, y, method="asymptotic")
assert res1.statistic == res2.statistic
assert np.abs(res1.pvalue - res2.pvalue) < 1e-3
# --- Test Corner Cases ---
def test_exact_U_equals_mean(self):
# Test U == m*n/2 with exact method
# Without special treatment, two-sided p-value > 1 because both
# one-sided p-values are > 0.5
res_l = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="less",
method="exact")
res_g = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="greater",
method="exact")
assert_equal(res_l.pvalue, res_g.pvalue)
assert res_l.pvalue > 0.5
res = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="two-sided",
method="exact")
assert_equal(res, (3, 1))
# U == m*n/2 for asymptotic case tested in test_gh_2118
# The reason it's tricky for the asymptotic test has to do with
# continuity correction.
    # Reference results for scalar inputs under every alternative/method.
    cases_scalar = [[{"alternative": 'two-sided', "method": "asymptotic"},
                     (0, 1)],
                    [{"alternative": 'less', "method": "asymptotic"},
                     (0, 0.5)],
                    [{"alternative": 'greater', "method": "asymptotic"},
                     (0, 0.977249868052)],
                    [{"alternative": 'two-sided', "method": "exact"}, (0, 1)],
                    [{"alternative": 'less', "method": "exact"}, (0, 0.5)],
                    [{"alternative": 'greater', "method": "exact"}, (0, 1)]]
    @pytest.mark.parametrize(("kwds", "result"), cases_scalar)
    def test_scalar_data(self, kwds, result):
        """Scalar inputs must be accepted and give the reference results."""
        # just making sure scalars work
        assert_allclose(mannwhitneyu(1, 2, **kwds), result)
def test_equal_scalar_data(self):
# when two scalars are equal, there is an -0.5/0 in the asymptotic
# approximation. R gives pvalue=1.0 for alternatives 'less' and
# 'greater' but NA for 'two-sided'. I don't see why, so I don't
# see a need for a special case to match that behavior.
assert_equal(mannwhitneyu(1, 1, method="exact"), (0.5, 1))
assert_equal(mannwhitneyu(1, 1, method="asymptotic"), (0.5, 1))
# without continuity correction, this becomes 0/0, which really
# is undefined
assert_equal(mannwhitneyu(1, 1, method="asymptotic",
use_continuity=False), (0.5, np.nan))
# --- Test Enhancements / Bug Reports ---
@pytest.mark.parametrize("method", ["asymptotic", "exact"])
def test_gh_12837_11113(self, method):
# Test that behavior for broadcastable nd arrays is appropriate:
# output shape is correct and all values are equal to when the test
# is performed on one pair of samples at a time.
# Tests that gh-12837 and gh-11113 (requests for n-d input)
# are resolved
np.random.seed(0)
# arrays are broadcastable except for axis = -3
axis = -3
m, n = 7, 10 # sample sizes
x = np.random.rand(m, 3, 8)
y = np.random.rand(6, n, 1, 8) + 0.1
res = mannwhitneyu(x, y, method=method, axis=axis)
shape = (6, 3, 8) # appropriate shape of outputs, given inputs
assert res.pvalue.shape == shape
assert res.statistic.shape == shape
# move axis of test to end for simplicity
x, y = np.moveaxis(x, axis, -1), np.moveaxis(y, axis, -1)
x = x[None, ...] # give x a zeroth dimension
assert x.ndim == y.ndim
x = np.broadcast_to(x, shape + (m,))
y = np.broadcast_to(y, shape + (n,))
assert x.shape[:-1] == shape
assert y.shape[:-1] == shape
# loop over pairs of samples
statistics = np.zeros(shape)
pvalues = np.zeros(shape)
for indices in product(*[range(i) for i in shape]):
xi = x[indices]
yi = y[indices]
temp = mannwhitneyu(xi, yi, method=method)
statistics[indices] = temp.statistic
pvalues[indices] = temp.pvalue
np.testing.assert_equal(res.pvalue, pvalues)
np.testing.assert_equal(res.statistic, statistics)
def test_gh_11355(self):
# Test for correct behavior with NaN/Inf in input
x = [1, 2, 3, 4]
y = [3, 6, 7, 8, 9, 3, 2, 1, 4, 4, 5]
res1 = mannwhitneyu(x, y)
# Inf is not a problem. This is a rank test, and it's the largest value
y[4] = np.inf
res2 = mannwhitneyu(x, y)
assert_equal(res1.statistic, res2.statistic)
assert_equal(res1.pvalue, res2.pvalue)
# NaNs should propagate by default.
y[4] = np.nan
res3 = mannwhitneyu(x, y)
assert_equal(res3.statistic, np.nan)
assert_equal(res3.pvalue, np.nan)
    # Reference (x, y, statistic, pvalue) tuples for samples containing inf.
    cases_11355 = [([1, 2, 3, 4],
                    [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
                    10, 0.1297704873477),
                   ([1, 2, 3, 4],
                    [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
                    8.5, 0.08735617507695),
                   ([1, 2, np.inf, 4],
                    [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
                    17.5, 0.5988856695752),
                   ([1, 2, np.inf, 4],
                    [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
                    16, 0.4687165824462),
                   ([1, np.inf, np.inf, 4],
                    [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
                    24.5, 0.7912517950119)]
    @pytest.mark.parametrize(("x", "y", "statistic", "pvalue"), cases_11355)
    def test_gh_11355b(self, x, y, statistic, pvalue):
        """Reference results for samples containing infinities (gh-11355)."""
        # Test for correct behavior with NaN/Inf in input
        res = mannwhitneyu(x, y, method='asymptotic')
        assert_allclose(res.statistic, statistic, atol=1e-12)
        assert_allclose(res.pvalue, pvalue, atol=1e-12)
    # Cases for test_gh_9184 below. Each entry is
    # [use_continuity, alternative, method, expected p-value];
    # expected values generated with the R code in the test docstring.
    cases_9184 = [[True, "less", "asymptotic", 0.900775348204],
                  [True, "greater", "asymptotic", 0.1223118025635],
                  [True, "two-sided", "asymptotic", 0.244623605127],
                  [False, "less", "asymptotic", 0.8896643190401],
                  [False, "greater", "asymptotic", 0.1103356809599],
                  [False, "two-sided", "asymptotic", 0.2206713619198],
                  [True, "less", "exact", 0.8967698967699],
                  [True, "greater", "exact", 0.1272061272061],
                  [True, "two-sided", "exact", 0.2544122544123]]

    @pytest.mark.parametrize(("use_continuity", "alternative",
                              "method", "pvalue_exp"), cases_9184)
    def test_gh_9184(self, use_continuity, alternative, method, pvalue_exp):
        # gh-9184 might be considered a doc-only bug. Please see the
        # documentation to confirm that mannwhitneyu correctly notes
        # that the output statistic is that of the first sample (x). In any
        # case, check the case provided there against output from R.
        # R code:
        # options(digits=16)
        # x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
        # y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
        # wilcox.test(x, y, alternative = "less", exact = FALSE)
        # wilcox.test(x, y, alternative = "greater", exact = FALSE)
        # wilcox.test(x, y, alternative = "two.sided", exact = FALSE)
        # wilcox.test(x, y, alternative = "less", exact = FALSE,
        #             correct=FALSE)
        # wilcox.test(x, y, alternative = "greater", exact = FALSE,
        #             correct=FALSE)
        # wilcox.test(x, y, alternative = "two.sided", exact = FALSE,
        #             correct=FALSE)
        # wilcox.test(x, y, alternative = "less", exact = TRUE)
        # wilcox.test(x, y, alternative = "greater", exact = TRUE)
        # wilcox.test(x, y, alternative = "two.sided", exact = TRUE)
        # The statistic is the same for every case; only the p-value varies.
        statistic_exp = 35
        x = (0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
        y = (1.15, 0.88, 0.90, 0.74, 1.21)
        res = mannwhitneyu(x, y, use_continuity=use_continuity,
                           alternative=alternative, method=method)
        assert_equal(res.statistic, statistic_exp)
        assert_allclose(res.pvalue, pvalue_exp)
    def test_gh_6897(self):
        # Test for correct behavior with empty input (gh-6897):
        # a clear ValueError rather than a cryptic downstream failure.
        with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
            mannwhitneyu([], [])
def test_gh_4067(self):
# Test for correct behavior with all NaN input - default is propagate
a = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
b = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
res = mannwhitneyu(a, b)
assert_equal(res.statistic, np.nan)
assert_equal(res.pvalue, np.nan)
    # All cases checked against R wilcox.test, e.g.
    # options(digits=16)
    # x = c(1, 2, 3)
    # y = c(1.5, 2.5)
    # wilcox.test(x, y, exact=FALSE, alternative='less')
    # Each entry is [x, y, alternative, (expected statistic, expected pvalue)].
    cases_2118 = [[[1, 2, 3], [1.5, 2.5], "greater", (3, 0.6135850036578)],
                  [[1, 2, 3], [1.5, 2.5], "less", (3, 0.6135850036578)],
                  [[1, 2, 3], [1.5, 2.5], "two-sided", (3, 1.0)],
                  [[1, 2, 3], [2], "greater", (1.5, 0.681324055883)],
                  [[1, 2, 3], [2], "less", (1.5, 0.681324055883)],
                  [[1, 2, 3], [2], "two-sided", (1.5, 1)],
                  [[1, 2], [1, 2], "greater", (2, 0.667497228949)],
                  [[1, 2], [1, 2], "less", (2, 0.667497228949)],
                  [[1, 2], [1, 2], "two-sided", (2, 1)]]

    @pytest.mark.parametrize(["x", "y", "alternative", "expected"], cases_2118)
    def test_gh_2118(self, x, y, alternative, expected):
        # test cases in which U == m*n/2 when method is asymptotic
        # applying continuity correction could result in p-value > 1
        res = mannwhitneyu(x, y, use_continuity=True, alternative=alternative,
                           method="asymptotic")
        assert_allclose(res, expected, rtol=1e-12)
    def teardown_method(self):
        # Restore the default: let mannwhitneyu pick the exact-distribution
        # implementation automatically after each test.
        _mwu_state._recursive = None
class TestMannWhitneyU_iterative(TestMannWhitneyU):
    """Re-run the full TestMannWhitneyU suite with the iterative
    implementation of the exact distribution forced on."""

    def setup_method(self):
        # Force the iterative (non-recursive) algorithm.
        _mwu_state._recursive = False

    def teardown_method(self):
        # Restore automatic implementation selection.
        _mwu_state._recursive = None
@pytest.mark.xslow
def test_mann_whitney_u_switch():
    # Check that mannwhitneyu switches between recursive and iterative
    # implementations at n = 500.
    # The recursive algorithm memoizes into _mwu_state._fmnks, so whether
    # that cache was touched reveals which implementation ran.

    # ensure that recursion is not enforced
    _mwu_state._recursive = None
    _mwu_state._fmnks = -np.ones((1, 1, 1))  # reset memoization cache

    rng = np.random.default_rng(9546146887652)
    x = rng.random(5)

    # use iterative algorithm because n > 500
    y = rng.random(501)
    stats.mannwhitneyu(x, y, method='exact')
    # iterative algorithm doesn't modify _mwu_state._fmnks
    assert np.all(_mwu_state._fmnks == -1)

    # use recursive algorithm because n <= 500
    y = rng.random(500)
    stats.mannwhitneyu(x, y, method='exact')
    # recursive algorithm has modified _mwu_state._fmnks
    assert not np.all(_mwu_state._fmnks == -1)
class TestSomersD(_TestPythranFunc):
    """Tests for `scipy.stats.somersd` (Somers' D rank correlation).

    Reference values are cross-checked against SAS PROC FREQ, R, and
    Somers' original 1962 paper where noted.
    """

    def setup_method(self):
        # Exercise the pythran-compiled backend with all integer/float dtypes.
        self.dtypes = self.ALL_INTEGER + self.ALL_FLOAT
        self.arguments = {0: (np.arange(10),
                              self.ALL_INTEGER + self.ALL_FLOAT),
                          1: (np.arange(10),
                              self.ALL_INTEGER + self.ALL_FLOAT)}
        input_array = [self.arguments[idx][0] for idx in self.arguments]
        # In this case, self.partialfunc can simply be stats.somersd,
        # since `alternative` is an optional argument. If it is required,
        # we can use functools.partial to freeze the value, because
        # we only mainly test various array inputs, not str, etc.
        self.partialfunc = functools.partial(stats.somersd,
                                             alternative='two-sided')
        self.expected = self.partialfunc(*input_array)

    def pythranfunc(self, *args):
        # Results must agree with the reference computed in setup_method
        # regardless of input dtype.
        res = self.partialfunc(*args)
        assert_allclose(res.statistic, self.expected.statistic, atol=1e-15)
        assert_allclose(res.pvalue, self.expected.pvalue, atol=1e-15)

    def test_pythranfunc_keywords(self):
        # Not specifying the optional keyword args
        table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
        res1 = stats.somersd(table)
        # Specifying the optional keyword args with default value
        optional_args = self.get_optional_args(stats.somersd)
        res2 = stats.somersd(table, **optional_args)
        # Check if the results are the same in two cases
        assert_allclose(res1.statistic, res2.statistic, atol=1e-15)
        assert_allclose(res1.pvalue, res2.pvalue, atol=1e-15)

    def test_like_kendalltau(self):
        # All tests correspond with one in test_stats.py `test_kendalltau`

        # case without ties, con-dis equal zero
        x = [5, 2, 1, 3, 6, 4, 7, 8]
        y = [5, 2, 6, 3, 1, 8, 7, 4]
        # Cross-check with result from SAS FREQ:
        expected = (0.000000000000000, 1.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # case without ties, con-dis equal zero
        x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
        y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
        # Cross-check with result from SAS FREQ:
        expected = (0.000000000000000, 1.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # case without ties, con-dis close to zero
        x = [5, 2, 1, 3, 6, 4, 7]
        y = [5, 2, 6, 3, 1, 7, 4]
        # Cross-check with result from SAS FREQ:
        expected = (-0.142857142857140, 0.630326953157670)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # simple case without ties
        x = np.arange(10)
        y = np.arange(10)
        # Cross-check with result from SAS FREQ:
        # SAS p value is not provided.
        expected = (1.000000000000000, 0)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # swap a couple values and a couple more
        x = np.arange(10)
        y = np.array([0, 2, 1, 3, 4, 6, 5, 7, 8, 9])
        # Cross-check with result from SAS FREQ:
        expected = (0.911111111111110, 0.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # same in opposite direction
        x = np.arange(10)
        y = np.arange(10)[::-1]
        # Cross-check with result from SAS FREQ:
        # SAS p value is not provided.
        expected = (-1.000000000000000, 0)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # swap a couple values and a couple more
        x = np.arange(10)
        y = np.array([9, 7, 8, 6, 5, 3, 4, 2, 1, 0])
        # Cross-check with result from SAS FREQ:
        expected = (-0.9111111111111111, 0.000000000000000)
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # with some ties
        x1 = [12, 2, 1, 12, 2]
        x2 = [1, 4, 7, 1, 0]
        # Cross-check with result from SAS FREQ:
        expected = (-0.500000000000000, 0.304901788178780)
        res = stats.somersd(x1, x2)
        assert_allclose(res.statistic, expected[0], atol=1e-15)
        assert_allclose(res.pvalue, expected[1], atol=1e-15)

        # with only ties in one or both inputs
        # SAS will not produce an output for these:
        # NOTE: No statistics are computed for x * y because x has fewer
        # than 2 nonmissing levels.
        # WARNING: No OUTPUT data set is produced for this table because a
        # row or column variable has fewer than 2 nonmissing levels and no
        # statistics are computed.
        # scipy returns NaN for both the statistic and the p-value instead.

        res = stats.somersd([2, 2, 2], [2, 2, 2])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        res = stats.somersd([2, 0, 2], [2, 2, 2])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        res = stats.somersd([2, 2, 2], [2, 0, 2])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        res = stats.somersd([0], [0])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        # empty arrays provided as input
        res = stats.somersd([], [])
        assert_allclose(res.statistic, np.nan)
        assert_allclose(res.pvalue, np.nan)

        # test unequal length inputs
        x = np.arange(10.)
        y = np.arange(20.)
        assert_raises(ValueError, stats.somersd, x, y)

    def test_asymmetry(self):
        # test that somersd is asymmetric w.r.t. input order and that
        # convention is as described: first input is row variable & independent
        # data is from Wikipedia:
        # https://en.wikipedia.org/wiki/Somers%27_D
        # but currently that example contradicts itself - it says X is
        # independent yet take D_XY

        x = [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 1, 2,
             2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
        y = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
        # Cross-check with result from SAS FREQ:
        d_cr = 0.272727272727270
        d_rc = 0.342857142857140
        p = 0.092891940883700  # same p-value for either direction
        res = stats.somersd(x, y)
        assert_allclose(res.statistic, d_cr, atol=1e-15)
        assert_allclose(res.pvalue, p, atol=1e-4)
        assert_equal(res.table.shape, (3, 2))
        res = stats.somersd(y, x)
        assert_allclose(res.statistic, d_rc, atol=1e-15)
        assert_allclose(res.pvalue, p, atol=1e-15)
        assert_equal(res.table.shape, (2, 3))

    def test_somers_original(self):
        # test against Somers' original paper [1]

        # Table 5A
        # Somers' convention was column IV
        table = np.array([[8, 2], [6, 5], [3, 4], [1, 3], [2, 3]])
        # Our convention (and that of SAS FREQ) is row IV
        table = table.T
        dyx = 129/340
        assert_allclose(stats.somersd(table).statistic, dyx)

        # table 7A - d_yx = 1
        table = np.array([[25, 0], [85, 0], [0, 30]])
        dxy, dyx = 3300/5425, 3300/3300
        assert_allclose(stats.somersd(table).statistic, dxy)
        assert_allclose(stats.somersd(table.T).statistic, dyx)

        # table 7B - d_yx < 0
        table = np.array([[25, 0], [0, 30], [85, 0]])
        dyx = -1800/3300
        assert_allclose(stats.somersd(table.T).statistic, dyx)

    def test_contingency_table_with_zero_rows_cols(self):
        # test that zero rows/cols in contingency table don't affect result

        N = 100
        shape = 4, 6
        size = np.prod(shape)

        np.random.seed(0)
        s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
        res = stats.somersd(s)

        # inserting all-zero rows/columns must leave the result unchanged
        s2 = np.insert(s, 2, np.zeros(shape[1]), axis=0)
        res2 = stats.somersd(s2)

        s3 = np.insert(s, 2, np.zeros(shape[0]), axis=1)
        res3 = stats.somersd(s3)

        s4 = np.insert(s2, 2, np.zeros(shape[0]+1), axis=1)
        res4 = stats.somersd(s4)

        # Cross-check with result from SAS FREQ:
        assert_allclose(res.statistic, -0.116981132075470, atol=1e-15)
        assert_allclose(res.statistic, res2.statistic)
        assert_allclose(res.statistic, res3.statistic)
        assert_allclose(res.statistic, res4.statistic)

        assert_allclose(res.pvalue, 0.156376448188150, atol=1e-15)
        assert_allclose(res.pvalue, res2.pvalue)
        assert_allclose(res.pvalue, res3.pvalue)
        assert_allclose(res.pvalue, res4.pvalue)

    def test_invalid_contingency_tables(self):
        N = 100
        shape = 4, 6
        size = np.prod(shape)

        np.random.seed(0)
        # start with a valid contingency table
        s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)

        s5 = s - 2  # introduce negative entries
        message = "All elements of the contingency table must be non-negative"
        with assert_raises(ValueError, match=message):
            stats.somersd(s5)

        s6 = s + 0.01  # introduce non-integer entries
        message = "All elements of the contingency table must be integer"
        with assert_raises(ValueError, match=message):
            stats.somersd(s6)

        message = ("At least two elements of the contingency "
                   "table must be nonzero.")

        with assert_raises(ValueError, match=message):
            stats.somersd([[]])

        with assert_raises(ValueError, match=message):
            stats.somersd([[1]])

        s7 = np.zeros((3, 3))
        with assert_raises(ValueError, match=message):
            stats.somersd(s7)

        s7[0, 1] = 1
        with assert_raises(ValueError, match=message):
            stats.somersd(s7)

    def test_only_ranks_matter(self):
        # only ranks of input data should matter
        x = [1, 2, 3]
        x2 = [-1, 2.1, np.inf]
        y = [3, 2, 1]
        y2 = [0, -0.5, -np.inf]
        res = stats.somersd(x, y)
        res2 = stats.somersd(x2, y2)
        assert_equal(res.statistic, res2.statistic)
        assert_equal(res.pvalue, res2.pvalue)

    def test_contingency_table_return(self):
        # check that contingency table is returned
        x = np.arange(10)
        y = np.arange(10)
        res = stats.somersd(x, y)
        assert_equal(res.table, np.eye(10))

    def test_somersd_alternative(self):
        # Test alternative parameter, asymptotic method (due to tie)

        # Based on scipy.stats.test_stats.TestCorrSpearman2::test_alternative
        x1 = [1, 2, 3, 4, 5]
        x2 = [5, 6, 7, 8, 7]

        # strong positive correlation
        expected = stats.somersd(x1, x2, alternative="two-sided")
        assert expected.statistic > 0

        # rank correlation > 0 -> large "less" p-value
        res = stats.somersd(x1, x2, alternative="less")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))

        # rank correlation > 0 -> small "greater" p-value
        res = stats.somersd(x1, x2, alternative="greater")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, expected.pvalue / 2)

        # reverse the direction of rank correlation
        x2.reverse()

        # strong negative correlation
        expected = stats.somersd(x1, x2, alternative="two-sided")
        assert expected.statistic < 0

        # rank correlation < 0 -> large "greater" p-value
        res = stats.somersd(x1, x2, alternative="greater")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))

        # rank correlation < 0 -> small "less" p-value
        res = stats.somersd(x1, x2, alternative="less")
        assert_equal(res.statistic, expected.statistic)
        assert_allclose(res.pvalue, expected.pvalue / 2)

        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            stats.somersd(x1, x2, alternative="ekki-ekki")

    @pytest.mark.parametrize("positive_correlation", (False, True))
    def test_somersd_perfect_correlation(self, positive_correlation):
        # Before the addition of `alternative`, perfect correlation was
        # treated as a special case. Now it is treated like any other case, but
        # make sure there are no divide by zero warnings or associated errors

        x1 = np.arange(10)
        x2 = x1 if positive_correlation else np.flip(x1)
        expected_statistic = 1 if positive_correlation else -1

        # perfect correlation -> small "two-sided" p-value (0)
        res = stats.somersd(x1, x2, alternative="two-sided")
        assert res.statistic == expected_statistic
        assert res.pvalue == 0

        # rank correlation > 0 -> large "less" p-value (1)
        res = stats.somersd(x1, x2, alternative="less")
        assert res.statistic == expected_statistic
        assert res.pvalue == (1 if positive_correlation else 0)

        # rank correlation > 0 -> small "greater" p-value (0)
        res = stats.somersd(x1, x2, alternative="greater")
        assert res.statistic == expected_statistic
        assert res.pvalue == (0 if positive_correlation else 1)

    def test_somersd_large_inputs_gh18132(self):
        # Test that large inputs where potential overflows could occur give
        # the expected output. This is tested in the case of binary inputs.
        # See gh-18126.

        # generate lists of random classes 1-2 (binary)
        classes = [1, 2]
        n_samples = 10 ** 6
        random.seed(6272161)
        x = random.choices(classes, k=n_samples)
        y = random.choices(classes, k=n_samples)

        # get value to compare with: sklearn output
        # from sklearn import metrics
        # val_auc_sklearn = metrics.roc_auc_score(x, y)
        # # convert to the Gini coefficient (Gini = (AUC*2)-1)
        # val_sklearn = 2 * val_auc_sklearn - 1
        val_sklearn = -0.001528138777036947

        # calculate the Somers' D statistic, which should be equal to the
        # result of val_sklearn until approximately machine precision
        val_scipy = stats.somersd(x, y).statistic
        assert_allclose(val_sklearn, val_scipy, atol=1e-15)
class TestBarnardExact:
    """Some tests to show that barnard_exact() works correctly.

    Reference values were generated with the R `Barnard` package; the R
    code used is quoted in the individual test docstrings.
    """

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            ([[43, 40], [10, 39]], (3.555406779643, 0.000362832367)),
            ([[100, 2], [1000, 5]], (-1.776382925679, 0.135126970878)),
            ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
            ([[5, 1], [10, 10]], (1.449486150679, 0.156277546306)),
            ([[5, 15], [20, 20]], (-1.851640199545, 0.066363501421)),
            ([[5, 16], [20, 25]], (-1.609639949352, 0.116984852192)),
            ([[10, 5], [10, 1]], (-1.449486150679, 0.177536588915)),
            ([[5, 0], [1, 4]], (2.581988897472, 0.013671875000)),
            ([[0, 1], [3, 2]], (-1.095445115010, 0.509667991877)),
            ([[0, 2], [6, 4]], (-1.549193338483, 0.197019618792)),
            ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
        ],
    )
    def test_precise(self, input_sample, expected):
        """The expected values have been generated by R, using a resolution
        for the nuisance parameter of 1e-6 :
        ```R
        library(Barnard)
        options(digits=10)
        barnard.test(43, 40, 10, 39, dp=1e-6, pooled=TRUE)
        ```
        """
        res = barnard_exact(input_sample)
        statistic, pvalue = res.statistic, res.pvalue
        assert_allclose([statistic, pvalue], expected)

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            ([[43, 40], [10, 39]], (3.920362887717, 0.000289470662)),
            ([[100, 2], [1000, 5]], (-1.139432816087, 0.950272080594)),
            ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
            ([[5, 1], [10, 10]], (1.622375939458, 0.150599922226)),
            ([[5, 15], [20, 20]], (-1.974771239528, 0.063038448651)),
            ([[5, 16], [20, 25]], (-1.722122973346, 0.133329494287)),
            ([[10, 5], [10, 1]], (-1.765469659009, 0.250566655215)),
            ([[5, 0], [1, 4]], (5.477225575052, 0.007812500000)),
            ([[0, 1], [3, 2]], (-1.224744871392, 0.509667991877)),
            ([[0, 2], [6, 4]], (-1.732050807569, 0.197019618792)),
            ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
        ],
    )
    def test_pooled_param(self, input_sample, expected):
        """The expected values have been generated by R, using a resolution
        for the nuisance parameter of 1e-6 :
        ```R
        library(Barnard)
        options(digits=10)
        barnard.test(43, 40, 10, 39, dp=1e-6, pooled=FALSE)
        ```
        """
        res = barnard_exact(input_sample, pooled=False)
        statistic, pvalue = res.statistic, res.pvalue
        assert_allclose([statistic, pvalue], expected)

    def test_raises(self):
        # test we raise an error for wrong input number of nuisances.
        error_msg = (
            "Number of points `n` must be strictly positive, found 0"
        )
        with assert_raises(ValueError, match=error_msg):
            barnard_exact([[1, 2], [3, 4]], n=0)

        # test we raise an error for wrong shape of input.
        error_msg = "The input `table` must be of shape \\(2, 2\\)."
        with assert_raises(ValueError, match=error_msg):
            barnard_exact(np.arange(6).reshape(2, 3))

        # Test all values must be positives
        error_msg = "All values in `table` must be nonnegative."
        with assert_raises(ValueError, match=error_msg):
            barnard_exact([[-1, 2], [3, 4]])

        # Test value error on wrong alternative param
        error_msg = (
            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
            " found .*"
        )
        with assert_raises(ValueError, match=error_msg):
            barnard_exact([[1, 2], [3, 4]], "not-correct")

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            # NOTE: `expected` is ordered (pvalue, statistic); see asserts.
            ([[0, 0], [4, 3]], (1.0, 0)),
        ],
    )
    def test_edge_cases(self, input_sample, expected):
        res = barnard_exact(input_sample)
        statistic, pvalue = res.statistic, res.pvalue
        assert_equal(pvalue, expected[0])
        assert_equal(statistic, expected[1])

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            # NOTE: `expected` is ordered (pvalue, statistic); see asserts.
            ([[0, 5], [0, 10]], (1.0, np.nan)),
            ([[5, 0], [10, 0]], (1.0, np.nan)),
        ],
    )
    def test_row_or_col_zero(self, input_sample, expected):
        res = barnard_exact(input_sample)
        statistic, pvalue = res.statistic, res.pvalue
        assert_equal(pvalue, expected[0])
        assert_equal(statistic, expected[1])

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            ([[2, 7], [8, 2]], (-2.518474945157, 0.009886140845)),
            ([[7, 200], [300, 8]], (-21.320036698460, 0.0)),
            ([[21, 28], [1957, 6]], (-30.489638143953, 0.0)),
        ],
    )
    @pytest.mark.parametrize("alternative", ["greater", "less"])
    def test_less_greater(self, input_sample, expected, alternative):
        """The expected values have been generated by R, using a resolution
        for the nuisance parameter of 1e-6 :
        ```R
        library(Barnard)
        options(digits=10)
        a = barnard.test(2, 7, 8, 2, dp=1e-6, pooled=TRUE)
        a$p.value[1]
        ```
        In this test, we are using the "one-sided" return value `a$p.value[1]`
        to test our pvalue.
        """
        expected_stat, less_pvalue_expect = expected

        if alternative == "greater":
            # By symmetry, the "greater" case is the "less" case with the
            # table columns swapped and the statistic negated.
            input_sample = np.array(input_sample)[:, ::-1]
            expected_stat = -expected_stat

        res = barnard_exact(input_sample, alternative=alternative)
        statistic, pvalue = res.statistic, res.pvalue
        assert_allclose(
            [statistic, pvalue], [expected_stat, less_pvalue_expect], atol=1e-7
        )
class TestBoschlooExact:
    """Some tests to show that boschloo_exact() works correctly.

    Reference values were generated with the R `Exact` package; the R
    code used is quoted in the individual test docstrings.
    """

    ATOL = 1e-7  # tolerance for comparisons against the R reference values

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            ([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
            ([[5, 1], [10, 10]], (0.9782609, 0.9450994)),
            ([[5, 16], [20, 25]], (0.08913823, 0.05827348)),
            ([[10, 5], [10, 1]], (0.1652174, 0.08565611)),
            ([[5, 0], [1, 4]], (1, 1)),
            ([[0, 1], [3, 2]], (0.5, 0.34375)),
            ([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
            ([[7, 12], [8, 3]], (0.06406797, 0.03410916)),
            ([[10, 24], [25, 37]], (0.2009359, 0.1512882)),
        ],
    )
    def test_less(self, input_sample, expected):
        """The expected values have been generated by R, using a resolution
        for the nuisance parameter of 1e-8 :
        ```R
        library(Exact)
        options(digits=10)
        data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
        a = exact.test(data, method="Boschloo", alternative="less",
                       tsmethod="central", np.interval=TRUE, beta=1e-8)
        ```
        """
        res = boschloo_exact(input_sample, alternative="less")
        statistic, pvalue = res.statistic, res.pvalue
        assert_allclose([statistic, pvalue], expected, atol=self.ATOL)

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            ([[43, 40], [10, 39]], (0.0002875544, 0.0001615562)),
            ([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
            ([[5, 1], [10, 10]], (0.1652174, 0.09008534)),
            ([[5, 15], [20, 20]], (0.9849087, 0.9706997)),
            ([[5, 16], [20, 25]], (0.972349, 0.9524124)),
            ([[5, 0], [1, 4]], (0.02380952, 0.006865367)),
            ([[0, 1], [3, 2]], (1, 1)),
            ([[0, 2], [6, 4]], (1, 1)),
            ([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
            ([[7, 12], [8, 3]], (0.9895302, 0.9771215)),
            ([[10, 24], [25, 37]], (0.9012936, 0.8633275)),
        ],
    )
    def test_greater(self, input_sample, expected):
        """The expected values have been generated by R, using a resolution
        for the nuisance parameter of 1e-8 :
        ```R
        library(Exact)
        options(digits=10)
        data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
        a = exact.test(data, method="Boschloo", alternative="greater",
                       tsmethod="central", np.interval=TRUE, beta=1e-8)
        ```
        """
        res = boschloo_exact(input_sample, alternative="greater")
        statistic, pvalue = res.statistic, res.pvalue
        assert_allclose([statistic, pvalue], expected, atol=self.ATOL)

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            ([[43, 40], [10, 39]], (0.0002875544, 0.0003231115)),
            ([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
            ([[5, 1], [10, 10]], (0.1652174, 0.1801707)),
            ([[5, 16], [20, 25]], (0.08913823, 0.116547)),
            ([[5, 0], [1, 4]], (0.02380952, 0.01373073)),
            ([[0, 1], [3, 2]], (0.5, 0.6875)),
            ([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
            ([[7, 12], [8, 3]], (0.06406797, 0.06821831)),
        ],
    )
    def test_two_sided(self, input_sample, expected):
        """The expected values have been generated by R, using a resolution
        for the nuisance parameter of 1e-8 :
        ```R
        library(Exact)
        options(digits=10)
        data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
        a = exact.test(data, method="Boschloo", alternative="two.sided",
                       tsmethod="central", np.interval=TRUE, beta=1e-8)
        ```
        """
        res = boschloo_exact(input_sample, alternative="two-sided", n=64)
        # Need n = 64 for python 32-bit
        statistic, pvalue = res.statistic, res.pvalue
        assert_allclose([statistic, pvalue], expected, atol=self.ATOL)

    def test_raises(self):
        # test we raise an error for wrong input number of nuisances.
        error_msg = (
            "Number of points `n` must be strictly positive, found 0"
        )
        with assert_raises(ValueError, match=error_msg):
            boschloo_exact([[1, 2], [3, 4]], n=0)

        # test we raise an error for wrong shape of input.
        error_msg = "The input `table` must be of shape \\(2, 2\\)."
        with assert_raises(ValueError, match=error_msg):
            boschloo_exact(np.arange(6).reshape(2, 3))

        # Test all values must be positives
        error_msg = "All values in `table` must be nonnegative."
        with assert_raises(ValueError, match=error_msg):
            boschloo_exact([[-1, 2], [3, 4]])

        # Test value error on wrong alternative param
        error_msg = (
            r"`alternative` should be one of \('two-sided', 'less', "
            r"'greater'\), found .*"
        )
        with assert_raises(ValueError, match=error_msg):
            boschloo_exact([[1, 2], [3, 4]], "not-correct")

    @pytest.mark.parametrize(
        "input_sample,expected",
        [
            # NOTE: `expected` is ordered (pvalue, statistic); see asserts.
            ([[0, 5], [0, 10]], (np.nan, np.nan)),
            ([[5, 0], [10, 0]], (np.nan, np.nan)),
        ],
    )
    def test_row_or_col_zero(self, input_sample, expected):
        res = boschloo_exact(input_sample)
        statistic, pvalue = res.statistic, res.pvalue
        assert_equal(pvalue, expected[0])
        assert_equal(statistic, expected[1])

    def test_two_sided_gt_1(self):
        # Check that returned p-value does not exceed 1 even when twice
        # the minimum of the one-sided p-values does. See gh-15345.
        tbl = [[1, 1], [13, 12]]
        pl = boschloo_exact(tbl, alternative='less').pvalue
        pg = boschloo_exact(tbl, alternative='greater').pvalue
        assert 2*min(pl, pg) > 1
        pt = boschloo_exact(tbl, alternative='two-sided').pvalue
        assert pt == 1.0

    @pytest.mark.parametrize("alternative", ("less", "greater"))
    def test_against_fisher_exact(self, alternative):
        # Check that the statistic of `boschloo_exact` is the same as the
        # p-value of `fisher_exact` (for one-sided tests). See gh-15345.
        tbl = [[2, 7], [8, 2]]
        boschloo_stat = boschloo_exact(tbl, alternative=alternative).statistic
        fisher_p = stats.fisher_exact(tbl, alternative=alternative)[1]
        assert_allclose(boschloo_stat, fisher_p)
class TestCvm_2samp:
    """Tests for `cramervonmises_2samp` (two-sample Cramér-von Mises)."""

    def test_invalid_input(self):
        x = np.arange(10).reshape((2, 5))
        y = np.arange(5)
        msg = 'The samples must be one-dimensional'
        with pytest.raises(ValueError, match=msg):
            cramervonmises_2samp(x, y)
        with pytest.raises(ValueError, match=msg):
            cramervonmises_2samp(y, x)
        msg = 'x and y must contain at least two observations.'
        with pytest.raises(ValueError, match=msg):
            cramervonmises_2samp([], y)
        with pytest.raises(ValueError, match=msg):
            cramervonmises_2samp(y, [1])
        msg = 'method must be either auto, exact or asymptotic'
        with pytest.raises(ValueError, match=msg):
            cramervonmises_2samp(y, y, 'xyz')

    def test_list_input(self):
        # plain Python lists and ndarrays must give identical results
        x = [2, 3, 4, 7, 6]
        y = [0.2, 0.7, 12, 18]
        r1 = cramervonmises_2samp(x, y)
        r2 = cramervonmises_2samp(np.array(x), np.array(y))
        assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))

    def test_example_conover(self):
        # Example 2 in Section 6.2 of W.J. Conover: Practical Nonparametric
        # Statistics, 1971.
        x = [7.6, 8.4, 8.6, 8.7, 9.3, 9.9, 10.1, 10.6, 11.2]
        y = [5.2, 5.7, 5.9, 6.5, 6.8, 8.2, 9.1, 9.8, 10.8, 11.3, 11.5, 12.3,
             12.5, 13.4, 14.6]
        r = cramervonmises_2samp(x, y)
        assert_allclose(r.statistic, 0.262, atol=1e-3)
        assert_allclose(r.pvalue, 0.18, atol=1e-2)

    @pytest.mark.parametrize('statistic, m, n, pval',
                             [(710, 5, 6, 48./462),
                              (1897, 7, 7, 117./1716),
                              (576, 4, 6, 2./210),
                              (1764, 6, 7, 2./1716)])
    def test_exact_pvalue(self, statistic, m, n, pval):
        # the exact values are taken from Anderson: On the distribution of the
        # two-sample Cramer-von-Mises criterion, 1962.
        # The values are taken from Table 2, 3, 4 and 5
        assert_equal(_pval_cvm_2samp_exact(statistic, m, n), pval)

    def test_large_sample(self):
        # for large samples, the statistic U gets very large
        # do a sanity check that p-value is not 0, 1 or nan
        np.random.seed(4367)
        x = distributions.norm.rvs(size=1000000)
        y = distributions.norm.rvs(size=900000)
        r = cramervonmises_2samp(x, y)
        assert_(0 < r.pvalue < 1)
        r = cramervonmises_2samp(x, y+0.1)
        assert_(0 < r.pvalue < 1)

    def test_exact_vs_asymptotic(self):
        # the two methods agree on the statistic exactly and on the
        # p-value approximately for small samples
        np.random.seed(0)
        x = np.random.rand(7)
        y = np.random.rand(8)
        r1 = cramervonmises_2samp(x, y, method='exact')
        r2 = cramervonmises_2samp(x, y, method='asymptotic')
        assert_equal(r1.statistic, r2.statistic)
        assert_allclose(r1.pvalue, r2.pvalue, atol=1e-2)

    def test_method_auto(self):
        x = np.arange(20)
        y = [0.5, 4.7, 13.1]
        r1 = cramervonmises_2samp(x, y, method='exact')
        r2 = cramervonmises_2samp(x, y, method='auto')
        assert_equal(r1.pvalue, r2.pvalue)
        # switch to asymptotic if one sample has more than 20 observations
        x = np.arange(21)
        r1 = cramervonmises_2samp(x, y, method='asymptotic')
        r2 = cramervonmises_2samp(x, y, method='auto')
        assert_equal(r1.pvalue, r2.pvalue)

    def test_same_input(self):
        # make sure trivial edge case can be handled
        # note that _cdf_cvm_inf(0) = nan. implementation avoids nan by
        # returning pvalue=1 for very small values of the statistic
        x = np.arange(15)
        res = cramervonmises_2samp(x, x)
        assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
        # check exact p-value
        res = cramervonmises_2samp(x[:4], x[:4])
        assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
class TestTukeyHSD:
data_same_size = ([24.5, 23.5, 26.4, 27.1, 29.9],
[28.4, 34.2, 29.5, 32.2, 30.1],
[26.1, 28.3, 24.3, 26.2, 27.8])
data_diff_size = ([24.5, 23.5, 26.28, 26.4, 27.1, 29.9, 30.1, 30.1],
[28.4, 34.2, 29.5, 32.2, 30.1],
[26.1, 28.3, 24.3, 26.2, 27.8])
extreme_size = ([24.5, 23.5, 26.4],
[28.4, 34.2, 29.5, 32.2, 30.1, 28.4, 34.2, 29.5, 32.2,
30.1],
[26.1, 28.3, 24.3, 26.2, 27.8])
sas_same_size = """
Comparison LowerCL Difference UpperCL Significance
2 - 3 0.6908830568 4.34 7.989116943 1
2 - 1 0.9508830568 4.6 8.249116943 1
3 - 2 -7.989116943 -4.34 -0.6908830568 1
3 - 1 -3.389116943 0.26 3.909116943 0
1 - 2 -8.249116943 -4.6 -0.9508830568 1
1 - 3 -3.909116943 -0.26 3.389116943 0
"""
sas_diff_size = """
Comparison LowerCL Difference UpperCL Significance
2 - 1 0.2679292645 3.645 7.022070736 1
2 - 3 0.5934764007 4.34 8.086523599 1
1 - 2 -7.022070736 -3.645 -0.2679292645 1
1 - 3 -2.682070736 0.695 4.072070736 0
3 - 2 -8.086523599 -4.34 -0.5934764007 1
3 - 1 -4.072070736 -0.695 2.682070736 0
"""
sas_extreme = """
Comparison LowerCL Difference UpperCL Significance
2 - 3 1.561605075 4.34 7.118394925 1
2 - 1 2.740784879 6.08 9.419215121 1
3 - 2 -7.118394925 -4.34 -1.561605075 1
3 - 1 -1.964526566 1.74 5.444526566 0
1 - 2 -9.419215121 -6.08 -2.740784879 1
1 - 3 -5.444526566 -1.74 1.964526566 0
"""
    @pytest.mark.parametrize("data,res_expect_str,atol",
                             ((data_same_size, sas_same_size, 1e-4),
                              (data_diff_size, sas_diff_size, 1e-4),
                              (extreme_size, sas_extreme, 1e-10),
                              ),
                             ids=["equal size sample",
                                  "unequal sample size",
                                  "extreme sample size differences"])
    def test_compare_sas(self, data, res_expect_str, atol):
        '''
        SAS code used to generate results for each sample:
        DATA ACHE;
        INPUT BRAND RELIEF;
        CARDS;
        1 24.5
        ...
        3 27.8
        ;
        ods graphics on; ODS RTF;ODS LISTING CLOSE;
        PROC ANOVA DATA=ACHE;
        CLASS BRAND;
        MODEL RELIEF=BRAND;
        MEANS BRAND/TUKEY CLDIFF;
        TITLE 'COMPARE RELIEF ACROSS MEDICINES  - ANOVA EXAMPLE';
        ods output CLDiffs =tc;
        proc print data=tc;
        format LowerCL 17.16 UpperCL 17.16 Difference 17.16;
        title "Output with many digits";
        RUN;
        QUIT;
        ODS RTF close;
        ODS LISTING;
        '''
        # Parse the SAS table: drop the 5-word header, then each row is
        # (group i, group j, lower CL, difference, upper CL, significance).
        res_expect = np.asarray(res_expect_str.replace(" - ", " ").split()[5:],
                                dtype=float).reshape((6, 6))
        res_tukey = stats.tukey_hsd(*data)
        conf = res_tukey.confidence_interval()
        # loop over the comparisons
        for i, j, l, s, h, sig in res_expect:
            i, j = int(i) - 1, int(j) - 1  # SAS groups are 1-based
            assert_allclose(conf.low[i, j], l, atol=atol)
            assert_allclose(res_tukey.statistic[i, j], s, atol=atol)
            assert_allclose(conf.high[i, j], h, atol=atol)
            # SAS flags significance at the .05 level with a 1
            assert_allclose((res_tukey.pvalue[i, j] <= .05), sig == 1)
matlab_sm_siz = """
1 2 -8.2491590248597 -4.6 -0.9508409751403 0.0144483269098
1 3 -3.9091590248597 -0.26 3.3891590248597 0.9803107240900
2 3 0.6908409751403 4.34 7.9891590248597 0.0203311368795
"""
matlab_diff_sz = """
1 2 -7.02207069748501 -3.645 -0.26792930251500 0.03371498443080
1 3 -2.68207069748500 0.695 4.07207069748500 0.85572267328807
2 3 0.59347644287720 4.34 8.08652355712281 0.02259047020620
"""
@pytest.mark.parametrize("data,res_expect_str,atol",
((data_same_size, matlab_sm_siz, 1e-12),
(data_diff_size, matlab_diff_sz, 1e-7)),
ids=["equal size sample",
"unequal size sample"])
def test_compare_matlab(self, data, res_expect_str, atol):
"""
vals = [24.5, 23.5, 26.4, 27.1, 29.9, 28.4, 34.2, 29.5, 32.2, 30.1,
26.1, 28.3, 24.3, 26.2, 27.8]
names = {'zero', 'zero', 'zero', 'zero', 'zero', 'one', 'one', 'one',
'one', 'one', 'two', 'two', 'two', 'two', 'two'}
[p,t,stats] = anova1(vals,names,"off");
[c,m,h,nms] = multcompare(stats, "CType","hsd");
"""
res_expect = np.asarray(res_expect_str.split(),
dtype=float).reshape((3, 6))
res_tukey = stats.tukey_hsd(*data)
conf = res_tukey.confidence_interval()
# loop over the comparisons
for i, j, l, s, h, p in res_expect:
i, j = int(i) - 1, int(j) - 1
assert_allclose(conf.low[i, j], l, atol=atol)
assert_allclose(res_tukey.statistic[i, j], s, atol=atol)
assert_allclose(conf.high[i, j], h, atol=atol)
assert_allclose(res_tukey.pvalue[i, j], p, atol=atol)
def test_compare_r(self):
"""
Testing against results and p-values from R:
from: https://www.rdocumentation.org/packages/stats/versions/3.6.2/
topics/TukeyHSD
> require(graphics)
> summary(fm1 <- aov(breaks ~ tension, data = warpbreaks))
> TukeyHSD(fm1, "tension", ordered = TRUE)
> plot(TukeyHSD(fm1, "tension"))
Tukey multiple comparisons of means
95% family-wise confidence level
factor levels have been ordered
Fit: aov(formula = breaks ~ tension, data = warpbreaks)
$tension
"""
str_res = """
diff lwr upr p adj
2 - 3 4.722222 -4.8376022 14.28205 0.4630831
1 - 3 14.722222 5.1623978 24.28205 0.0014315
1 - 2 10.000000 0.4401756 19.55982 0.0384598
"""
res_expect = np.asarray(str_res.replace(" - ", " ").split()[5:],
dtype=float).reshape((3, 6))
data = ([26, 30, 54, 25, 70, 52, 51, 26, 67,
27, 14, 29, 19, 29, 31, 41, 20, 44],
[18, 21, 29, 17, 12, 18, 35, 30, 36,
42, 26, 19, 16, 39, 28, 21, 39, 29],
[36, 21, 24, 18, 10, 43, 28, 15, 26,
20, 21, 24, 17, 13, 15, 15, 16, 28])
res_tukey = stats.tukey_hsd(*data)
conf = res_tukey.confidence_interval()
# loop over the comparisons
for i, j, s, l, h, p in res_expect:
i, j = int(i) - 1, int(j) - 1
# atols are set to the number of digits present in the r result.
assert_allclose(conf.low[i, j], l, atol=1e-7)
assert_allclose(res_tukey.statistic[i, j], s, atol=1e-6)
assert_allclose(conf.high[i, j], h, atol=1e-5)
assert_allclose(res_tukey.pvalue[i, j], p, atol=1e-7)
def test_engineering_stat_handbook(self):
'''
Example sourced from:
https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm
'''
group1 = [6.9, 5.4, 5.8, 4.6, 4.0]
group2 = [8.3, 6.8, 7.8, 9.2, 6.5]
group3 = [8.0, 10.5, 8.1, 6.9, 9.3]
group4 = [5.8, 3.8, 6.1, 5.6, 6.2]
res = stats.tukey_hsd(group1, group2, group3, group4)
conf = res.confidence_interval()
lower = np.asarray([
[0, 0, 0, -2.25],
[.29, 0, -2.93, .13],
[1.13, 0, 0, .97],
[0, 0, 0, 0]])
upper = np.asarray([
[0, 0, 0, 1.93],
[4.47, 0, 1.25, 4.31],
[5.31, 0, 0, 5.15],
[0, 0, 0, 0]])
for (i, j) in [(1, 0), (2, 0), (0, 3), (1, 2), (2, 3)]:
assert_allclose(conf.low[i, j], lower[i, j], atol=1e-2)
assert_allclose(conf.high[i, j], upper[i, j], atol=1e-2)
def test_rand_symm(self):
# test some expected identities of the results
np.random.seed(1234)
data = np.random.rand(3, 100)
res = stats.tukey_hsd(*data)
conf = res.confidence_interval()
# the confidence intervals should be negated symmetric of each other
assert_equal(conf.low, -conf.high.T)
# the `high` and `low` center diagonals should be the same since the
# mean difference in a self comparison is 0.
assert_equal(np.diagonal(conf.high), conf.high[0, 0])
assert_equal(np.diagonal(conf.low), conf.low[0, 0])
# statistic array should be antisymmetric with zeros on the diagonal
assert_equal(res.statistic, -res.statistic.T)
assert_equal(np.diagonal(res.statistic), 0)
# p-values should be symmetric and 1 when compared to itself
assert_equal(res.pvalue, res.pvalue.T)
assert_equal(np.diagonal(res.pvalue), 1)
def test_no_inf(self):
    # non-finite observations must be rejected up front
    samples = ([1, 2, 3], [2, np.inf], [6, 7, 3])
    with assert_raises(ValueError, match="...must be finite."):
        stats.tukey_hsd(*samples)
def test_is_1d(self):
    # every treatment sample must be one-dimensional
    samples = ([[1, 2], [2, 3]], [2, 5], [5, 23, 6])
    with assert_raises(ValueError, match="...must be one-dimensional"):
        stats.tukey_hsd(*samples)
def test_no_empty(self):
    # an empty treatment sample is rejected
    samples = ([], [2, 5], [4, 5, 6])
    with assert_raises(ValueError, match="...must be greater than one"):
        stats.tukey_hsd(*samples)
@pytest.mark.parametrize("nargs", (0, 1))
def test_not_enough_treatments(self, nargs):
with assert_raises(ValueError, match="...more than 1 treatment."):
stats.tukey_hsd(*([[23, 7, 3]] * nargs))
@pytest.mark.parametrize("cl", [-.5, 0, 1, 2])
def test_conf_level_invalid(self, cl):
with assert_raises(ValueError, match="must be between 0 and 1"):
r = stats.tukey_hsd([23, 7, 3], [3, 4], [9, 4])
r.confidence_interval(cl)
def test_2_args_ttest(self):
    # with exactly two treatments, Tukey HSD reduces to the
    # two-sample t-test, so the p-values must coincide
    first_two = self.data_diff_size[:2]
    res_tukey = stats.tukey_hsd(*first_two)
    res_ttest = stats.ttest_ind(*first_two)
    assert_allclose(res_ttest.pvalue, res_tukey.pvalue[0, 1])
    assert_allclose(res_ttest.pvalue, res_tukey.pvalue[1, 0])
class TestPoissonMeansTest:
    """Tests for `stats.poisson_means_test` (the Poisson E-test)."""

    @pytest.mark.parametrize("c1, n1, c2, n2, p_expect", (
        # example from [1], 6. Illustrative examples: Example 1
        [0, 100, 3, 100, 0.0884],
        [2, 100, 6, 100, 0.1749]
    ))
    def test_paper_examples(self, c1, n1, c2, n2, p_expect):
        # the published p-values are reported to four decimal places
        result = stats.poisson_means_test(c1, n1, c2, n2)
        assert_allclose(result.pvalue, p_expect, atol=1e-4)

    @pytest.mark.parametrize("c1, n1, c2, n2, p_expect, alt, d", (
        # These test cases are produced by the wrapped fortran code from the
        # original authors. Using a slightly modified version of this fortran,
        # found here, https://github.com/nolanbconaway/poisson-etest,
        # additional tests were created.
        [20, 10, 20, 10, 0.9999997568929630, 'two-sided', 0],
        [10, 10, 10, 10, 0.9999998403241203, 'two-sided', 0],
        [50, 15, 1, 1, 0.09920321053409643, 'two-sided', .05],
        [3, 100, 20, 300, 0.12202725450896404, 'two-sided', 0],
        [3, 12, 4, 20, 0.40416087318539173, 'greater', 0],
        [4, 20, 3, 100, 0.008053640402974236, 'greater', 0],
        # publishing paper does not include a `less` alternative,
        # so it was calculated with switched argument order and
        # alternative="greater"
        [4, 20, 3, 10, 0.3083216325432898, 'less', 0],
        [1, 1, 50, 15, 0.09322998607245102, 'less', 0]
    ))
    def test_fortran_authors(self, c1, n1, c2, n2, p_expect, alt, d):
        result = stats.poisson_means_test(c1, n1, c2, n2,
                                          alternative=alt, diff=d)
        assert_allclose(result.pvalue, p_expect, atol=2e-6, rtol=1e-16)

    def test_different_results(self):
        # The implementation in Fortran is known to break down at higher
        # counts and observations, so we expect different results. By
        # inspection we can infer the p-value to be near one.
        res = stats.poisson_means_test(10000, 10000, 10000, 10000)
        assert_allclose(res.pvalue, 1)

    def test_less_than_zero_lambda_hat2(self):
        # demonstrates behavior that fixes a known fault from the original
        # Fortran; the p-value should clearly be near one
        res = stats.poisson_means_test(0, 1, 0, 1)
        assert_allclose(res.pvalue, 1)

    def test_input_validation(self):
        """Invalid inputs must raise with informative messages."""
        # non-integral event counts
        message = '`k1` and `k2` must be integers.'
        with assert_raises(TypeError, match=message):
            stats.poisson_means_test(.7, 1, 0, 1)
        with assert_raises(TypeError, match=message):
            stats.poisson_means_test(0, 1, .7, 1)

        # negative event counts
        message = '`k1` and `k2` must be greater than or equal to 0.'
        with assert_raises(ValueError, match=message):
            stats.poisson_means_test(-1, 1, 0, 1)
        with assert_raises(ValueError, match=message):
            stats.poisson_means_test(0, 1, -1, 1)

        # non-positive sample sizes
        message = '`n1` and `n2` must be greater than 0.'
        with assert_raises(ValueError, match=message):
            stats.poisson_means_test(0, -1, 0, 1)
        with assert_raises(ValueError, match=message):
            stats.poisson_means_test(0, 1, 0, -1)

        # negative hypothesized difference
        message = 'diff must be greater than or equal to 0.'
        with assert_raises(ValueError, match=message):
            stats.poisson_means_test(0, 1, 0, 1, diff=-1)

        # invalid alternative (fixed comment typo: "alternatvie")
        message = 'Alternative must be one of ...'
        with assert_raises(ValueError, match=message):
            stats.poisson_means_test(1, 2, 1, 2, alternative='error')
class TestBWSTest:
    """Tests for `stats.bws_test` (Baumgartner-Weiss-Schindler test)."""

    def test_bws_input_validation(self):
        rng = np.random.default_rng(4571775098104213308)
        x, y = rng.random(size=(2, 7))

        message = '`x` and `y` must be exactly one-dimensional.'
        with pytest.raises(ValueError, match=message):
            stats.bws_test([x, x], [y, y])

        message = '`x` and `y` must not contain NaNs.'
        with pytest.raises(ValueError, match=message):
            stats.bws_test([np.nan], y)

        message = '`x` and `y` must be of nonzero size.'
        with pytest.raises(ValueError, match=message):
            stats.bws_test(x, [])

        message = 'alternative` must be one of...'
        with pytest.raises(ValueError, match=message):
            stats.bws_test(x, y, alternative='ekki-ekki')

        message = 'method` must be an instance of...'
        with pytest.raises(ValueError, match=message):
            stats.bws_test(x, y, method=42)

    def test_against_published_reference(self):
        # Example 2 in bws_test Reference [1], pg 9:
        # https://link.springer.com/content/pdf/10.1007/BF02762032.pdf
        x = [1, 2, 3, 4, 6, 7, 8]
        y = [5, 9, 10, 11, 12, 13, 14]
        res = stats.bws_test(x, y, alternative='two-sided')
        assert_allclose(res.statistic, 5.132, atol=1e-3)
        # exact permutation p-value as reported in the paper
        assert_equal(res.pvalue, 10/3432)

    @pytest.mark.parametrize(('alternative', 'statistic', 'pvalue'),
                             [('two-sided', 1.7510204081633, 0.1264422777777),
                              ('less', -1.7510204081633, 0.05754662004662),
                              ('greater', -1.7510204081633, 0.9424533799534)])
    def test_against_R(self, alternative, statistic, pvalue):
        # Reference values produced with the R library BWStest:
        #   library(BWStest)
        #   options(digits=16)
        #   x = c(...); y = c(...)
        #   bws_test(x, y, alternative='two.sided')
        rng = np.random.default_rng(4571775098104213308)
        x, y = rng.random(size=(2, 7))
        res = stats.bws_test(x, y, alternative=alternative)
        assert_allclose(res.statistic, statistic, rtol=1e-13)
        # p-values come from resampling, hence the loose tolerances
        assert_allclose(res.pvalue, pvalue, atol=1e-2, rtol=1e-1)

    @pytest.mark.parametrize(('alternative', 'statistic', 'pvalue'),
                             [('two-sided', 1.142629265891, 0.2903950180801),
                              ('less', 0.99629665877411, 0.8545660222131),
                              ('greater', 0.99629665877411, 0.1454339777869)])
    def test_against_R_imbalanced(self, alternative, statistic, pvalue):
        # Same R reference as `test_against_R`, with unequal sample sizes.
        rng = np.random.default_rng(5429015622386364034)
        x = rng.random(size=9)
        y = rng.random(size=8)
        res = stats.bws_test(x, y, alternative=alternative)
        assert_allclose(res.statistic, statistic, rtol=1e-13)
        assert_allclose(res.pvalue, pvalue, atol=1e-2, rtol=1e-1)

    def test_method(self):
        # A seeded `PermutationMethod` makes the null distribution
        # reproducible; a different seed must change it.
        rng = np.random.default_rng(1520514347193347862)
        x, y = rng.random(size=(2, 10))

        def run(seed):
            gen = np.random.default_rng(seed)
            method = stats.PermutationMethod(n_resamples=10, random_state=gen)
            return stats.bws_test(x, y, method=method)

        res1 = run(1520514347193347862)
        assert len(res1.null_distribution) == 10

        res2 = run(1520514347193347862)
        assert_allclose(res1.null_distribution, res2.null_distribution)

        res3 = run(5205143471933478621)
        assert not np.allclose(res3.null_distribution, res1.null_distribution)

    def test_directions(self):
        # Sanity check of the sign convention of the one-sided statistic.
        rng = np.random.default_rng(1520514347193347862)
        x = rng.random(size=5)
        y = x - 1

        # x is uniformly larger than y
        res = stats.bws_test(x, y, alternative='greater')
        assert res.statistic > 0
        assert_equal(res.pvalue, 1 / len(res.null_distribution))

        res = stats.bws_test(x, y, alternative='less')
        assert res.statistic > 0
        assert_equal(res.pvalue, 1)

        # swapping the samples flips the sign of the statistic
        res = stats.bws_test(y, x, alternative='less')
        assert res.statistic < 0
        assert_equal(res.pvalue, 1 / len(res.null_distribution))

        res = stats.bws_test(y, x, alternative='greater')
        assert res.statistic < 0
        assert_equal(res.pvalue, 1)
| 78,912
| 41.494884
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_qmc.py
|
import os
from collections import Counter
from itertools import combinations, product
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_array_equal
from scipy.spatial import distance
from scipy.stats import shapiro
from scipy.stats._sobol import _test_find_index
from scipy.stats import qmc
from scipy.stats._qmc import (
van_der_corput, n_primes, primes_from_2_to,
update_discrepancy, QMCEngine, _l1_norm,
_perturb_discrepancy, _lloyd_centroidal_voronoi_tessellation
) # noqa
class TestUtils:
    """Tests for the QMC helper functions: `qmc.scale`, `qmc.discrepancy`
    and its incremental variants, and the prime-number utilities."""

    def test_scale(self):
        """`qmc.scale` maps unit-hypercube samples to arbitrary bounds."""
        # 1d scalar
        space = [[0], [1], [0.5]]
        out = [[-2], [6], [2]]
        scaled_space = qmc.scale(space, l_bounds=-2, u_bounds=6)
        assert_allclose(scaled_space, out)

        # 2d space
        space = [[0, 0], [1, 1], [0.5, 0.5]]
        bounds = np.array([[-2, 0], [6, 5]])
        out = [[-2, 0], [6, 5], [2, 2.5]]
        scaled_space = qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
        assert_allclose(scaled_space, out)
        # reverse=True must invert the transformation exactly
        scaled_back_space = qmc.scale(scaled_space, l_bounds=bounds[0],
                                      u_bounds=bounds[1], reverse=True)
        assert_allclose(scaled_back_space, space)

        # broadcast
        space = [[0, 0, 0], [1, 1, 1], [0.5, 0.5, 0.5]]
        l_bounds, u_bounds = 0, [6, 5, 3]
        out = [[0, 0, 0], [6, 5, 3], [3, 2.5, 1.5]]
        scaled_space = qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)
        assert_allclose(scaled_space, out)

    def test_scale_random(self):
        """Scaling followed by reverse scaling is the identity."""
        rng = np.random.default_rng(317589836511269190194010915937762468165)
        sample = rng.random((30, 10))
        a = -rng.random(10) * 10
        b = rng.random(10) * 10
        scaled = qmc.scale(sample, a, b, reverse=False)
        unscaled = qmc.scale(scaled, a, b, reverse=True)
        assert_allclose(unscaled, sample)

    def test_scale_errors(self):
        """Invalid samples or bounds raise informative errors."""
        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
            space = [0, 1, 0.5]
            qmc.scale(space, l_bounds=-2, u_bounds=6)

        with pytest.raises(ValueError, match=r"Bounds are not consistent"):
            # lower bound exceeds upper bound in the first dimension
            space = [[0, 0], [1, 1], [0.5, 0.5]]
            bounds = np.array([[-2, 6], [6, 5]])
            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])

        with pytest.raises(ValueError, match=r"'l_bounds' and 'u_bounds'"
                                             r" must be broadcastable"):
            space = [[0, 0], [1, 1], [0.5, 0.5]]
            l_bounds, u_bounds = [-2, 0, 2], [6, 5]
            qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)

        with pytest.raises(ValueError, match=r"'l_bounds' and 'u_bounds'"
                                             r" must be broadcastable"):
            # bounds dimension does not match the sample dimension
            space = [[0, 0], [1, 1], [0.5, 0.5]]
            bounds = np.array([[-2, 0, 2], [6, 5, 5]])
            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])

        with pytest.raises(ValueError, match=r"Sample is not in unit "
                                             r"hypercube"):
            space = [[0, 0], [1, 1.5], [0.5, 0.5]]
            bounds = np.array([[-2, 0], [6, 5]])
            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])

        with pytest.raises(ValueError, match=r"Sample is out of bounds"):
            # reverse scaling of a point outside the stated bounds
            out = [[-2, 0], [6, 5], [8, 2.5]]
            bounds = np.array([[-2, 0], [6, 5]])
            qmc.scale(out, l_bounds=bounds[0], u_bounds=bounds[1],
                      reverse=True)

    def test_discrepancy(self):
        """Discrepancy values against published references."""
        space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
        space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)
        space_2 = np.array([[1, 5], [2, 4], [3, 3], [4, 2], [5, 1], [6, 6]])
        space_2 = (2.0 * space_2 - 1.0) / (2.0 * 6.0)

        # From Fang et al. Design and modeling for computer experiments, 2006
        assert_allclose(qmc.discrepancy(space_1), 0.0081, atol=1e-4)
        assert_allclose(qmc.discrepancy(space_2), 0.0105, atol=1e-4)

        # From Zhou Y.-D. et al. Mixture discrepancy for quasi-random point
        # sets. Journal of Complexity, 29 (3-4), pp. 283-301, 2013.
        # Example 4 on Page 298
        sample = np.array([[2, 1, 1, 2, 2, 2],
                           [1, 2, 2, 2, 2, 2],
                           [2, 1, 1, 1, 1, 1],
                           [1, 1, 1, 1, 2, 2],
                           [1, 2, 2, 2, 1, 1],
                           [2, 2, 2, 2, 1, 1],
                           [2, 2, 2, 1, 2, 2]])
        sample = (2.0 * sample - 1.0) / (2.0 * 2.0)

        assert_allclose(qmc.discrepancy(sample, method='MD'), 2.5000,
                        atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='WD'), 1.3680,
                        atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='CD'), 0.3172,
                        atol=1e-4)

        # From Tim P. et al. Minimizing the L2 and Linf star discrepancies
        # of a single point in the unit hypercube. JCAM, 2005
        # Table 1 on Page 283
        for dim in [2, 4, 8, 16, 32, 64]:
            ref = np.sqrt(3**(-dim))
            assert_allclose(qmc.discrepancy(np.array([[1]*dim]),
                                            method='L2-star'), ref)

    def test_discrepancy_errors(self):
        """Invalid samples or method names raise informative errors."""
        sample = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])

        with pytest.raises(
            ValueError, match=r"Sample is not in unit hypercube"
        ):
            qmc.discrepancy(sample)

        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
            qmc.discrepancy([1, 3])

        sample = [[0, 0], [1, 1], [0.5, 0.5]]
        with pytest.raises(ValueError, match=r"'toto' is not a valid ..."):
            qmc.discrepancy(sample, method="toto")

    def test_discrepancy_parallel(self, monkeypatch):
        """Parallel evaluation matches serial results; worker validation."""
        sample = np.array([[2, 1, 1, 2, 2, 2],
                           [1, 2, 2, 2, 2, 2],
                           [2, 1, 1, 1, 1, 1],
                           [1, 1, 1, 1, 2, 2],
                           [1, 2, 2, 2, 1, 1],
                           [2, 2, 2, 2, 1, 1],
                           [2, 2, 2, 1, 2, 2]])
        sample = (2.0 * sample - 1.0) / (2.0 * 2.0)

        assert_allclose(qmc.discrepancy(sample, method='MD', workers=8),
                        2.5000,
                        atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='WD', workers=8),
                        1.3680,
                        atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='CD', workers=8),
                        0.3172,
                        atol=1e-4)

        # From Tim P. et al. Minimizing the L2 and Linf star discrepancies
        # of a single point in the unit hypercube. JCAM, 2005
        # Table 1 on Page 283
        for dim in [2, 4, 8, 16, 32, 64]:
            ref = np.sqrt(3 ** (-dim))
            assert_allclose(qmc.discrepancy(np.array([[1] * dim]),
                                            method='L2-star', workers=-1), ref)

        # workers=-1 needs os.cpu_count(); simulate it being unavailable
        monkeypatch.setattr(os, 'cpu_count', lambda: None)
        with pytest.raises(NotImplementedError, match="Cannot determine the"):
            qmc.discrepancy(sample, workers=-1)

        with pytest.raises(ValueError, match="Invalid number of workers..."):
            qmc.discrepancy(sample, workers=-2)

    def test_update_discrepancy(self):
        """Iteratively updated discrepancy matches the direct computation."""
        # From Fang et al. Design and modeling for computer experiments, 2006
        space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
        space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)

        disc_init = qmc.discrepancy(space_1[:-1], iterative=True)
        disc_iter = update_discrepancy(space_1[-1], space_1[:-1], disc_init)

        assert_allclose(disc_iter, 0.0081, atol=1e-4)

        # n<d
        rng = np.random.default_rng(241557431858162136881731220526394276199)
        space_1 = rng.random((4, 10))

        disc_ref = qmc.discrepancy(space_1)
        disc_init = qmc.discrepancy(space_1[:-1], iterative=True)
        disc_iter = update_discrepancy(space_1[-1], space_1[:-1], disc_init)

        assert_allclose(disc_iter, disc_ref, atol=1e-4)

        # errors
        with pytest.raises(ValueError, match=r"Sample is not in unit "
                                             r"hypercube"):
            update_discrepancy(space_1[-1], space_1[:-1] + 1, disc_init)

        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
            update_discrepancy(space_1[-1], space_1[0], disc_init)

        x_new = [1, 3]
        with pytest.raises(ValueError, match=r"x_new is not in unit "
                                             r"hypercube"):
            update_discrepancy(x_new, space_1[:-1], disc_init)

        x_new = [[0.5, 0.5]]
        with pytest.raises(ValueError, match=r"x_new is not a 1D array"):
            update_discrepancy(x_new, space_1[:-1], disc_init)

        x_new = [0.3, 0.1, 0]
        with pytest.raises(ValueError, match=r"x_new and sample must be "
                                             r"broadcastable"):
            update_discrepancy(x_new, space_1[:-1], disc_init)

    def test_perm_discrepancy(self):
        """Discrepancy after row swaps matches `_perturb_discrepancy`."""
        rng = np.random.default_rng(46449423132557934943847369749645759997)
        qmc_gen = qmc.LatinHypercube(5, seed=rng)
        sample = qmc_gen.random(10)
        disc = qmc.discrepancy(sample)

        for i in range(100):
            # randomly swap two entries within one column and track the
            # incrementally-updated discrepancy
            row_1 = rng.integers(10)
            row_2 = rng.integers(10)
            col = rng.integers(5)

            disc = _perturb_discrepancy(sample, row_1, row_2, col, disc)
            sample[row_1, col], sample[row_2, col] = (
                sample[row_2, col], sample[row_1, col])
            disc_reference = qmc.discrepancy(sample)
            assert_allclose(disc, disc_reference)

    def test_discrepancy_alternative_implementation(self):
        """Alternative definitions from Matt Haberland."""
        def disc_c2(x):
            # centered L2 discrepancy, direct formula
            n, s = x.shape
            xij = x
            disc1 = np.sum(np.prod((1
                                    + 1/2*np.abs(xij-0.5)
                                    - 1/2*np.abs(xij-0.5)**2), axis=1))
            xij = x[None, :, :]
            xkj = x[:, None, :]
            disc2 = np.sum(np.sum(np.prod(1
                                          + 1/2*np.abs(xij - 0.5)
                                          + 1/2*np.abs(xkj - 0.5)
                                          - 1/2*np.abs(xij - xkj), axis=2),
                                  axis=0))
            return (13/12)**s - 2/n * disc1 + 1/n**2*disc2

        def disc_wd(x):
            # wrap-around discrepancy, direct formula
            n, s = x.shape
            xij = x[None, :, :]
            xkj = x[:, None, :]
            disc = np.sum(np.sum(np.prod(3/2
                                         - np.abs(xij - xkj)
                                         + np.abs(xij - xkj)**2, axis=2),
                                 axis=0))
            return -(4/3)**s + 1/n**2 * disc

        def disc_md(x):
            # mixture discrepancy, direct formula
            n, s = x.shape
            xij = x
            disc1 = np.sum(np.prod((5/3
                                    - 1/4*np.abs(xij-0.5)
                                    - 1/4*np.abs(xij-0.5)**2), axis=1))
            xij = x[None, :, :]
            xkj = x[:, None, :]
            disc2 = np.sum(np.sum(np.prod(15/8
                                          - 1/4*np.abs(xij - 0.5)
                                          - 1/4*np.abs(xkj - 0.5)
                                          - 3/4*np.abs(xij - xkj)
                                          + 1/2*np.abs(xij - xkj)**2,
                                          axis=2), axis=0))
            return (19/12)**s - 2/n * disc1 + 1/n**2*disc2

        def disc_star_l2(x):
            # L2-star discrepancy, direct formula
            n, s = x.shape
            return np.sqrt(
                3 ** (-s) - 2 ** (1 - s) / n
                * np.sum(np.prod(1 - x ** 2, axis=1))
                + np.sum([
                    np.prod(1 - np.maximum(x[k, :], x[j, :]))
                    for k in range(n) for j in range(n)
                ]) / n ** 2
            )

        rng = np.random.default_rng(117065081482921065782761407107747179201)
        sample = rng.random((30, 10))

        disc_curr = qmc.discrepancy(sample, method='CD')
        disc_alt = disc_c2(sample)
        assert_allclose(disc_curr, disc_alt)

        disc_curr = qmc.discrepancy(sample, method='WD')
        disc_alt = disc_wd(sample)
        assert_allclose(disc_curr, disc_alt)

        disc_curr = qmc.discrepancy(sample, method='MD')
        disc_alt = disc_md(sample)
        assert_allclose(disc_curr, disc_alt)

        disc_curr = qmc.discrepancy(sample, method='L2-star')
        disc_alt = disc_star_l2(sample)
        assert_allclose(disc_curr, disc_alt)

    def test_n_primes(self):
        """`n_primes(k)` returns the first k primes."""
        primes = n_primes(10)
        assert primes[-1] == 29

        primes = n_primes(168)
        assert primes[-1] == 997

        primes = n_primes(350)
        assert primes[-1] == 2357

    def test_primes(self):
        """`primes_from_2_to` returns all primes below the bound."""
        primes = primes_from_2_to(50)
        out = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
        assert_allclose(primes, out)
class TestVDC:
def test_van_der_corput(self):
sample = van_der_corput(10)
out = [0.0, 0.5, 0.25, 0.75, 0.125, 0.625,
0.375, 0.875, 0.0625, 0.5625]
assert_allclose(sample, out)
sample = van_der_corput(10, workers=4)
assert_allclose(sample, out)
sample = van_der_corput(10, workers=8)
assert_allclose(sample, out)
sample = van_der_corput(7, start_index=3)
assert_allclose(sample, out[3:])
def test_van_der_corput_scramble(self):
seed = 338213789010180879520345496831675783177
out = van_der_corput(10, scramble=True, seed=seed)
sample = van_der_corput(7, start_index=3, scramble=True, seed=seed)
assert_allclose(sample, out[3:])
sample = van_der_corput(
7, start_index=3, scramble=True, seed=seed, workers=4
)
assert_allclose(sample, out[3:])
sample = van_der_corput(
7, start_index=3, scramble=True, seed=seed, workers=8
)
assert_allclose(sample, out[3:])
def test_invalid_base_error(self):
with pytest.raises(ValueError, match=r"'base' must be at least 2"):
van_der_corput(10, base=1)
class RandomEngine(qmc.QMCEngine):
    """Minimal `QMCEngine` subclass producing plain pseudo-random draws."""

    def __init__(self, d, optimization=None, seed=None):
        super().__init__(d=d, optimization=optimization, seed=seed)

    def _random(self, n=1, *, workers=1):
        # `workers` is accepted for interface compatibility but unused
        return self.rng.random((n, self.d))
def test_subclassing_QMCEngine():
    """A QMCEngine subclass supports sampling, reset and fast-forward."""
    engine = RandomEngine(2, seed=175180605424926556207367152557812293274)

    sample_1 = engine.random(n=5)
    sample_2 = engine.random(n=7)
    assert engine.num_generated == 12

    # resetting rewinds the stream to the beginning
    engine.reset()
    assert engine.num_generated == 0
    assert_equal(engine.random(n=5), sample_1)

    # reset followed by fast-forward lands mid-stream
    engine.reset()
    engine.fast_forward(n=5)
    assert_equal(engine.random(n=7), sample_2)
    assert engine.num_generated == 12
def test_raises():
    # input validation of the QMCEngine base class
    message = r"d must be a non-negative integer"
    with pytest.raises(ValueError, match=message):
        RandomEngine((2,))  # noqa
    with pytest.raises(ValueError, match=message):
        RandomEngine(-1)  # noqa

    msg = r"'u_bounds' and 'l_bounds' must be integers"
    with pytest.raises(ValueError, match=msg):
        RandomEngine(1).integers(l_bounds=1, u_bounds=1.1)
def test_integers():
    """`integers` honours bounds, dtype and the scaling of `random`."""
    engine = RandomEngine(1, seed=231195739755290648063853336582377368684)

    # with u_bounds=1 and an exclusive endpoint only 0 can be drawn
    sample = engine.integers(1, n=10)
    assert_equal(np.unique(sample), [0])
    assert sample.dtype == np.dtype('int64')

    # ... unless the upper bound is included
    sample = engine.integers(1, n=10, endpoint=True)
    assert_equal(np.unique(sample), [0, 1])

    low, high = -5, 7

    # integers() must match manual scaling + flooring of random()
    engine.reset()
    reference = np.floor(
        engine.random(20) * (high - low) + low).astype(np.int64)
    engine.reset()
    sample = engine.integers(low, u_bounds=high, n=20, endpoint=False)
    assert_equal(sample, reference)

    # bounds are attained but never exceeded
    sample = engine.integers(low, u_bounds=high, n=100, endpoint=False)
    assert_equal((sample.min(), sample.max()), (low, high - 1))

    sample = engine.integers(low, u_bounds=high, n=100, endpoint=True)
    assert_equal((sample.min(), sample.max()), (low, high))
def test_integers_nd():
    """Per-dimension bounds are respected when `d > 1`."""
    d = 10
    rng = np.random.default_rng(3716505122102428560615700415287450951)
    low = rng.integers(low=-5, high=-1, size=d)
    high = rng.integers(low=1, high=5, size=d, endpoint=True)
    engine = RandomEngine(d, seed=rng)

    # exclusive upper bound: per-dimension maximum is high - 1
    sample = engine.integers(low, u_bounds=high, n=100, endpoint=False)
    assert_equal(sample.min(axis=0), low)
    assert_equal(sample.max(axis=0), high - 1)

    # inclusive upper bound: per-dimension maximum is high itself
    sample = engine.integers(low, u_bounds=high, n=100, endpoint=True)
    assert_equal(sample.min(axis=0), low)
    assert_equal(sample.max(axis=0), high)
class QMCEngineTests:
    """Generic tests for QMC engines."""
    # Subclasses fill these in: the engine class under test, whether it
    # supports scrambling, and reference samples for both modes.
    qmce = NotImplemented
    can_scramble = NotImplemented
    unscramble_nd = NotImplemented
    scramble_nd = NotImplemented

    # parametrization shared by most tests below
    scramble = [True, False]
    ids = ["Scrambled", "Unscrambled"]

    def engine(
        self, scramble: bool,
        seed=170382760648021597650530316304495310428,
        **kwargs
    ) -> QMCEngine:
        """Instantiate the engine under test; skip unsupported modes."""
        if self.can_scramble:
            return self.qmce(scramble=scramble, seed=seed, **kwargs)
        else:
            if scramble:
                pytest.skip()
            else:
                return self.qmce(seed=seed, **kwargs)

    def reference(self, scramble: bool) -> np.ndarray:
        """Return the reference sample for the requested mode."""
        return self.scramble_nd if scramble else self.unscramble_nd

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    def test_0dim(self, scramble):
        # a 0-dimensional engine yields empty rows
        engine = self.engine(d=0, scramble=scramble)
        sample = engine.random(4)
        assert_array_equal(np.empty((4, 0)), sample)

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    def test_0sample(self, scramble):
        # requesting zero points yields an empty (0, d) array
        engine = self.engine(d=2, scramble=scramble)
        sample = engine.random(0)
        assert_array_equal(np.empty((0, 2)), sample)

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    def test_1sample(self, scramble):
        engine = self.engine(d=2, scramble=scramble)
        sample = engine.random(1)
        assert (1, 2) == sample.shape

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    def test_bounds(self, scramble):
        # all samples must stay inside the unit hypercube
        engine = self.engine(d=100, scramble=scramble)
        sample = engine.random(512)
        assert np.all(sample >= 0)
        assert np.all(sample <= 1)

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    def test_sample(self, scramble):
        # samples must match the subclass-supplied reference values
        ref_sample = self.reference(scramble=scramble)
        engine = self.engine(d=2, scramble=scramble)
        sample = engine.random(n=len(ref_sample))

        assert_allclose(sample, ref_sample, atol=1e-1)
        assert engine.num_generated == len(ref_sample)

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    def test_continuing(self, scramble):
        # two half-size draws must reproduce one full-size draw
        engine = self.engine(d=2, scramble=scramble)
        ref_sample = engine.random(n=8)

        engine = self.engine(d=2, scramble=scramble)

        n_half = len(ref_sample) // 2

        _ = engine.random(n=n_half)
        sample = engine.random(n=n_half)
        assert_allclose(sample, ref_sample[n_half:], atol=1e-1)

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    @pytest.mark.parametrize(
        "seed",
        (
            170382760648021597650530316304495310428,
            np.random.default_rng(170382760648021597650530316304495310428),
            None,
        ),
    )
    def test_reset(self, scramble, seed):
        # reset() must restart the stream regardless of the seed type
        engine = self.engine(d=2, scramble=scramble, seed=seed)
        ref_sample = engine.random(n=8)

        engine.reset()
        assert engine.num_generated == 0

        sample = engine.random(n=8)
        assert_allclose(sample, ref_sample)

    @pytest.mark.parametrize("scramble", scramble, ids=ids)
    def test_fast_forward(self, scramble):
        # fast_forward(k) must skip exactly k points of the stream
        engine = self.engine(d=2, scramble=scramble)
        ref_sample = engine.random(n=8)

        engine = self.engine(d=2, scramble=scramble)

        engine.fast_forward(4)
        sample = engine.random(n=4)

        assert_allclose(sample, ref_sample[4:], atol=1e-1)

        # alternate fast forwarding with sampling
        engine.reset()
        even_draws = []
        for i in range(8):
            if i % 2 == 0:
                even_draws.append(engine.random())
            else:
                engine.fast_forward(1)
        assert_allclose(
            ref_sample[[i for i in range(8) if i % 2 == 0]],
            np.concatenate(even_draws),
            atol=1e-5
        )

    @pytest.mark.parametrize("scramble", [True])
    def test_distribution(self, scramble):
        # a large scrambled sample should be approximately uniform
        d = 50
        engine = self.engine(d=d, scramble=scramble)
        sample = engine.random(1024)
        assert_allclose(
            np.mean(sample, axis=0), np.repeat(0.5, d), atol=1e-2
        )
        assert_allclose(
            np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=1e-2
        )
        assert_allclose(
            np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=1e-2
        )

    def test_raises_optimizer(self):
        message = r"'toto' is not a valid optimization method"
        with pytest.raises(ValueError, match=message):
            self.engine(d=1, scramble=False, optimization="toto")

    @pytest.mark.parametrize(
        "optimization,metric",
        [
            ("random-CD", qmc.discrepancy),
            ("lloyd", lambda sample: -_l1_norm(sample))]
    )
    def test_optimizers(self, optimization, metric):
        # optimization should strictly improve the associated metric
        engine = self.engine(d=2, scramble=False)
        sample_ref = engine.random(n=64)
        metric_ref = metric(sample_ref)

        optimal_ = self.engine(d=2, scramble=False, optimization=optimization)
        sample_ = optimal_.random(n=64)
        metric_ = metric(sample_)

        assert metric_ < metric_ref

    def test_consume_prng_state(self):
        # passing the same Generator repeatedly must advance its state,
        # so successive engines produce different samples
        rng = np.random.default_rng(0xa29cabb11cfdf44ff6cac8bec254c2a0)
        sample = []
        for i in range(3):
            engine = self.engine(d=2, scramble=True, seed=rng)
            sample.append(engine.random(4))

        with pytest.raises(AssertionError, match="Arrays are not equal"):
            assert_equal(sample[0], sample[1])
        with pytest.raises(AssertionError, match="Arrays are not equal"):
            assert_equal(sample[0], sample[2])
class TestHalton(QMCEngineTests):
    """Run the generic engine tests against `qmc.Halton`."""
    qmce = qmc.Halton
    can_scramble = True
    # theoretical values known from Van der Corput
    unscramble_nd = np.array([[0, 0], [1 / 2, 1 / 3],
                              [1 / 4, 2 / 3], [3 / 4, 1 / 9],
                              [1 / 8, 4 / 9], [5 / 8, 7 / 9],
                              [3 / 8, 2 / 9], [7 / 8, 5 / 9]])
    # theoretical values unknown: convergence properties checked
    scramble_nd = np.array([[0.50246036, 0.93382481],
                            [0.00246036, 0.26715815],
                            [0.75246036, 0.60049148],
                            [0.25246036, 0.8227137 ],
                            [0.62746036, 0.15604704],
                            [0.12746036, 0.48938037],
                            [0.87746036, 0.71160259],
                            [0.37746036, 0.04493592]])

    def test_workers(self):
        """Multi-worker sampling must agree with the reference sample."""
        expected = self.reference(scramble=True)
        engine = self.engine(d=2, scramble=True)
        sample = engine.random(n=len(expected), workers=8)
        assert_allclose(sample, expected, atol=1e-3)

        # workers must also leave integer sampling unchanged
        engine.reset()
        expected_ints = engine.integers(10)
        engine.reset()
        assert_equal(engine.integers(10, workers=8), expected_ints)
class TestLHS(QMCEngineTests):
qmce = qmc.LatinHypercube
can_scramble = True
def test_continuing(self, *args):
    # Override: a Latin hypercube is not a sequence, so the generic
    # continuation test does not apply.
    pytest.skip("Not applicable: not a sequence.")
def test_fast_forward(self, *args):
    # Override: fast-forwarding is undefined for a non-sequence sampler.
    pytest.skip("Not applicable: not a sequence.")
def test_sample(self, *args):
    # Override: no fixed reference sample exists for this engine.
    pytest.skip("Not applicable: the value of reference sample is"
                " implementation dependent.")
@pytest.mark.parametrize("strength", [1, 2])
@pytest.mark.parametrize("scramble", [False, True])
@pytest.mark.parametrize("optimization", [None, "random-CD"])
def test_sample_stratified(self, optimization, scramble, strength):
seed = np.random.default_rng(37511836202578819870665127532742111260)
p = 5
n = p**2
d = 6
engine = qmc.LatinHypercube(d=d, scramble=scramble,
strength=strength,
optimization=optimization,
seed=seed)
sample = engine.random(n=n)
assert sample.shape == (n, d)
assert engine.num_generated == n
# centering stratifies samples in the middle of equal segments:
# * inter-sample distance is constant in 1D sub-projections
# * after ordering, columns are equal
expected1d = (np.arange(n) + 0.5) / n
expected = np.broadcast_to(expected1d, (d, n)).T
assert np.any(sample != expected)
sorted_sample = np.sort(sample, axis=0)
tol = 0.5 / n if scramble else 0
assert_allclose(sorted_sample, expected, atol=tol)
assert np.any(sample - expected > tol)
if strength == 2 and optimization is None:
unique_elements = np.arange(p)
desired = set(product(unique_elements, unique_elements))
for i, j in combinations(range(engine.d), 2):
samples_2d = sample[:, [i, j]]
res = (samples_2d * p).astype(int)
res_set = {tuple(row) for row in res}
assert_equal(res_set, desired)
def test_optimizer_1d(self):
# discrepancy measures are invariant under permuting factors and runs
engine = self.engine(d=1, scramble=False)
sample_ref = engine.random(n=64)
optimal_ = self.engine(d=1, scramble=False, optimization="random-CD")
sample_ = optimal_.random(n=64)
assert_array_equal(sample_ref, sample_)
def test_raises(self):
message = r"not a valid strength"
with pytest.raises(ValueError, match=message):
qmc.LatinHypercube(1, strength=3)
message = r"n is not the square of a prime number"
with pytest.raises(ValueError, match=message):
engine = qmc.LatinHypercube(d=2, strength=2)
engine.random(16)
message = r"n is not the square of a prime number"
with pytest.raises(ValueError, match=message):
engine = qmc.LatinHypercube(d=2, strength=2)
engine.random(5) # because int(sqrt(5)) would result in 2
message = r"n is too small for d"
with pytest.raises(ValueError, match=message):
engine = qmc.LatinHypercube(d=5, strength=2)
engine.random(9)
class TestSobol(QMCEngineTests):
    """Sobol'-sequence specialization of the shared QMC engine test suite."""
    qmce = qmc.Sobol
    can_scramble = True
    # theoretical values from Joe Kuo2010
    unscramble_nd = np.array([[0., 0.],
                              [0.5, 0.5],
                              [0.75, 0.25],
                              [0.25, 0.75],
                              [0.375, 0.375],
                              [0.875, 0.875],
                              [0.625, 0.125],
                              [0.125, 0.625]])
    # theoretical values unknown: convergence properties checked
    scramble_nd = np.array([[0.25331921, 0.41371179],
                            [0.8654213, 0.9821167],
                            [0.70097554, 0.03664616],
                            [0.18027647, 0.60895735],
                            [0.10521339, 0.21897069],
                            [0.53019685, 0.66619033],
                            [0.91122276, 0.34580743],
                            [0.45337471, 0.78912079]])
    def test_warning(self):
        # requesting n that is not a power of 2 warns about balance
        with pytest.warns(UserWarning, match=r"The balance properties of "
                                             r"Sobol' points"):
            engine = qmc.Sobol(1)
            engine.random(10)
    def test_random_base2(self):
        engine = qmc.Sobol(2, scramble=False)
        sample = engine.random_base2(2)
        assert_array_equal(self.unscramble_nd[:4], sample)
        # resampling still having N=2**n
        sample = engine.random_base2(2)
        assert_array_equal(self.unscramble_nd[4:8], sample)
        # resampling again but leading to N!=2**n
        with pytest.raises(ValueError, match=r"The balance properties of "
                                             r"Sobol' points"):
            engine.random_base2(2)
    def test_raise(self):
        # dimensionality and bit-width limits are enforced at construction
        with pytest.raises(ValueError, match=r"Maximum supported "
                                             r"dimensionality"):
            qmc.Sobol(qmc.Sobol.MAXDIM + 1)
        with pytest.raises(ValueError, match=r"Maximum supported "
                                             r"'bits' is 64"):
            qmc.Sobol(1, bits=65)
    def test_high_dim(self):
        # first two unscrambled points are all-0 then all-0.5 in every dim
        engine = qmc.Sobol(1111, scramble=False)
        count1 = Counter(engine.random().flatten().tolist())
        count2 = Counter(engine.random().flatten().tolist())
        assert_equal(count1, Counter({0.0: 1111}))
        assert_equal(count2, Counter({0.5: 1111}))
    @pytest.mark.parametrize("bits", [2, 3])
    def test_bits(self, bits):
        # with k bits the engine can emit at most 2**k points
        engine = qmc.Sobol(2, scramble=False, bits=bits)
        ns = 2**bits
        sample = engine.random(ns)
        assert_array_equal(self.unscramble_nd[:ns], sample)
        with pytest.raises(ValueError, match="increasing `bits`"):
            engine.random()
    def test_64bits(self):
        engine = qmc.Sobol(2, scramble=False, bits=64)
        sample = engine.random(8)
        assert_array_equal(self.unscramble_nd, sample)
class TestPoisson(QMCEngineTests):
    """Poisson-disk specialization of the shared QMC engine test suite.

    The key invariant throughout is that the minimum pairwise distance
    (``l2_norm``) of a sample never drops below the requested radius.
    """
    qmce = qmc.PoissonDisk
    can_scramble = False
    def test_bounds(self, *args):
        pytest.skip("Too costly in memory.")
    def test_fast_forward(self, *args):
        pytest.skip("Not applicable: recursive process.")
    def test_sample(self, *args):
        pytest.skip("Not applicable: the value of reference sample is"
                    " implementation dependent.")
    def test_continuing(self, *args):
        # can continue a sampling, but will not preserve the same order
        # because candidates are lost, so we will not select the same center
        radius = 0.05
        ns = 6
        engine = self.engine(d=2, radius=radius, scramble=False)
        sample_init = engine.random(n=ns)
        assert len(sample_init) <= ns
        assert l2_norm(sample_init) >= radius
        sample_continued = engine.random(n=ns)
        assert len(sample_continued) <= ns
        assert l2_norm(sample_continued) >= radius
        # the union of both batches must still respect the radius
        sample = np.concatenate([sample_init, sample_continued], axis=0)
        assert len(sample) <= ns * 2
        assert l2_norm(sample) >= radius
    def test_mindist(self):
        # radius constraint holds across dimensions and sampling methods
        rng = np.random.default_rng(132074951149370773672162394161442690287)
        ns = 50
        low, high = 0.08, 0.2
        radii = (high - low) * rng.random(5) + low
        dimensions = [1, 3, 4]
        hypersphere_methods = ["volume", "surface"]
        gen = product(dimensions, radii, hypersphere_methods)
        for d, radius, hypersphere in gen:
            engine = self.qmce(
                d=d, radius=radius, hypersphere=hypersphere, seed=rng
            )
            sample = engine.random(ns)
            assert len(sample) <= ns
            assert l2_norm(sample) >= radius
    def test_fill_space(self):
        radius = 0.2
        engine = self.qmce(d=2, radius=radius)
        sample = engine.fill_space()
        # circle packing problem is np complex
        assert l2_norm(sample) >= radius
    def test_raises(self):
        message = r"'toto' is not a valid hypersphere sampling"
        with pytest.raises(ValueError, match=message):
            qmc.PoissonDisk(1, hypersphere="toto")
class TestMultinomialQMC:
    """Tests for qmc.MultinomialQMC: validation, draw accuracy and the
    internal index search (`_test_find_index` comes from a module-level
    import not visible in this chunk)."""
    def test_validations(self):
        # negative Ps
        p = np.array([0.12, 0.26, -0.05, 0.35, 0.22])
        with pytest.raises(ValueError, match=r"Elements of pvals must "
                                             r"be non-negative."):
            qmc.MultinomialQMC(p, n_trials=10)
        # sum of P too large
        p = np.array([0.12, 0.26, 0.1, 0.35, 0.22])
        message = r"Elements of pvals must sum to 1."
        with pytest.raises(ValueError, match=message):
            qmc.MultinomialQMC(p, n_trials=10)
        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
        message = r"Dimension of `engine` must be 1."
        with pytest.raises(ValueError, match=message):
            qmc.MultinomialQMC(p, n_trials=10, engine=qmc.Sobol(d=2))
        message = r"`engine` must be an instance of..."
        with pytest.raises(ValueError, match=message):
            qmc.MultinomialQMC(p, n_trials=10, engine=np.random.default_rng())
    @pytest.mark.filterwarnings('ignore::UserWarning')
    def test_MultinomialBasicDraw(self):
        # one draw of n_trials should land close to n_trials * p
        seed = np.random.default_rng(6955663962957011631562466584467607969)
        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
        n_trials = 100
        expected = np.atleast_2d(n_trials * p).astype(int)
        engine = qmc.MultinomialQMC(p, n_trials=n_trials, seed=seed)
        assert_allclose(engine.random(1), expected, atol=1)
    def test_MultinomialDistribution(self):
        # with many trials the empirical frequencies converge to p
        seed = np.random.default_rng(77797854505813727292048130876699859000)
        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
        engine = qmc.MultinomialQMC(p, n_trials=8192, seed=seed)
        draws = engine.random(1)
        assert_allclose(draws / np.sum(draws), np.atleast_2d(p), atol=1e-4)
    def test_FindIndex(self):
        # binary search over the cumulative probabilities, incl. boundaries
        p_cumulative = np.array([0.1, 0.4, 0.45, 0.6, 0.75, 0.9, 0.99, 1.0])
        size = len(p_cumulative)
        assert_equal(_test_find_index(p_cumulative, size, 0.0), 0)
        assert_equal(_test_find_index(p_cumulative, size, 0.4), 2)
        assert_equal(_test_find_index(p_cumulative, size, 0.44999), 2)
        assert_equal(_test_find_index(p_cumulative, size, 0.45001), 3)
        assert_equal(_test_find_index(p_cumulative, size, 1.0), size - 1)
    @pytest.mark.filterwarnings('ignore::UserWarning')
    def test_other_engine(self):
        # same as test_MultinomialBasicDraw with different engine
        seed = np.random.default_rng(283753519042773243071753037669078065412)
        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
        n_trials = 100
        expected = np.atleast_2d(n_trials * p).astype(int)
        base_engine = qmc.Sobol(1, scramble=True, seed=seed)
        engine = qmc.MultinomialQMC(p, n_trials=n_trials, engine=base_engine,
                                    seed=seed)
        assert_allclose(engine.random(1), expected, atol=1)
class TestNormalQMC:
    """Tests for qmc.MultivariateNormalQMC with identity covariance:
    output shapes, seed reproducibility (exact expected samples) and
    normality/uncorrelatedness of the generated points."""
    def test_NormalQMC(self):
        # d = 1
        engine = qmc.MultivariateNormalQMC(mean=np.zeros(1))
        samples = engine.random()
        assert_equal(samples.shape, (1, 1))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 1))
        # d = 2
        engine = qmc.MultivariateNormalQMC(mean=np.zeros(2))
        samples = engine.random()
        assert_equal(samples.shape, (1, 2))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 2))
    def test_NormalQMCInvTransform(self):
        # same shape checks with the inverse-transform sampling path
        # d = 1
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(1), inv_transform=True)
        samples = engine.random()
        assert_equal(samples.shape, (1, 1))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 1))
        # d = 2
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(2), inv_transform=True)
        samples = engine.random()
        assert_equal(samples.shape, (1, 2))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 2))
    def test_NormalQMCSeeded(self):
        # fixed seed must reproduce these exact samples (Box-Muller path)
        # test even dimension
        seed = np.random.default_rng(274600237797326520096085022671371676017)
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(2), inv_transform=False, seed=seed)
        samples = engine.random(n=2)
        samples_expected = np.array([[-0.932001, -0.522923],
                                     [-1.477655, 0.846851]])
        assert_allclose(samples, samples_expected, atol=1e-4)
        # test odd dimension
        seed = np.random.default_rng(274600237797326520096085022671371676017)
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(3), inv_transform=False, seed=seed)
        samples = engine.random(n=2)
        samples_expected = np.array([[-0.932001, -0.522923, 0.036578],
                                     [-1.778011, 0.912428, -0.065421]])
        assert_allclose(samples, samples_expected, atol=1e-4)
        # same test with another engine
        seed = np.random.default_rng(274600237797326520096085022671371676017)
        base_engine = qmc.Sobol(4, scramble=True, seed=seed)
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(3), inv_transform=False,
            engine=base_engine, seed=seed
        )
        samples = engine.random(n=2)
        samples_expected = np.array([[-0.932001, -0.522923, 0.036578],
                                     [-1.778011, 0.912428, -0.065421]])
        assert_allclose(samples, samples_expected, atol=1e-4)
    def test_NormalQMCSeededInvTransform(self):
        # fixed seed must reproduce these exact samples (inverse transform)
        # test even dimension
        seed = np.random.default_rng(288527772707286126646493545351112463929)
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(2), seed=seed, inv_transform=True)
        samples = engine.random(n=2)
        samples_expected = np.array([[-0.913237, -0.964026],
                                     [0.255904, 0.003068]])
        assert_allclose(samples, samples_expected, atol=1e-4)
        # test odd dimension
        seed = np.random.default_rng(288527772707286126646493545351112463929)
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(3), seed=seed, inv_transform=True)
        samples = engine.random(n=2)
        samples_expected = np.array([[-0.913237, -0.964026, 0.355501],
                                     [0.699261, 2.90213 , -0.6418]])
        assert_allclose(samples, samples_expected, atol=1e-4)
    def test_other_engine(self):
        # a user-supplied base engine (incl. d=0) must be accepted
        for d in (0, 1, 2):
            base_engine = qmc.Sobol(d=d, scramble=False)
            engine = qmc.MultivariateNormalQMC(mean=np.zeros(d),
                                               engine=base_engine,
                                               inv_transform=True)
            samples = engine.random()
            assert_equal(samples.shape, (1, d))
    def test_NormalQMCShapiro(self):
        # distributional check: mean ~ 0, std ~ 1, Shapiro-Wilk normality
        rng = np.random.default_rng(13242)
        engine = qmc.MultivariateNormalQMC(mean=np.zeros(2), seed=rng)
        samples = engine.random(n=256)
        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
        # perform Shapiro-Wilk test for normality
        for i in (0, 1):
            _, pval = shapiro(samples[:, i])
            assert pval > 0.9
        # make sure samples are uncorrelated
        cov = np.cov(samples.transpose())
        assert np.abs(cov[0, 1]) < 1e-2
    def test_NormalQMCShapiroInvTransform(self):
        # same distributional check via the inverse-transform path
        rng = np.random.default_rng(32344554)
        engine = qmc.MultivariateNormalQMC(
            mean=np.zeros(2), inv_transform=True, seed=rng)
        samples = engine.random(n=256)
        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
        # perform Shapiro-Wilk test for normality
        for i in (0, 1):
            _, pval = shapiro(samples[:, i])
            assert pval > 0.9
        # make sure samples are uncorrelated
        cov = np.cov(samples.transpose())
        assert np.abs(cov[0, 1]) < 1e-2
class TestMultivariateNormalQMC:
    """Tests for qmc.MultivariateNormalQMC with general mean/covariance:
    argument validation, PSD handling, seed-exact reproducibility and
    distributional properties (incl. a degenerate covariance)."""
    def test_validations(self):
        # mean/engine dimension mismatches and invalid covariances
        message = r"Dimension of `engine` must be consistent"
        with pytest.raises(ValueError, match=message):
            qmc.MultivariateNormalQMC([0], engine=qmc.Sobol(d=2))
        message = r"Dimension of `engine` must be consistent"
        with pytest.raises(ValueError, match=message):
            qmc.MultivariateNormalQMC([0, 0, 0], engine=qmc.Sobol(d=4))
        message = r"`engine` must be an instance of..."
        with pytest.raises(ValueError, match=message):
            qmc.MultivariateNormalQMC([0, 0], engine=np.random.default_rng())
        message = r"Covariance matrix not PSD."
        with pytest.raises(ValueError, match=message):
            qmc.MultivariateNormalQMC([0, 0], [[1, 2], [2, 1]])
        message = r"Covariance matrix is not symmetric."
        with pytest.raises(ValueError, match=message):
            qmc.MultivariateNormalQMC([0, 0], [[1, 0], [2, 1]])
        message = r"Dimension mismatch between mean and covariance."
        with pytest.raises(ValueError, match=message):
            qmc.MultivariateNormalQMC([0], [[1, 0], [0, 1]])
    def test_MultivariateNormalQMCNonPD(self):
        # try with non-pd but psd cov; should work
        engine = qmc.MultivariateNormalQMC(
            [0, 0, 0], [[1, 0, 1], [0, 1, 1], [1, 1, 2]],
        )
        assert engine._corr_matrix is not None
    def test_MultivariateNormalQMC(self):
        # mean/cov accepted as scalar, list and ndarray; shapes checked
        # d = 1 scalar
        engine = qmc.MultivariateNormalQMC(mean=0, cov=5)
        samples = engine.random()
        assert_equal(samples.shape, (1, 1))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 1))
        # d = 2 list
        engine = qmc.MultivariateNormalQMC(mean=[0, 1], cov=[[1, 0], [0, 1]])
        samples = engine.random()
        assert_equal(samples.shape, (1, 2))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 2))
        # d = 3 np.array
        mean = np.array([0, 1, 2])
        cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        engine = qmc.MultivariateNormalQMC(mean, cov)
        samples = engine.random()
        assert_equal(samples.shape, (1, 3))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 3))
    def test_MultivariateNormalQMCInvTransform(self):
        # same shape checks through the inverse-transform path
        # d = 1 scalar
        engine = qmc.MultivariateNormalQMC(mean=0, cov=5, inv_transform=True)
        samples = engine.random()
        assert_equal(samples.shape, (1, 1))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 1))
        # d = 2 list
        engine = qmc.MultivariateNormalQMC(
            mean=[0, 1], cov=[[1, 0], [0, 1]], inv_transform=True,
        )
        samples = engine.random()
        assert_equal(samples.shape, (1, 2))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 2))
        # d = 3 np.array
        mean = np.array([0, 1, 2])
        cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        engine = qmc.MultivariateNormalQMC(mean, cov, inv_transform=True)
        samples = engine.random()
        assert_equal(samples.shape, (1, 3))
        samples = engine.random(n=5)
        assert_equal(samples.shape, (5, 3))
    def test_MultivariateNormalQMCSeeded(self):
        # fixed seed + random PSD covariance must reproduce exact samples
        # test even dimension
        rng = np.random.default_rng(180182791534511062935571481899241825000)
        a = rng.standard_normal((2, 2))
        A = a @ a.transpose() + np.diag(rng.random(2))
        engine = qmc.MultivariateNormalQMC(np.array([0, 0]), A,
                                           inv_transform=False, seed=rng)
        samples = engine.random(n=2)
        samples_expected = np.array([[-0.64419, -0.882413],
                                     [0.837199, 2.045301]])
        assert_allclose(samples, samples_expected, atol=1e-4)
        # test odd dimension
        rng = np.random.default_rng(180182791534511062935571481899241825000)
        a = rng.standard_normal((3, 3))
        A = a @ a.transpose() + np.diag(rng.random(3))
        engine = qmc.MultivariateNormalQMC(np.array([0, 0, 0]), A,
                                           inv_transform=False, seed=rng)
        samples = engine.random(n=2)
        samples_expected = np.array([[-0.693853, -1.265338, -0.088024],
                                     [1.620193, 2.679222, 0.457343]])
        assert_allclose(samples, samples_expected, atol=1e-4)
    def test_MultivariateNormalQMCSeededInvTransform(self):
        # as above, but through the inverse-transform path
        # test even dimension
        rng = np.random.default_rng(224125808928297329711992996940871155974)
        a = rng.standard_normal((2, 2))
        A = a @ a.transpose() + np.diag(rng.random(2))
        engine = qmc.MultivariateNormalQMC(
            np.array([0, 0]), A, seed=rng, inv_transform=True
        )
        samples = engine.random(n=2)
        samples_expected = np.array([[0.682171, -3.114233],
                                     [-0.098463, 0.668069]])
        assert_allclose(samples, samples_expected, atol=1e-4)
        # test odd dimension
        rng = np.random.default_rng(224125808928297329711992996940871155974)
        a = rng.standard_normal((3, 3))
        A = a @ a.transpose() + np.diag(rng.random(3))
        engine = qmc.MultivariateNormalQMC(
            np.array([0, 0, 0]), A, seed=rng, inv_transform=True
        )
        samples = engine.random(n=2)
        samples_expected = np.array([[0.988061, -1.644089, -0.877035],
                                     [-1.771731, 1.096988, 2.024744]])
        assert_allclose(samples, samples_expected, atol=1e-4)
    def test_MultivariateNormalQMCShapiro(self):
        # test the standard case
        seed = np.random.default_rng(188960007281846377164494575845971640)
        engine = qmc.MultivariateNormalQMC(
            mean=[0, 0], cov=[[1, 0], [0, 1]], seed=seed
        )
        samples = engine.random(n=256)
        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
        # perform Shapiro-Wilk test for normality
        for i in (0, 1):
            _, pval = shapiro(samples[:, i])
            assert pval > 0.9
        # make sure samples are uncorrelated
        cov = np.cov(samples.transpose())
        assert np.abs(cov[0, 1]) < 1e-2
        # test the correlated, non-zero mean case
        engine = qmc.MultivariateNormalQMC(
            mean=[1.0, 2.0], cov=[[1.5, 0.5], [0.5, 1.5]], seed=seed
        )
        samples = engine.random(n=256)
        assert all(np.abs(samples.mean(axis=0) - [1, 2]) < 1e-2)
        assert all(np.abs(samples.std(axis=0) - np.sqrt(1.5)) < 1e-2)
        # perform Shapiro-Wilk test for normality
        for i in (0, 1):
            _, pval = shapiro(samples[:, i])
            assert pval > 0.9
        # check covariance
        cov = np.cov(samples.transpose())
        assert np.abs(cov[0, 1] - 0.5) < 1e-2
    def test_MultivariateNormalQMCShapiroInvTransform(self):
        # test the standard case
        seed = np.random.default_rng(200089821034563288698994840831440331329)
        engine = qmc.MultivariateNormalQMC(
            mean=[0, 0], cov=[[1, 0], [0, 1]], seed=seed, inv_transform=True
        )
        samples = engine.random(n=256)
        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
        # perform Shapiro-Wilk test for normality
        for i in (0, 1):
            _, pval = shapiro(samples[:, i])
            assert pval > 0.9
        # make sure samples are uncorrelated
        cov = np.cov(samples.transpose())
        assert np.abs(cov[0, 1]) < 1e-2
        # test the correlated, non-zero mean case
        engine = qmc.MultivariateNormalQMC(
            mean=[1.0, 2.0],
            cov=[[1.5, 0.5], [0.5, 1.5]],
            seed=seed,
            inv_transform=True,
        )
        samples = engine.random(n=256)
        assert all(np.abs(samples.mean(axis=0) - [1, 2]) < 1e-2)
        assert all(np.abs(samples.std(axis=0) - np.sqrt(1.5)) < 1e-2)
        # perform Shapiro-Wilk test for normality
        for i in (0, 1):
            _, pval = shapiro(samples[:, i])
            assert pval > 0.9
        # check covariance
        cov = np.cov(samples.transpose())
        assert np.abs(cov[0, 1] - 0.5) < 1e-2
    def test_MultivariateNormalQMCDegenerate(self):
        # X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z)
        seed = np.random.default_rng(16320637417581448357869821654290448620)
        engine = qmc.MultivariateNormalQMC(
            mean=[0.0, 0.0, 0.0],
            cov=[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 2.0]],
            seed=seed,
        )
        samples = engine.random(n=512)
        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
        assert np.abs(np.std(samples[:, 0]) - 1) < 1e-2
        assert np.abs(np.std(samples[:, 1]) - 1) < 1e-2
        assert np.abs(np.std(samples[:, 2]) - np.sqrt(2)) < 1e-2
        for i in (0, 1, 2):
            _, pval = shapiro(samples[:, i])
            assert pval > 0.8
        cov = np.cov(samples.transpose())
        assert np.abs(cov[0, 1]) < 1e-2
        assert np.abs(cov[0, 2] - 1) < 1e-2
        # check to see if X + Y = Z almost exactly
        assert all(np.abs(samples[:, 0] + samples[:, 1] - samples[:, 2])
                   < 1e-5)
class TestLloyd:
    """Tests for the private Lloyd centroidal-Voronoi-tessellation helper
    (`_lloyd_centroidal_voronoi_tessellation` / `_l1_norm` imported at the
    top of the file, not visible in this chunk)."""
    def test_lloyd(self):
        # quite sensible seed as it can go up before going further down
        rng = np.random.RandomState(1809831)
        sample = rng.uniform(0, 1, size=(128, 2))
        base_l1 = _l1_norm(sample)
        base_l2 = l2_norm(sample)
        # each Lloyd iteration should spread the points further apart
        for _ in range(4):
            sample_lloyd = _lloyd_centroidal_voronoi_tessellation(
                    sample, maxiter=1,
            )
            curr_l1 = _l1_norm(sample_lloyd)
            curr_l2 = l2_norm(sample_lloyd)
            # higher is better for the distance measures
            assert base_l1 < curr_l1
            assert base_l2 < curr_l2
            base_l1 = curr_l1
            base_l2 = curr_l2
            sample = sample_lloyd
    def test_lloyd_non_mutating(self):
        """
        Verify that the input samples are not mutated in place and that they do
        not share memory with the output.
        """
        sample_orig = np.array([[0.1, 0.1],
                                [0.1, 0.2],
                                [0.2, 0.1],
                                [0.2, 0.2]])
        sample_copy = sample_orig.copy()
        new_sample = _lloyd_centroidal_voronoi_tessellation(
            sample=sample_orig
        )
        assert_allclose(sample_orig, sample_copy)
        assert not np.may_share_memory(sample_orig, new_sample)
    def test_lloyd_errors(self):
        # input validation: 2D array, d >= 2, values inside the unit cube
        with pytest.raises(ValueError, match=r"`sample` is not a 2D array"):
            sample = [0, 1, 0.5]
            _lloyd_centroidal_voronoi_tessellation(sample)
        msg = r"`sample` dimension is not >= 2"
        with pytest.raises(ValueError, match=msg):
            sample = [[0], [0.4], [1]]
            _lloyd_centroidal_voronoi_tessellation(sample)
        msg = r"`sample` is not in unit hypercube"
        with pytest.raises(ValueError, match=msg):
            sample = [[-1.1, 0], [0.1, 0.4], [1, 2]]
            _lloyd_centroidal_voronoi_tessellation(sample)
# mindist
def l2_norm(sample):
    """Return the minimum pairwise Euclidean distance within ``sample``."""
    pairwise = distance.pdist(sample)
    return pairwise.min()
| 52,217
| 37.452135
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_censored_data.py
|
# Tests for the CensoredData class.
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import CensoredData
class TestCensoredData:
    """Tests for scipy.stats.CensoredData: construction, the class-method
    constructors, canonicalization of interval data, and input validation.
    (The tests inspect the private `_uncensored`/`_left`/`_right`/`_interval`
    storage directly.)"""
    def test_basic(self):
        # all four censoring kinds round-trip into the private arrays
        uncensored = [1]
        left = [0]
        right = [2, 5]
        interval = [[2, 3]]
        data = CensoredData(uncensored, left=left, right=right,
                            interval=interval)
        assert_equal(data._uncensored, uncensored)
        assert_equal(data._left, left)
        assert_equal(data._right, right)
        assert_equal(data._interval, interval)
        # _uncensor() collapses intervals to their midpoints
        udata = data._uncensor()
        assert_equal(udata, np.concatenate((uncensored, left, right,
                                            np.mean(interval, axis=1))))
    def test_right_censored(self):
        x = np.array([0, 3, 2.5])
        is_censored = np.array([0, 1, 0], dtype=bool)
        data = CensoredData.right_censored(x, is_censored)
        assert_equal(data._uncensored, x[~is_censored])
        assert_equal(data._right, x[is_censored])
        assert_equal(data._left, [])
        assert_equal(data._interval, np.empty((0, 2)))
    def test_left_censored(self):
        x = np.array([0, 3, 2.5])
        is_censored = np.array([0, 1, 0], dtype=bool)
        data = CensoredData.left_censored(x, is_censored)
        assert_equal(data._uncensored, x[~is_censored])
        assert_equal(data._left, x[is_censored])
        assert_equal(data._right, [])
        assert_equal(data._interval, np.empty((0, 2)))
    def test_interval_censored_basic(self):
        a = [0.5, 2.0, 3.0, 5.5]
        b = [1.0, 2.5, 3.5, 7.0]
        data = CensoredData.interval_censored(low=a, high=b)
        assert_array_equal(data._interval, np.array(list(zip(a, b))))
        assert data._uncensored.shape == (0,)
        assert data._left.shape == (0,)
        assert data._right.shape == (0,)
    def test_interval_censored_mixed(self):
        # This is actually a mix of uncensored, left-censored, right-censored
        # and interval-censored data.  Check that when the `interval_censored`
        # class method is used, the data is correctly separated into the
        # appropriate arrays.
        a = [0.5, -np.inf, -13.0, 2.0, 1.0, 10.0, -1.0]
        b = [0.5, 2500.0, np.inf, 3.0, 1.0, 11.0, np.inf]
        data = CensoredData.interval_censored(low=a, high=b)
        assert_array_equal(data._interval, [[2.0, 3.0], [10.0, 11.0]])
        assert_array_equal(data._uncensored, [0.5, 1.0])
        assert_array_equal(data._left, [2500.0])
        assert_array_equal(data._right, [-13.0, -1.0])
    def test_interval_to_other_types(self):
        # The interval parameter can represent uncensored and
        # left- or right-censored data.  Test the conversion of such
        # an example to the canonical form in which the different
        # types have been split into the separate arrays.
        interval = np.array([[0, 1],        # interval-censored
                             [2, 2],        # not censored
                             [3, 3],        # not censored
                             [9, np.inf],   # right-censored
                             [8, np.inf],   # right-censored
                             [-np.inf, 0],  # left-censored
                             [1, 2]])       # interval-censored
        data = CensoredData(interval=interval)
        assert_equal(data._uncensored, [2, 3])
        assert_equal(data._left, [0])
        assert_equal(data._right, [9, 8])
        assert_equal(data._interval, [[0, 1], [1, 2]])
    def test_empty_arrays(self):
        # all-empty input is valid and produces a zero-length dataset
        data = CensoredData(uncensored=[], left=[], right=[], interval=[])
        assert data._uncensored.shape == (0,)
        assert data._left.shape == (0,)
        assert data._right.shape == (0,)
        assert data._interval.shape == (0, 2)
        assert len(data) == 0
    def test_invalid_constructor_args(self):
        # shape, nan and interval-ordering validation in the constructor
        with pytest.raises(ValueError, match='must be a one-dimensional'):
            CensoredData(uncensored=[[1, 2, 3]])
        with pytest.raises(ValueError, match='must be a one-dimensional'):
            CensoredData(left=[[1, 2, 3]])
        with pytest.raises(ValueError, match='must be a one-dimensional'):
            CensoredData(right=[[1, 2, 3]])
        with pytest.raises(ValueError, match='must be a two-dimensional'):
            CensoredData(interval=[[1, 2, 3]])
        with pytest.raises(ValueError, match='must not contain nan'):
            CensoredData(uncensored=[1, np.nan, 2])
        with pytest.raises(ValueError, match='must not contain nan'):
            CensoredData(left=[1, np.nan, 2])
        with pytest.raises(ValueError, match='must not contain nan'):
            CensoredData(right=[1, np.nan, 2])
        with pytest.raises(ValueError, match='must not contain nan'):
            CensoredData(interval=[[1, np.nan], [2, 3]])
        with pytest.raises(ValueError,
                           match='both values must not be infinite'):
            CensoredData(interval=[[1, 3], [2, 9], [np.inf, np.inf]])
        with pytest.raises(ValueError,
                           match='left value must not exceed the right'):
            CensoredData(interval=[[1, 0], [2, 2]])
    @pytest.mark.parametrize('func', [CensoredData.left_censored,
                                      CensoredData.right_censored])
    def test_invalid_left_right_censored_args(self, func):
        # shared validation of the left_/right_censored class methods
        with pytest.raises(ValueError,
                           match='`x` must be one-dimensional'):
            func([[1, 2, 3]], [0, 1, 1])
        with pytest.raises(ValueError,
                           match='`censored` must be one-dimensional'):
            func([1, 2, 3], [[0, 1, 1]])
        with pytest.raises(ValueError, match='`x` must not contain'):
            func([1, 2, np.nan], [0, 1, 1])
        with pytest.raises(ValueError, match='must have the same length'):
            func([1, 2, 3], [0, 0, 1, 1])
    def test_invalid_censored_args(self):
        # validation specific to interval_censored
        with pytest.raises(ValueError,
                           match='`low` must be a one-dimensional'):
            CensoredData.interval_censored(low=[[3]], high=[4, 5])
        with pytest.raises(ValueError,
                           match='`high` must be a one-dimensional'):
            CensoredData.interval_censored(low=[3], high=[[4, 5]])
        with pytest.raises(ValueError, match='`low` must not contain'):
            CensoredData.interval_censored([1, 2, np.nan], [0, 1, 1])
        with pytest.raises(ValueError, match='must have the same length'):
            CensoredData.interval_censored([1, 2, 3], [0, 0, 1, 1])
    def test_count_censored(self):
        x = [1, 2, 3]
        # data1 has no censored data.
        data1 = CensoredData(x)
        assert data1.num_censored() == 0
        data2 = CensoredData(uncensored=[2.5], left=[10], interval=[[0, 1]])
        assert data2.num_censored() == 2
| 6,935
| 44.333333
| 78
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_discrete_distns.py
|
import pytest
import itertools
from scipy.stats import (betabinom, hypergeom, nhypergeom, bernoulli,
boltzmann, skellam, zipf, zipfian, binom, nbinom,
nchypergeom_fisher, nchypergeom_wallenius, randint)
import numpy as np
from numpy.testing import (
assert_almost_equal, assert_equal, assert_allclose, suppress_warnings
)
from scipy.special import binom as special_binom
from scipy.optimize import root_scalar
from scipy.integrate import quad
# The expected values were computed with Wolfram Alpha, using
# the expression CDF[HypergeometricDistribution[N, n, M], k].
@pytest.mark.parametrize('k, M, n, N, expected, rtol',
                         [(3, 10, 4, 5,
                           0.9761904761904762, 1e-15),
                          (107, 10000, 3000, 215,
                           0.9999999997226765, 1e-15),
                          (10, 10000, 3000, 215,
                           2.681682217692179e-21, 5e-11)])
def test_hypergeom_cdf(k, M, n, N, expected, rtol):
    # compare the cdf against the Wolfram Alpha reference value
    assert_allclose(hypergeom.cdf(k, M, n, N), expected, rtol=rtol)
# The expected values were computed with Wolfram Alpha, using
# the expression SurvivalFunction[HypergeometricDistribution[N, n, M], k].
@pytest.mark.parametrize('k, M, n, N, expected, rtol',
                         [(25, 10000, 3000, 215,
                           0.9999999999052958, 1e-15),
                          (125, 10000, 3000, 215,
                           1.4416781705752128e-18, 5e-11)])
def test_hypergeom_sf(k, M, n, N, expected, rtol):
    # compare the survival function against the Wolfram Alpha reference
    assert_allclose(hypergeom.sf(k, M, n, N), expected, rtol=rtol)
def test_hypergeom_logpmf():
    # symmetry relations of the hypergeometric pmf:
    # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K)
    k, N, K, n = 5, 50, 10, 5
    reference = hypergeom.logpmf(k, N, K, n)
    equivalents = [
        hypergeom.logpmf(n - k, N, N - K, n),
        hypergeom.logpmf(K - k, N, K, N - n),
        hypergeom.logpmf(k, N, n, K),
    ]
    for other in equivalents:
        assert_almost_equal(reference, other, decimal=12)
    # related distribution: with a single draw (n = 1) the hypergeometric
    # reduces to a Bernoulli with success probability K/N
    k, N, K, n = 1, 10, 7, 1
    assert_almost_equal(hypergeom.logpmf(k, N, K, n),
                        bernoulli.logpmf(k, K / N), decimal=12)
def test_nhypergeom_pmf():
    # the negative hypergeometric pmf can be expressed via the
    # (ordinary) hypergeometric pmf; compare the two forms
    M, n, r = 45, 13, 8
    k = 6
    via_hypergeom = (hypergeom.pmf(k, M, n, k + r - 1)
                     * (M - n - (r - 1)) / (M - (k + r - 1)))
    assert_allclose(via_hypergeom, nhypergeom.pmf(k, M, n, r), rtol=1e-10)
def test_nhypergeom_pmfcdf():
    # pmf and cdf over the full support, with hand-computed references
    M, n, r = 8, 3, 4
    support = np.arange(n + 1)
    assert_allclose(nhypergeom.pmf(support, M, n, r),
                    [1/14, 3/14, 5/14, 5/14], rtol=1e-13)
    assert_allclose(nhypergeom.cdf(support, M, n, r),
                    [1/14, 4/14, 9/14, 1.0], rtol=1e-13)
def test_nhypergeom_r0():
    # with r = 0 all of the probability mass sits at k = 0,
    # also when k is passed as a 2-D array
    pmf = nhypergeom.pmf([[0, 1, 2, 0], [1, 2, 0, 3]], 10, 3, 0)
    assert_allclose(pmf, [[1, 0, 0, 1], [0, 0, 1, 0]], rtol=1e-13)
def test_nhypergeom_rvs_shape():
    # A `size` with more dimensions than the broadcast parameter shape
    # must be honoured verbatim by rvs.
    draws = nhypergeom.rvs(22, [7, 8, 9], [[12], [13]], size=(5, 1, 2, 3))
    assert draws.shape == (5, 1, 2, 3)
def test_nhypergeom_accuracy():
    # rvs (post gh-13431) must agree with inverse-transform sampling
    # driven by the same uniform stream
    np.random.seed(0)
    direct = nhypergeom.rvs(22, 7, 11, size=100)
    np.random.seed(0)
    via_ppf = nhypergeom.ppf(np.random.uniform(size=100), 22, 7, 11)
    assert_equal(direct, via_ppf)
def test_boltzmann_upper_bound():
    k = np.arange(-3, 5)
    # N = 1: all probability mass is at k == 0
    assert_equal(boltzmann.pmf(k, 0.123, 1), k == 0)
    # N = 3 with lambda = ln 2 gives geometric weights 4:2:1 (sum 7)
    lam = np.log(2)
    assert_allclose(boltzmann.pmf(k, lam, 3),
                    [0, 0, 0, 4/7, 2/7, 1/7, 0, 0], rtol=1e-13)
    assert_allclose(boltzmann.cdf(k, lam, 3),
                    [0, 0, 0, 4/7, 6/7, 1, 1, 1], rtol=1e-13)
def test_betabinom_a_and_b_unity():
    # limiting case: betabinom(n, 1, 1) is the discrete uniform on 0..n
    n = 20
    support = np.arange(n + 1)
    uniform_pmf = np.full(n + 1, 1 / (n + 1))
    assert_almost_equal(betabinom(n, 1, 1).pmf(support), uniform_pmf)
@pytest.mark.parametrize('dtypes', itertools.product(*[(int, float)]*3))
def test_betabinom_stats_a_and_b_integers_gh18026(dtypes):
# gh-18026 reported that `betabinom` kurtosis calculation fails when some
# parameters are integers. Check that this is resolved.
n_type, a_type, b_type = dtypes
n, a, b = n_type(10), a_type(2), b_type(3)
assert_allclose(betabinom.stats(n, a, b, moments='k'), -0.6904761904761907)
def test_betabinom_bernoulli():
    # limiting case: betabinom(1, a, b) is bernoulli(a / (a + b))
    a, b = 2.3, 0.63
    support = np.arange(2)
    assert_almost_equal(betabinom(1, a, b).pmf(support),
                        bernoulli(a / (a + b)).pmf(support))
def test_issue_10317():
    # nbinom with p = 1 is degenerate at 0, so any confidence interval
    # must collapse to (0, 0)
    assert_equal(nbinom.interval(confidence=0.9, n=10, p=1), (0, 0))
def test_issue_11134():
    # binom with p = 0 is degenerate at 0, so any confidence interval
    # must collapse to (0, 0)
    assert_equal(binom.interval(confidence=0.95, n=10, p=0), (0, 0))
def test_issue_7406():
    np.random.seed(0)
    # with n = 0 the distribution is a point mass at 0 for interior q
    assert_equal(binom.ppf(np.random.rand(10), 0, 0.5), 0)
    # endpoint convention: ppf(0) == -1 and ppf(1) == n (== 0 here)
    assert_equal(binom.ppf(0, 0, 0.5), -1)
    assert_equal(binom.ppf(1, 0, 0.5), 0)
def test_issue_5122():
    # With p = 0 the binomial is a point mass at 0 for any n, so the
    # ppf must be -1 at q = 0, 0 for q in (0, 1), and n at q = 1.
    # (n values are arbitrary; the result is independent of them
    # except at q = 1.)
    n = np.random.randint(100, size=10)
    assert_equal(binom.ppf(0, n, 0), -1)
    assert_equal(binom.ppf(np.linspace(0.01, 0.99, 10), n, 0), 0)
    assert_equal(binom.ppf(1, n, 0), n)
def test_issue_1603():
    # For vanishingly small success probabilities the first percentile
    # of binom(1000, p) must be 0 (not NaN or negative).
    tiny_ps = np.logspace(-3, -100)
    assert_equal(binom(1000, tiny_ps).ppf(0.01), 0)
def test_issue_5503():
    # binom.cdf at the mean of binom(2x, 0.5) stays near 0.5 even for
    # very large counts; checks there is no precision collapse (gh-5503).
    halves = np.logspace(3, 14, 12)
    assert_allclose(binom.cdf(halves, 2 * halves, 0.5), 0.5, atol=1e-2)
@pytest.mark.parametrize('x, n, p, cdf_desired', [
    (300, 1000, 3/10, 0.51559351981411995636),
    (3000, 10000, 3/10, 0.50493298381929698016),
    (30000, 100000, 3/10, 0.50156000591726422864),
    (300000, 1000000, 3/10, 0.50049331906666960038),
    (3000000, 10000000, 3/10, 0.50015600124585261196),
    (30000000, 100000000, 3/10, 0.50004933192735230102),
    (30010000, 100000000, 3/10, 0.98545384016570790717),
    (29990000, 100000000, 3/10, 0.01455017177985268670),
    (29950000, 100000000, 3/10, 5.02250963487432024943e-28),
])
def test_issue_5503pt2(x, n, p, cdf_desired):
    # Accuracy of binom.cdf for very large n, including values near the
    # median and one deep-tail case (gh-5503 follow-up).
    assert_allclose(binom.cdf(x, n, p), cdf_desired)
def test_issue_5503pt3():
    # From Wolfram Alpha: CDF[BinomialDistribution[1e12, 1e-12], 2]
    ref = 0.91969860292869777384
    assert_allclose(binom.cdf(2, 10**12, 10**-12), ref)
def test_issue_6682():
    # Survival function of the negative binomial deep in the tail.
    # Reference value from R:
    #   options(digits=16)
    #   print(pnbinom(250, 50, 32/63, lower.tail=FALSE))
    ref = 1.460458510976452e-35
    assert_allclose(nbinom.sf(250, 50, 32./63.), ref)
def test_boost_divide_by_zero_issue_15101():
    # gh-15101: this parameter combination used to hit a divide-by-zero
    # in the Boost-backed pmf; the value should simply underflow to 0.
    assert_allclose(binom.pmf(996, 1000, 0.01), 0.0)
def test_skellam_gh11474():
    # gh-11474: the `cdfchn` backend lost accuracy for large means;
    # evaluate the symmetric Skellam cdf at 0 across a wide mu range.
    mus = [1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000]
    # Reference values generated in R:
    #   library(skellam)
    #   options(digits = 16)
    #   mu = c(1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000)
    #   pskellam(0, mu, mu, TRUE)
    ref = [0.6542541612768356, 0.5448901559424127, 0.5141135799745580,
           0.5044605891382528, 0.5019947363350450, 0.5019848365953181,
           0.5019750827993392, 0.5019466621805060, 0.5018209330219539]
    assert_allclose(skellam.cdf(0, mus, mus), ref)
class TestZipfian:
    """Tests for `zipfian` (finite Zipf distribution) against `zipf`,
    reference values from R's VGAM package, and a naive implementation."""

    def test_zipfian_asymptotic(self):
        # test limiting case that zipfian(a, n) -> zipf(a) as n-> oo
        a = 6.5
        N = 10000000
        k = np.arange(1, 21)
        assert_allclose(zipfian.pmf(k, a, N), zipf.pmf(k, a))
        assert_allclose(zipfian.cdf(k, a, N), zipf.cdf(k, a))
        assert_allclose(zipfian.sf(k, a, N), zipf.sf(k, a))
        assert_allclose(zipfian.stats(a, N, moments='msvk'),
                        zipf.stats(a, moments='msvk'))

    def test_zipfian_continuity(self):
        # test that zipfian(0.999999, n) ~ zipfian(1.000001, n)
        # (a = 1 switches between methods of calculating harmonic sum)
        alt1, agt1 = 0.99999999, 1.00000001
        N = 30
        k = np.arange(1, N + 1)
        assert_allclose(zipfian.pmf(k, alt1, N), zipfian.pmf(k, agt1, N),
                        rtol=5e-7)
        assert_allclose(zipfian.cdf(k, alt1, N), zipfian.cdf(k, agt1, N),
                        rtol=5e-7)
        assert_allclose(zipfian.sf(k, alt1, N), zipfian.sf(k, agt1, N),
                        rtol=5e-7)
        assert_allclose(zipfian.stats(alt1, N, moments='msvk'),
                        zipfian.stats(agt1, N, moments='msvk'), rtol=5e-7)

    def test_zipfian_R(self):
        # test against R VGAM package
        # library(VGAM)
        # k <- c(13, 16, 1, 4, 4, 8, 10, 19, 5, 7)
        # a <- c(1.56712977, 3.72656295, 5.77665117, 9.12168729, 5.79977172,
        #        4.92784796, 9.36078764, 4.3739616 , 7.48171872, 4.6824154)
        # n <- c(70, 80, 48, 65, 83, 89, 50, 30, 20, 20)
        # pmf <- dzipf(k, N = n, shape = a)
        # cdf <- pzipf(k, N = n, shape = a)
        # print(pmf)
        # print(cdf)
        # The seed reproduces the k/a/n vectors listed above.
        np.random.seed(0)
        k = np.random.randint(1, 20, size=10)
        a = np.random.rand(10)*10 + 1
        n = np.random.randint(1, 100, size=10)
        pmf = [8.076972e-03, 2.950214e-05, 9.799333e-01, 3.216601e-06,
               3.158895e-04, 3.412497e-05, 4.350472e-10, 2.405773e-06,
               5.860662e-06, 1.053948e-04]
        cdf = [0.8964133, 0.9998666, 0.9799333, 0.9999995, 0.9998584,
               0.9999458, 1.0000000, 0.9999920, 0.9999977, 0.9998498]
        # skip the first point; zipUC is not accurate for low a, n
        assert_allclose(zipfian.pmf(k, a, n)[1:], pmf[1:], rtol=1e-6)
        assert_allclose(zipfian.cdf(k, a, n)[1:], cdf[1:], rtol=5e-5)

    # Class-level seed makes the parametrization cases reproducible at
    # collection time; each row of `naive_tests` is an (a, n) pair.
    np.random.seed(0)
    naive_tests = np.vstack((np.logspace(-2, 1, 10),
                             np.random.randint(2, 40, 10))).T

    @pytest.mark.parametrize("a, n", naive_tests)
    def test_zipfian_naive(self, a, n):
        # test against bare-bones implementation

        @np.vectorize
        def Hns(n, s):
            """Naive implementation of harmonic sum"""
            return (1/np.arange(1, n+1)**s).sum()

        @np.vectorize
        def pzip(k, a, n):
            """Naive implementation of zipfian pmf"""
            if k < 1 or k > n:
                return 0.
            else:
                return 1 / k**a / Hns(n, a)

        k = np.arange(n+1)
        pmf = pzip(k, a, n)
        cdf = np.cumsum(pmf)
        mean = np.average(k, weights=pmf)
        var = np.average((k - mean)**2, weights=pmf)
        std = var**0.5
        skew = np.average(((k-mean)/std)**3, weights=pmf)
        kurtosis = np.average(((k-mean)/std)**4, weights=pmf) - 3
        assert_allclose(zipfian.pmf(k, a, n), pmf)
        assert_allclose(zipfian.cdf(k, a, n), cdf)
        assert_allclose(zipfian.stats(a, n, moments="mvsk"),
                        [mean, var, skew, kurtosis])
class TestNCH():
    """Tests for the noncentral hypergeometric distributions
    (`nchypergeom_fisher`, `nchypergeom_wallenius`).

    The class body builds a shared set of random but reproducible
    parameter arrays; the exact RNG call order below determines the
    fixture data, so it must not be reordered.
    """
    np.random.seed(2)  # seeds 0 and 1 had some xl = xu; randint failed
    shape = (2, 4, 3)
    max_m = 100
    m1 = np.random.randint(1, max_m, size=shape)  # red balls
    m2 = np.random.randint(1, max_m, size=shape)  # white balls
    N = m1 + m2  # total balls
    n = randint.rvs(0, N, size=N.shape)  # number of draws
    xl = np.maximum(0, n-m2)  # lower bound of support
    xu = np.minimum(n, m1)  # upper bound of support
    x = randint.rvs(xl, xu, size=xl.shape)
    odds = np.random.rand(*x.shape)*2

    # test output is more readable when function names (strings) are passed
    @pytest.mark.parametrize('dist_name',
                             ['nchypergeom_fisher', 'nchypergeom_wallenius'])
    def test_nch_hypergeom(self, dist_name):
        # Both noncentral hypergeometric distributions reduce to the
        # hypergeometric distribution when odds = 1
        dists = {'nchypergeom_fisher': nchypergeom_fisher,
                 'nchypergeom_wallenius': nchypergeom_wallenius}
        dist = dists[dist_name]
        x, N, m1, n = self.x, self.N, self.m1, self.n
        assert_allclose(dist.pmf(x, N, m1, n, odds=1),
                        hypergeom.pmf(x, N, m1, n))

    def test_nchypergeom_fisher_naive(self):
        # test against a very simple implementation
        x, N, m1, n, odds = self.x, self.N, self.m1, self.n, self.odds

        @np.vectorize
        def pmf_mean_var(x, N, m1, n, w):
            # simple implementation of nchypergeom_fisher pmf
            m2 = N - m1
            xl = np.maximum(0, n-m2)
            xu = np.minimum(n, m1)

            def f(x):
                t1 = special_binom(m1, x)
                t2 = special_binom(m2, n - x)
                return t1 * t2 * w**x

            # P(k) is the k-th moment's unnormalized sum over the support
            def P(k):
                return sum(f(y)*y**k for y in range(xl, xu + 1))

            P0 = P(0)
            P1 = P(1)
            P2 = P(2)
            pmf = f(x) / P0
            mean = P1 / P0
            var = P2 / P0 - (P1 / P0)**2
            return pmf, mean, var

        pmf, mean, var = pmf_mean_var(x, N, m1, n, odds)
        assert_allclose(nchypergeom_fisher.pmf(x, N, m1, n, odds), pmf)
        assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='m'),
                        mean)
        assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='v'),
                        var)

    def test_nchypergeom_wallenius_naive(self):
        # test against a very simple implementation
        # (local RNG-generated parameters, independent of the class fixtures)
        np.random.seed(2)
        shape = (2, 4, 3)
        max_m = 100
        m1 = np.random.randint(1, max_m, size=shape)
        m2 = np.random.randint(1, max_m, size=shape)
        N = m1 + m2
        n = randint.rvs(0, N, size=N.shape)
        xl = np.maximum(0, n-m2)
        xu = np.minimum(n, m1)
        x = randint.rvs(xl, xu, size=xl.shape)
        w = np.random.rand(*x.shape)*2

        def support(N, m1, n, w):
            m2 = N - m1
            xl = np.maximum(0, n-m2)
            xu = np.minimum(n, m1)
            return xl, xu

        @np.vectorize
        def mean(N, m1, n, w):
            # mean as the root of Wallenius' implicit equation
            m2 = N - m1
            xl, xu = support(N, m1, n, w)

            def fun(u):
                return u/m1 + (1 - (n-u)/m2)**w - 1

            return root_scalar(fun, bracket=(xl, xu)).root

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning,
                       message="invalid value encountered in mean")
            assert_allclose(nchypergeom_wallenius.mean(N, m1, n, w),
                            mean(N, m1, n, w), rtol=2e-2)

        @np.vectorize
        def variance(N, m1, n, w):
            m2 = N - m1
            u = mean(N, m1, n, w)
            a = u * (m1 - u)
            b = (n-u)*(u + m2 - n)
            return N*a*b / ((N-1) * (m1*b + m2*a))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning,
                       message="invalid value encountered in mean")
            assert_allclose(
                nchypergeom_wallenius.stats(N, m1, n, w, moments='v'),
                variance(N, m1, n, w),
                rtol=5e-2
            )

        @np.vectorize
        def pmf(x, N, m1, n, w):
            # pmf via numerical quadrature of Wallenius' integral form
            m2 = N - m1
            xl, xu = support(N, m1, n, w)

            def integrand(t):
                D = w*(m1 - x) + (m2 - (n-x))
                res = (1-t**(w/D))**x * (1-t**(1/D))**(n-x)
                return res

            def f(x):
                t1 = special_binom(m1, x)
                t2 = special_binom(m2, n - x)
                the_integral = quad(integrand, 0, 1,
                                    epsrel=1e-16, epsabs=1e-16)
                return t1 * t2 * the_integral[0]

            return f(x)

        pmf0 = pmf(x, N, m1, n, w)
        pmf1 = nchypergeom_wallenius.pmf(x, N, m1, n, w)

        atol, rtol = 1e-6, 1e-6
        i = np.abs(pmf1 - pmf0) < atol + rtol*np.abs(pmf0)
        assert i.sum() > np.prod(shape) / 2  # works at least half the time

        # for those that fail, discredit the naive implementation
        for N, m1, n, w in zip(N[~i], m1[~i], n[~i], w[~i]):
            # get the support
            m2 = N - m1
            xl, xu = support(N, m1, n, w)
            x = np.arange(xl, xu + 1)

            # calculate sum of pmf over the support
            # the naive implementation is very wrong in these cases
            assert pmf(x, N, m1, n, w).sum() < .5
            assert_allclose(nchypergeom_wallenius.pmf(x, N, m1, n, w).sum(), 1)

    def test_wallenius_against_mpmath(self):
        # precompute data with mpmath since naive implementation above
        # is not reliable. See source code in gh-13330.
        M = 50
        n = 30
        N = 20
        odds = 2.25
        # Expected results, computed with mpmath.
        sup = np.arange(21)
        pmf = np.array([3.699003068656875e-20,
                        5.89398584245431e-17,
                        2.1594437742911123e-14,
                        3.221458044649955e-12,
                        2.4658279241205077e-10,
                        1.0965862603981212e-08,
                        3.057890479665704e-07,
                        5.622818831643761e-06,
                        7.056482841531681e-05,
                        0.000618899425358671,
                        0.003854172932571669,
                        0.01720592676256026,
                        0.05528844897093792,
                        0.12772363313574242,
                        0.21065898367825722,
                        0.24465958845359234,
                        0.1955114898110033,
                        0.10355390084949237,
                        0.03414490375225675,
                        0.006231989845775931,
                        0.0004715577304677075])
        mean = 14.808018384813426
        var = 2.6085975877923717

        # nchypergeom_wallenius.pmf returns 0 for pmf(0) and pmf(1), and pmf(2)
        # has only three digits of accuracy (~ 2.1511e-14).
        assert_allclose(nchypergeom_wallenius.pmf(sup, M, n, N, odds), pmf,
                        rtol=1e-13, atol=1e-13)
        assert_allclose(nchypergeom_wallenius.mean(M, n, N, odds),
                        mean, rtol=1e-13)
        assert_allclose(nchypergeom_wallenius.var(M, n, N, odds),
                        var, rtol=1e-11)

    @pytest.mark.parametrize('dist_name',
                             ['nchypergeom_fisher', 'nchypergeom_wallenius'])
    def test_rvs_shape(self, dist_name):
        # Check that when given a size with more dimensions than the
        # dimensions of the broadcast parameters, rvs returns an array
        # with the correct shape.
        dists = {'nchypergeom_fisher': nchypergeom_fisher,
                 'nchypergeom_wallenius': nchypergeom_wallenius}
        dist = dists[dist_name]
        x = dist.rvs(50, 30, [[10], [20]], [0.5, 1.0, 2.0], size=(5, 1, 2, 3))
        assert x.shape == (5, 1, 2, 3)
@pytest.mark.parametrize("mu, q, expected",
                         [[10, 120, -1.240089881791596e-38],
                          [1500, 0, -86.61466680572661]])
def test_nbinom_11465(mu, q, expected):
    # test nbinom.logcdf at extreme tails
    # Convert R's (mu, size) parametrization to scipy's (n, p).
    size = 20
    n, p = size, size/(size+mu)
    # In R:
    # options(digits=16)
    # pnbinom(mu=10, size=20, q=120, log.p=TRUE)
    assert_allclose(nbinom.logcdf(q, n, p), expected)
def test_gh_17146():
    # Discrete distributions must return pmf 0 at non-integral x (gh-17146).
    grid = np.linspace(0, 1, 11)
    p = 0.8
    probs = bernoulli(p).pmf(grid)
    integral = (grid % 1 == 0)
    assert_allclose(probs[-1], p)       # pmf(1) == p
    assert_allclose(probs[0], 1 - p)    # pmf(0) == 1 - p
    assert_equal(probs[~integral], 0)   # everything in between is 0
| 20,463
| 34.466205
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_mstats_extras.py
|
import numpy as np
import numpy.ma as ma
import scipy.stats.mstats as ms
from numpy.testing import (assert_equal, assert_almost_equal, assert_,
assert_allclose)
def test_compare_medians_ms():
    # A constant shift of 10 makes the medians unmistakably different
    # (p ~= 0); the second pair yields a small documented p-value.
    x = np.arange(7)
    assert_almost_equal(ms.compare_medians_ms(x, x + 10), 0)
    assert_almost_equal(ms.compare_medians_ms(x, np.linspace(0, 1, num=10)),
                        0.017116406778)
def test_hdmedian():
    # 1-D: the Harrell-Davis median of 0..10 is 5; masking everything
    # past the first seven entries shifts it to 3.
    vec = ma.arange(11)
    assert_allclose(ms.hdmedian(vec), 5, rtol=1e-14)
    vec.mask = ma.make_mask(vec)
    vec.mask[:7] = False
    assert_allclose(ms.hdmedian(vec), 3, rtol=1e-14)

    # Check that `var` keyword returns a value. TODO: check whether returned
    # value is actually correct.
    assert_(ms.hdmedian(vec, var=True).size == 2)

    # 2-D: medians are taken column-wise along axis 0.
    mat = ma.arange(22).reshape((11, 2))
    assert_allclose(ms.hdmedian(mat, axis=0), [10, 11])
    mat.mask = ma.make_mask(mat)
    mat.mask[:7, :] = False
    assert_allclose(ms.hdmedian(mat, axis=0), [6, 7])
def test_rsh():
    # Sanity check only: output shape matches input, and the `points`
    # keyword evaluates the estimate at exactly the requested locations.
    # TODO: check that the implementation is correct.
    np.random.seed(132345)
    sample = np.random.randn(100)
    assert_(ms.rsh(sample).shape == sample.shape)
    assert_(ms.rsh(sample, points=[0, 1.]).size == 2)
def test_mjci():
    # Maritz-Jarrett standard errors of the three quartiles for a
    # small, heavily skewed sample (reference values to 5 decimals).
    sample = ma.array([77, 87, 88, 114, 151, 210, 219, 246, 253, 262,
                       296, 299, 306, 376, 428, 515, 666, 1310, 2611])
    assert_almost_equal(ms.mjci(sample), [55.76819, 45.84028, 198.87875], 5)
def test_trimmed_mean_ci():
    # 20%-trimmed mean and its confidence interval for a fixed sample.
    sample = ma.array([545, 555, 558, 572, 575, 576, 578, 580,
                       594, 605, 635, 651, 653, 661, 666])
    assert_almost_equal(ms.trimmed_mean(sample, 0.2), 596.2, 1)
    ci = np.round(ms.trimmed_mean_ci(sample, (0.2, 0.2)), 1)
    assert_equal(ci, [561.8, 630.6])
def test_idealfourths():
    # Ideal fourths of 0..99, in 1-D and along both axes of a 2-D copy.
    arr = np.arange(100)
    assert_almost_equal(np.asarray(ms.idealfourths(arr)),
                        [24.416667, 74.583333], 6)
    arr_2d = arr.repeat(3).reshape(-1, 3)
    assert_almost_equal(ms.idealfourths(arr_2d, axis=0),
                        [[24.416667, 24.416667, 24.416667],
                         [74.583333, 74.583333, 74.583333]], 6)
    assert_almost_equal(ms.idealfourths(arr_2d, axis=1),
                        arr.repeat(2).reshape(-1, 2))
    # With fewer than 3 data points the result is undefined (NaN).
    assert_(np.isnan(ms.idealfourths([0, 0])).all())
class TestQuantiles:
    """Tests for Harrell-Davis quantile estimators on a fixed
    100-point sample of uniform(0, 1) draws."""
    # Fixed fixture data shared by all tests in this class.
    data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014,
        0.887764025,0.239407086,0.349638551,0.972791145,0.149789972,
        0.936947700,0.132359948,0.046041972,0.641675031,0.945530547,
        0.224218684,0.771450991,0.820257774,0.336458052,0.589113496,
        0.509736129,0.696838829,0.491323573,0.622767425,0.775189248,
        0.641461450,0.118455200,0.773029450,0.319280007,0.752229111,
        0.047841438,0.466295911,0.583850781,0.840581845,0.550086491,
        0.466470062,0.504765074,0.226855960,0.362641207,0.891620942,
        0.127898691,0.490094097,0.044882048,0.041441695,0.317976349,
        0.504135618,0.567353033,0.434617473,0.636243375,0.231803616,
        0.230154113,0.160011327,0.819464108,0.854706985,0.438809221,
        0.487427267,0.786907310,0.408367937,0.405534192,0.250444460,
        0.995309248,0.144389588,0.739947527,0.953543606,0.680051621,
        0.388382017,0.863530727,0.006514031,0.118007779,0.924024803,
        0.384236354,0.893687694,0.626534881,0.473051932,0.750134705,
        0.241843555,0.432947602,0.689538104,0.136934797,0.150206859,
        0.474335206,0.907775349,0.525869295,0.189184225,0.854284286,
        0.831089744,0.251637345,0.587038213,0.254475554,0.237781276,
        0.827928620,0.480283781,0.594514455,0.213641488,0.024194386,
        0.536668589,0.699497811,0.892804071,0.093835427,0.731107772]

    def test_hdquantiles(self):
        # Extreme quantiles (0 and 1) coincide with the sample min/max;
        # interior quantiles match known reference values, in 1-D and
        # column-wise on a 10x10 reshape (with and without `var`).
        data = self.data
        assert_almost_equal(ms.hdquantiles(data,[0., 1.]),
                            [0.006514031, 0.995309248])
        hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75])
        assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,])

        data = np.array(data).reshape(10,10)
        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0)
        assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75]))
        assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75]))
        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True)
        assert_almost_equal(hdq[...,0],
                            ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True))
        assert_almost_equal(hdq[...,-1],
                            ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True))

    def test_hdquantiles_sd(self):
        # Standard deviation is a jackknife estimator, so we can check if
        # the efficient version (hdquantiles_sd) matches a rudimentary,
        # but clear version here.

        hd_std_errs = ms.hdquantiles_sd(self.data)

        # jacknnife standard error, Introduction to the Bootstrap Eq. 11.5
        n = len(self.data)
        jdata = np.broadcast_to(self.data, (n, n))
        jselector = np.logical_not(np.eye(n))  # leave out one sample each row
        jdata = jdata[jselector].reshape(n, n-1)
        jdist = ms.hdquantiles(jdata, axis=1)
        jdist_mean = np.mean(jdist, axis=0)
        jstd = ((n-1)/n * np.sum((jdist - jdist_mean)**2, axis=0))**.5

        assert_almost_equal(hd_std_errs, jstd)
        # Test actual values for good measure
        assert_almost_equal(hd_std_errs, [0.0379258, 0.0380656, 0.0380013])

        two_data_points = ms.hdquantiles_sd([1, 2])
        assert_almost_equal(two_data_points, [0.5, 0.5, 0.5])

    def test_mquantiles_cimj(self):
        # Only test that code runs, implementation not checked for correctness
        ci_lower, ci_upper = ms.mquantiles_cimj(self.data)
        assert_(ci_lower.size == ci_upper.size == 3)
def test_median_cihs():
    """Check `median_cihs` against two independent R references."""
    # Basic test against R library EnvStats function `eqnpar`, e.g.
    # library(EnvStats)
    # options(digits=8)
    # x = c(0.88612955, 0.35242375, 0.66240904, 0.94617974, 0.10929913,
    #       0.76699506, 0.88550655, 0.62763754, 0.76818588, 0.68506508,
    #       0.88043148, 0.03911248, 0.93805564, 0.95326961, 0.25291112,
    #       0.16128487, 0.49784577, 0.24588924, 0.6597, 0.92239679)
    # eqnpar(x, p=0.5,
    #        ci.method = "interpolate", approx.conf.level = 0.95, ci = TRUE)
    # The seeded generator reproduces the x vector listed above.
    rng = np.random.default_rng(8824288259505800535)
    x = rng.random(size=20)
    assert_allclose(ms.median_cihs(x), (0.38663198, 0.88431272))
    # SciPy's 90% CI upper limit doesn't match that of EnvStats eqnpar. SciPy
    # doesn't look wrong, and it agrees with a different reference,
    # `median_confint_hs` from `hoehleatsu/quantileCI`.
    # In (e.g.) Colab with R runtime:
    # devtools::install_github("hoehleatsu/quantileCI")
    # library(quantileCI)
    # median_confint_hs(x=x, conf.level=0.90, interpolate=TRUE)
    assert_allclose(ms.median_cihs(x, 0.1), (0.48319773366, 0.88094268050))
| 7,297
| 41.184971
| 82
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_boost_ufuncs.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import _boost
# Map a NumPy type character to (dtype, relative tolerance) for checks below.
type_char_to_type_tol = {'f': (np.float32, 32*np.finfo(np.float32).eps),
                         'd': (np.float64, 32*np.finfo(np.float64).eps)}

# Each item in this list is
#   (func, args, expected_value)
# All the values can be represented exactly, even with np.float32.
#
# This is not an exhaustive test data set of all the functions!
# It is a spot check of several functions, primarily for
# checking that the different data types are handled correctly.
test_data = [
    (_boost._beta_cdf, (0.5, 2, 3), 0.6875),
    (_boost._beta_ppf, (0.6875, 2, 3), 0.5),
    (_boost._beta_pdf, (0.5, 2, 3), 1.5),
    (_boost._beta_pdf, (0, 1, 5), 5.0),
    (_boost._beta_pdf, (1, 5, 1), 5.0),
    (_boost._beta_sf, (0.5, 2, 1), 0.75),
    (_boost._beta_isf, (0.75, 2, 1), 0.5),
    (_boost._binom_cdf, (1, 3, 0.5), 0.5),
    (_boost._binom_pdf, (1, 4, 0.5), 0.25),
    (_boost._hypergeom_cdf, (2, 3, 5, 6), 0.5),
    (_boost._nbinom_cdf, (1, 4, 0.25), 0.015625),
    (_boost._ncf_mean, (10, 12, 2.5), 1.5),
]
@pytest.mark.parametrize('func, args, expected', test_data)
def test_stats_boost_ufunc(func, args, expected):
    # Evaluate each Boost-backed ufunc once per supported output dtype
    # ('f' and/or 'd'), checking both the result type and its value.
    type_sigs = func.types
    type_chars = [sig.split('->')[-1] for sig in type_sigs]
    for type_char in type_chars:
        typ, rtol = type_char_to_type_tol[type_char]
        # Re-cast the arguments to the dtype under test.
        args = [typ(arg) for arg in args]
        # Harmless overflow warnings are a "feature" of some wrappers on some
        # plaforms. This test is about dtype and accuracy, so let's avoid false
        # test failures cause by these warnings. See gh-17432.
        with np.errstate(over='ignore'):
            value = func(*args)
        assert isinstance(value, typ)
        assert_allclose(value, expected, rtol=rtol)
| 1,824
| 37.020833
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/stats/tests/test_sensitivity_analysis.py
|
from typing import Tuple
import numpy as np
from numpy.testing import assert_allclose, assert_array_less
import pytest
from scipy import stats
from scipy.stats import sobol_indices
from scipy.stats._resampling import BootstrapResult
from scipy.stats._sensitivity_analysis import (
BootstrapSobolResult, f_ishigami, sample_AB, sample_A_B
)
@pytest.fixture(scope='session')
def ishigami_ref_indices():
    """Reference values for Ishigami from Saltelli2007.

    Chapter 4, exercise 5 pages 179-182.

    Returns (first_order, total_order) Sobol' index arrays for the
    three inputs of the Ishigami function.
    """
    a = 7.
    b = 0.1

    # Total variance and the individual variance contributions.
    var = 0.5 + a**2/8 + b*np.pi**4/5 + b**2*np.pi**8/18
    v1 = 0.5 + b*np.pi**4/5 + b**2*np.pi**8/50
    v2 = a**2/8
    v3 = 0
    v12 = 0
    # v13: mistake in the book, see other derivations e.g. in 10.1002/nme.4856
    v13 = b**2*np.pi**8*8/225
    v23 = 0

    s_first = np.array([v1, v2, v3])/var
    s_second = np.array([
        [0., 0., v13],
        [v12, 0., v23],
        [v13, v23, 0.]
    ])/var
    s_total = s_first + s_second.sum(axis=1)

    return s_first, s_total
def f_ishigami_vec(x):
"""Output of shape (2, n)."""
res = f_ishigami(x)
return res, res
class TestSobolIndices:
    """Tests for `scipy.stats.sobol_indices` using the Ishigami
    function, whose Sobol' indices are known analytically."""

    # Three independent uniform(-pi, pi) inputs, as required by Ishigami.
    dists = [
        stats.uniform(loc=-np.pi, scale=2*np.pi)  # type: ignore[attr-defined]
    ] * 3

    def test_sample_AB(self):
        # Check the AB sample construction: row i of AB[i] comes from B,
        # all other rows from A.
        # (d, n)
        A = np.array(
            [[1, 4, 7, 10],
             [2, 5, 8, 11],
             [3, 6, 9, 12]]
        )
        B = A + 100
        # (d, d, n)
        ref = np.array(
            [[[101, 104, 107, 110],
              [2, 5, 8, 11],
              [3, 6, 9, 12]],
             [[1, 4, 7, 10],
              [102, 105, 108, 111],
              [3, 6, 9, 12]],
             [[1, 4, 7, 10],
              [2, 5, 8, 11],
              [103, 106, 109, 112]]]
        )
        AB = sample_AB(A=A, B=B)
        assert_allclose(AB, ref)

    @pytest.mark.xfail_on_32bit("Can't create large array for test")
    @pytest.mark.parametrize(
        'func',
        [f_ishigami, pytest.param(f_ishigami_vec, marks=pytest.mark.slow)],
        ids=['scalar', 'vector']
    )
    def test_ishigami(self, ishigami_ref_indices, func):
        # End-to-end check against the analytic Ishigami indices,
        # plus sanity checks on the bootstrap confidence intervals.
        rng = np.random.default_rng(28631265345463262246170309650372465332)
        res = sobol_indices(
            func=func, n=4096,
            dists=self.dists,
            random_state=rng
        )

        if func.__name__ == 'f_ishigami_vec':
            # Vector output duplicates the reference indices per output.
            ishigami_ref_indices = [
                    [ishigami_ref_indices[0], ishigami_ref_indices[0]],
                    [ishigami_ref_indices[1], ishigami_ref_indices[1]]
            ]

        assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
        assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2)

        assert res._bootstrap_result is None
        bootstrap_res = res.bootstrap(n_resamples=99)
        assert isinstance(bootstrap_res, BootstrapSobolResult)
        assert isinstance(res._bootstrap_result, BootstrapResult)

        assert res._bootstrap_result.confidence_interval.low.shape[0] == 2
        assert res._bootstrap_result.confidence_interval.low[1].shape \
               == res.first_order.shape

        assert bootstrap_res.first_order.confidence_interval.low.shape \
               == res.first_order.shape
        assert bootstrap_res.total_order.confidence_interval.low.shape \
               == res.total_order.shape

        # The point estimates must lie inside their own bootstrap CIs.
        assert_array_less(
            bootstrap_res.first_order.confidence_interval.low, res.first_order
        )
        assert_array_less(
            res.first_order, bootstrap_res.first_order.confidence_interval.high
        )
        assert_array_less(
            bootstrap_res.total_order.confidence_interval.low, res.total_order
        )
        assert_array_less(
            res.total_order, bootstrap_res.total_order.confidence_interval.high
        )

        # call again to use previous results and change a param
        assert isinstance(
            res.bootstrap(confidence_level=0.9, n_resamples=99),
            BootstrapSobolResult
        )
        assert isinstance(res._bootstrap_result, BootstrapResult)

    def test_func_dict(self, ishigami_ref_indices):
        # `func` may be a dict of precomputed evaluations instead of a
        # callable; `dists` is then optional.
        rng = np.random.default_rng(28631265345463262246170309650372465332)
        n = 4096
        dists = [
            stats.uniform(loc=-np.pi, scale=2*np.pi),
            stats.uniform(loc=-np.pi, scale=2*np.pi),
            stats.uniform(loc=-np.pi, scale=2*np.pi)
        ]

        A, B = sample_A_B(n=n, dists=dists, random_state=rng)
        AB = sample_AB(A=A, B=B)

        func = {
            'f_A': f_ishigami(A).reshape(1, -1),
            'f_B': f_ishigami(B).reshape(1, -1),
            'f_AB': f_ishigami(AB).reshape((3, 1, -1))
        }

        res = sobol_indices(
            func=func, n=n,
            dists=dists,
            random_state=rng
        )
        assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)

        res = sobol_indices(
            func=func, n=n,
            random_state=rng
        )
        assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)

    def test_method(self, ishigami_ref_indices):
        # A user-supplied `method` callable (here Jansen/Sobol'
        # estimators) must be accepted, with or without type hints.
        def jansen_sobol(f_A, f_B, f_AB):
            """Jansen for S and Sobol' for St.

            From Saltelli2010, table 2 formulations (c) and (e)."""
            var = np.var([f_A, f_B], axis=(0, -1))

            s = (var - 0.5*np.mean((f_B - f_AB)**2, axis=-1)) / var
            st = np.mean(f_A*(f_A - f_AB), axis=-1) / var

            return s.T, st.T

        rng = np.random.default_rng(28631265345463262246170309650372465332)
        res = sobol_indices(
            func=f_ishigami, n=4096,
            dists=self.dists,
            method=jansen_sobol,
            random_state=rng
        )

        assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
        assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2)

        def jansen_sobol_typed(
            f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray
        ) -> Tuple[np.ndarray, np.ndarray]:
            return jansen_sobol(f_A, f_B, f_AB)

        _ = sobol_indices(
            func=f_ishigami, n=8,
            dists=self.dists,
            method=jansen_sobol_typed,
            random_state=rng
        )

    def test_normalization(self, ishigami_ref_indices):
        # Sobol' indices are invariant under a constant shift of the output.
        rng = np.random.default_rng(28631265345463262246170309650372465332)
        res = sobol_indices(
            func=lambda x: f_ishigami(x) + 1000, n=4096,
            dists=self.dists,
            random_state=rng
        )

        assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2)
        assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2)

    def test_constant_function(self, ishigami_ref_indices):
        # A constant output component must yield zero indices without
        # breaking the other components.
        def f_ishigami_vec_const(x):
            """Output of shape (3, n)."""
            res = f_ishigami(x)
            return res, res * 0 + 10, res

        rng = np.random.default_rng(28631265345463262246170309650372465332)
        res = sobol_indices(
            func=f_ishigami_vec_const, n=4096,
            dists=self.dists,
            random_state=rng
        )

        ishigami_vec_indices = [
                [ishigami_ref_indices[0], [0, 0, 0], ishigami_ref_indices[0]],
                [ishigami_ref_indices[1], [0, 0, 0], ishigami_ref_indices[1]]
        ]

        assert_allclose(res.first_order, ishigami_vec_indices[0], atol=1e-2)
        assert_allclose(res.total_order, ishigami_vec_indices[1], atol=1e-2)

    @pytest.mark.xfail_on_32bit("Can't create large array for test")
    def test_more_converged(self, ishigami_ref_indices):
        # With a much larger sample the estimates converge more tightly.
        rng = np.random.default_rng(28631265345463262246170309650372465332)
        res = sobol_indices(
            func=f_ishigami, n=2**19,  # 524288
            dists=self.dists,
            random_state=rng
        )

        assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-4)
        assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-4)

    def test_raises(self):
        # Input validation: each malformed argument raises a ValueError
        # with a specific message.

        message = r"Each distribution in `dists` must have method `ppf`"
        with pytest.raises(ValueError, match=message):
            sobol_indices(n=0, func=f_ishigami, dists="uniform")

        with pytest.raises(ValueError, match=message):
            sobol_indices(n=0, func=f_ishigami, dists=[lambda x: x])

        message = r"The balance properties of Sobol'"
        with pytest.raises(ValueError, match=message):
            sobol_indices(n=7, func=f_ishigami, dists=[stats.uniform()])

        with pytest.raises(ValueError, match=message):
            sobol_indices(n=4.1, func=f_ishigami, dists=[stats.uniform()])

        message = r"'toto' is not a valid 'method'"
        with pytest.raises(ValueError, match=message):
            sobol_indices(n=0, func=f_ishigami, method='toto')

        message = r"must have the following signature"
        with pytest.raises(ValueError, match=message):
            sobol_indices(n=0, func=f_ishigami, method=lambda x: x)

        message = r"'dists' must be defined when 'func' is a callable"
        with pytest.raises(ValueError, match=message):
            sobol_indices(n=0, func=f_ishigami)

        def func_wrong_shape_output(x):
            return x.reshape(-1, 1)

        message = r"'func' output should have a shape"
        with pytest.raises(ValueError, match=message):
            sobol_indices(
                n=2, func=func_wrong_shape_output, dists=[stats.uniform()]
            )

        message = r"When 'func' is a dictionary"
        with pytest.raises(ValueError, match=message):
            sobol_indices(
                n=2, func={'f_A': [], 'f_AB': []}, dists=[stats.uniform()]
            )

        with pytest.raises(ValueError, match=message):
            # f_B malformed
            sobol_indices(
                n=2,
                func={'f_A': [1, 2], 'f_B': [3], 'f_AB': [5, 6, 7, 8]},
            )

        with pytest.raises(ValueError, match=message):
            # f_AB malformed
            sobol_indices(
                n=2,
                func={'f_A': [1, 2], 'f_B': [3, 4], 'f_AB': [5, 6, 7]},
            )
| 10,160
| 32.534653
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_fit.py
|
import os
import numpy as np
import numpy.testing as npt
from numpy.testing import assert_allclose, assert_equal
import pytest
from scipy import stats
from scipy.optimize import differential_evolution
from .test_continuous_basic import distcont
from scipy.stats._distn_infrastructure import FitError
from scipy.stats._distr_params import distdiscrete
from scipy.stats import goodness_of_fit
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much

fit_sizes = [1000, 5000, 10000]  # sample sizes to try

thresh_percent = 0.25  # percent of true parameters for fail cut-off
thresh_min = 0.75  # minimum difference estimate - true to fail test

# Distributions whose MLE fit is known to be unreliable; skipped by
# default (see SCIPY_XFAIL in test_cont_fit).
mle_failing_fits = [
        'gausshyper',
        'genexpon',
        'gengamma',
        'kappa4',
        'ksone',
        'kstwo',
        'ncf',
        'ncx2',
        'truncexpon',
        'tukeylambda',
        'vonmises',
        'levy_stable',
        'trapezoid',
        'truncweibull_min',
        'studentized_range',
]

# The MLE fit method of these distributions doesn't perform well when all
# parameters are fit, so test them with the location fixed at 0.
mle_use_floc0 = [
    'burr',
    'chi',
    'chi2',
    'mielke',
    'pearson3',
    'genhalflogistic',
    'rdist',
    'pareto',
    'powerlaw',  # distfn.nnlf(est2, rvs) > distfn.nnlf(est1, rvs) otherwise
    'powerlognorm',
    'wrapcauchy',
    'rel_breitwigner',
]

# Distributions whose method-of-moments fit fails.
mm_failing_fits = ['alpha', 'betaprime', 'burr', 'burr12', 'cauchy', 'chi',
                   'chi2', 'crystalball', 'dgamma', 'dweibull', 'f',
                   'fatiguelife', 'fisk', 'foldcauchy', 'genextreme',
                   'gengamma', 'genhyperbolic', 'gennorm', 'genpareto',
                   'halfcauchy', 'invgamma', 'invweibull', 'johnsonsu',
                   'kappa3', 'ksone', 'kstwo', 'levy', 'levy_l',
                   'levy_stable', 'loglaplace', 'lomax', 'mielke', 'nakagami',
                   'ncf', 'nct', 'ncx2', 'pareto', 'powerlognorm', 'powernorm',
                   'rel_breitwigner', 'skewcauchy', 't', 'trapezoid', 'triang',
                   'truncpareto', 'truncweibull_min', 'tukeylambda',
                   'studentized_range']

# not sure if these fail, but they caused my patience to fail
mm_slow_fits = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
                'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
                'kappa4', 'kstwobign', 'recipinvgauss',
                'truncexpon', 'vonmises', 'vonmises_line']

failing_fits = {"MM": mm_failing_fits + mm_slow_fits, "MLE": mle_failing_fits}
# Distributions excluded from the interval-censored fitting check.
fail_interval_censored = {"truncpareto"}

# Don't run the fit test on these:
skip_fit = [
    'erlang',  # Subclass of gamma, generates a warning.
    'genhyperbolic',  # too slow
]
def cases_test_cont_fit():
    # this tests the closeness of the estimated parameters to the true
    # parameters with fit method of continuous distributions
    # Note: is slow, some distributions don't converge with sample
    # size <= 10000
    yield from ((name, shapes) for name, shapes in distcont
                if name not in skip_fit)
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg', cases_test_cont_fit())
@pytest.mark.parametrize('method', ["MLE", "MM"])
def test_cont_fit(distname, arg, method):
    # Fit each continuous distribution to samples drawn from itself with
    # known shape parameters, and check the estimates are within a
    # loose tolerance of the true values.  Known-bad distributions are
    # xfailed unless SCIPY_XFAIL is set.
    if distname in failing_fits[method]:
        # Skip failing fits unless overridden
        try:
            xfail = not int(os.environ['SCIPY_XFAIL'])
        except Exception:
            xfail = True
        if xfail:
            msg = "Fitting %s doesn't work reliably yet" % distname
            msg += (" [Set environment variable SCIPY_XFAIL=1 to run this"
                    " test nevertheless.]")
            pytest.xfail(msg)

    distfn = getattr(stats, distname)

    # True parameters: the shape args plus default loc=0, scale=1.
    truearg = np.hstack([arg, [0.0, 1.0]])
    diffthreshold = np.max(np.vstack([truearg*thresh_percent,
                                      np.full(distfn.numargs+2, thresh_min)]),
                           0)

    for fit_size in fit_sizes:
        # Note that if a fit succeeds, the other fit_sizes are skipped
        np.random.seed(1234)

        with np.errstate(all='ignore'):
            rvs = distfn.rvs(size=fit_size, *arg)
            if method == 'MLE' and distfn.name in mle_use_floc0:
                kwds = {'floc': 0}
            else:
                kwds = {}
            # start with default values
            est = distfn.fit(rvs, method=method, **kwds)
            if method == 'MLE':
                # Trivial test of the use of CensoredData.  The fit() method
                # will check that data contains no actual censored data, and
                # do a regular uncensored fit.
                data1 = stats.CensoredData(rvs)
                est1 = distfn.fit(data1, **kwds)
                msg = ('Different results fitting uncensored data wrapped as'
                       f' CensoredData: {distfn.name}: est={est} est1={est1}')
                assert_allclose(est1, est, rtol=1e-10, err_msg=msg)
            if method == 'MLE' and distname not in fail_interval_censored:
                # Convert the first `nic` values in rvs to interval-censored
                # values. The interval is small, so est2 should be close to
                # est.
                nic = 15
                interval = np.column_stack((rvs, rvs))
                interval[:nic, 0] *= 0.99
                interval[:nic, 1] *= 1.01
                interval.sort(axis=1)
                data2 = stats.CensoredData(interval=interval)
                est2 = distfn.fit(data2, **kwds)
                msg = ('Different results fitting interval-censored'
                       f' data: {distfn.name}: est={est} est2={est2}')
                assert_allclose(est2, est, rtol=0.05, err_msg=msg)

        diff = est - truearg

        # threshold for location
        diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,
                                    thresh_min])

        if np.any(np.isnan(est)):
            raise AssertionError('nan returned in fit')
        else:
            if np.all(np.abs(diff) <= diffthreshold):
                break
    else:
        txt = 'parameter: %s\n' % str(truearg)
        txt += 'estimated: %s\n' % str(est)
        txt += 'diff     : %s\n' % str(diff)
        raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
def _check_loc_scale_mle_fit(name, data, desired, atol=None):
d = getattr(stats, name)
actual = d.fit(data)[-2:]
assert_allclose(actual, desired, atol=atol,
err_msg='poor mle fit of (loc, scale) in %s' % name)
def test_non_default_loc_scale_mle_fit():
    """MLE of (loc, scale) for distributions whose support depends on loc."""
    sample = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00])
    _check_loc_scale_mle_fit('uniform', sample, [1.01, 0.99], 1e-3)
    _check_loc_scale_mle_fit('expon', sample, [1.01, 0.73875], 1e-3)
def test_expon_fit():
    """Regression test for gh-6167: expon.fit with loc fixed at 0."""
    sample = [0, 0, 0, 0, 2, 2, 2, 2]
    # With floc=0 the scale MLE is the sample mean, which is exactly 1 here.
    estimate = stats.expon.fit(sample, floc=0)
    assert_allclose(estimate, [0, 1.0], atol=1e-3)
def test_fit_error():
    """beta.fit on all-boundary 0/1 data must raise FitError (and warn)."""
    data = np.array([0.] * 29 + [1.] * 21)
    message = "Optimization converged to parameters that are..."
    with pytest.raises(FitError, match=message):
        with pytest.warns(RuntimeWarning):
            stats.beta.fit(data)
@pytest.mark.parametrize("dist, params",
                         [(stats.norm, (0.5, 2.5)),  # type: ignore[attr-defined] # noqa
                          (stats.binom, (10, 0.3, 2))])  # type: ignore[attr-defined] # noqa
def test_nnlf_and_related_methods(dist, params):
    """`nnlf` and `_penalized_nnlf` equal the negated sum of log-PDF/PMF."""
    rng = np.random.default_rng(983459824)
    # continuous distributions expose `pdf`; discrete ones expose `pmf`
    log_pxf = dist.logpdf if hasattr(dist, 'pdf') else dist.logpmf
    sample = dist.rvs(*params, size=100, random_state=rng)
    reference = -log_pxf(sample, *params).sum()
    assert_allclose(dist.nnlf(params, sample), reference)
    assert_allclose(dist._penalized_nnlf(params, sample), reference)
def cases_test_fit_mle():
    """Yield distribution names for the basic MLE fit test, attaching
    skip/slow/xslow pytest marks according to known behavior."""
    # These fail default test or hang
    skip_basic_fit = {'argus', 'foldnorm', 'truncpareto', 'truncweibull_min',
                      'ksone', 'levy_stable', 'studentized_range', 'kstwo',
                      'arcsine'}
    # Please keep this list in alphabetical order...
    slow_basic_fit = {'alpha',
                      'betaprime', 'binom', 'bradford', 'burr12',
                      'chi', 'crystalball', 'dweibull', 'exponpow',
                      'f', 'fatiguelife', 'fisk', 'foldcauchy',
                      'genexpon', 'genextreme', 'gennorm', 'genpareto',
                      'gompertz', 'halfgennorm', 'invgauss', 'invweibull',
                      'johnsonsb', 'johnsonsu', 'kappa3', 'kstwobign',
                      'loglaplace', 'lognorm', 'lomax', 'mielke',
                      'nakagami', 'nbinom', 'norminvgauss',
                      'pareto', 'pearson3', 'powerlaw', 'powernorm',
                      'randint', 'rdist', 'recipinvgauss', 'rice',
                      't', 'uniform', 'weibull_max', 'wrapcauchy'}
    # Please keep this list in alphabetical order...
    xslow_basic_fit = {'beta', 'betabinom', 'burr', 'exponweib',
                       'gausshyper', 'gengamma', 'genhalflogistic',
                       'genhyperbolic', 'geninvgauss',
                       'hypergeom', 'kappa4', 'loguniform',
                       'ncf', 'nchypergeom_fisher', 'nchypergeom_wallenius',
                       'nct', 'ncx2', 'nhypergeom',
                       'powerlognorm', 'reciprocal', 'rel_breitwigner',
                       'skellam', 'trapezoid', 'triang', 'truncnorm',
                       'tukeylambda', 'zipfian'}
    for name in dict(distdiscrete + distcont):
        if not isinstance(name, str) or name in skip_basic_fit:
            mark = pytest.mark.skip(reason="tested separately")
            yield pytest.param(name, marks=mark)
        elif name in slow_basic_fit:
            mark = pytest.mark.slow(reason="too slow (>= 0.25s)")
            yield pytest.param(name, marks=mark)
        elif name in xslow_basic_fit:
            mark = pytest.mark.xslow(reason="too slow (>= 1.0s)")
            yield pytest.param(name, marks=mark)
        else:
            yield name
def cases_test_fit_mse():
    """Yield distribution names for the basic MSE fit test, attaching
    skip/slow/xslow/filterwarnings pytest marks according to known behavior."""
    # the first four are so slow that I'm not sure whether they would pass
    skip_basic_fit = {'levy_stable', 'studentized_range', 'ksone', 'skewnorm',
                      'norminvgauss',  # super slow (~1 hr) but passes
                      'kstwo',  # very slow (~25 min) but passes
                      'geninvgauss',  # quite slow (~4 minutes) but passes
                      'gausshyper', 'genhyperbolic',  # integration warnings
                      'tukeylambda',  # close, but doesn't meet tolerance
                      'vonmises'}  # can have negative CDF; doesn't play nice
    # Please keep this list in alphabetical order...
    slow_basic_fit = {'alpha', 'anglit', 'arcsine', 'betabinom', 'bradford',
                      'chi', 'chi2', 'crystalball', 'dgamma', 'dweibull',
                      'erlang', 'exponnorm', 'exponpow', 'exponweib',
                      'fatiguelife', 'fisk', 'foldcauchy', 'foldnorm',
                      'gamma', 'genexpon', 'genextreme', 'genhalflogistic',
                      'genlogistic', 'genpareto', 'gompertz',
                      'hypergeom', 'invweibull', 'johnsonsb', 'johnsonsu',
                      'kappa3', 'kstwobign',
                      'laplace_asymmetric', 'loggamma', 'loglaplace',
                      'lognorm', 'lomax',
                      'maxwell', 'mielke', 'nakagami', 'nhypergeom',
                      'pareto', 'powernorm', 'randint', 'recipinvgauss',
                      'semicircular',
                      't', 'triang', 'truncexpon', 'truncpareto',
                      'truncweibull_min',
                      'uniform', 'vonmises_line',
                      'wald', 'weibull_max', 'weibull_min', 'wrapcauchy'}
    # Please keep this list in alphabetical order...
    xslow_basic_fit = {'beta', 'betaprime', 'burr', 'burr12',
                       'f', 'gengamma', 'gennorm',
                       'halfgennorm', 'invgamma', 'invgauss',
                       'kappa4', 'loguniform',
                       'ncf', 'nchypergeom_fisher', 'nchypergeom_wallenius',
                       'nct', 'ncx2',
                       'pearson3', 'powerlaw', 'powerlognorm',
                       'rdist', 'reciprocal', 'rel_breitwigner', 'rice',
                       'trapezoid', 'truncnorm',
                       'zipfian'}
    warns_basic_fit = {'skellam'}  # can remove mark after gh-14901 is resolved
    for name in dict(distdiscrete + distcont):
        if not isinstance(name, str) or name in skip_basic_fit:
            mark = pytest.mark.skip(reason="Fails. Oh well.")
            yield pytest.param(name, marks=mark)
        elif name in slow_basic_fit:
            mark = pytest.mark.slow(reason="too slow (>= 0.25s)")
            yield pytest.param(name, marks=mark)
        elif name in xslow_basic_fit:
            mark = pytest.mark.xslow(reason="too slow (>= 1.0s)")
            yield pytest.param(name, marks=mark)
        elif name in warns_basic_fit:
            mark = pytest.mark.filterwarnings('ignore::RuntimeWarning')
            yield pytest.param(name, marks=mark)
        else:
            yield name
def cases_test_fitstart():
    """Yield (name, shapes) pairs of continuous distributions for
    `test_fitstart`, omitting non-string entries and the slow cases."""
    too_slow = {'studentized_range', 'recipinvgauss'}  # slow
    for distname, shapes in dict(distcont).items():
        if isinstance(distname, str) and distname not in too_slow:
            yield distname, shapes
@pytest.mark.parametrize('distname, shapes', cases_test_fitstart())
def test_fitstart(distname, shapes):
    """`_fitstart` must return an admissible shape guess for random data."""
    distribution = getattr(stats, distname)
    rng = np.random.default_rng(216342614)
    sample = rng.random(10)
    # invalid/divide warnings during the heuristic are irrelevant to the test
    with np.errstate(invalid='ignore', divide='ignore'):
        guess = distribution._fitstart(sample)
    assert distribution._argcheck(*guess[:-2])
def assert_nlff_less_or_close(dist, data, params1, params0, rtol=1e-7, atol=0,
                              nlff_name='nnlf'):
    """Assert that the negative log fit function (`nlff_name`) of `dist` at
    `params1` is less than — or numerically close to — its value at `params0`.

    Used to check that a fitted parameter set is at least as good as a
    reference parameter set under the fit objective."""
    nlff_func = getattr(dist, nlff_name)
    value1 = nlff_func(params1, data)
    value0 = nlff_func(params0, data)
    if not (value1 < value0):
        np.testing.assert_allclose(value1, value0, rtol=rtol, atol=atol)
class TestFit:
    """Tests for `stats.fit`, which fits a distribution to data by optimizing
    an objective ('mle' or 'mse') within user-provided parameter bounds."""
    # Default distribution/data fixture used by the input-validation tests.
    dist = stats.binom  # type: ignore[attr-defined]
    seed = 654634816187
    rng = np.random.default_rng(seed)
    data = stats.binom.rvs(5, 0.5, size=100, random_state=rng)  # type: ignore[attr-defined] # noqa
    # equivalent shape bounds as a sequence and as a dict
    shape_bounds_a = [(1, 10), (0, 1)]
    shape_bounds_d = {'n': (1, 10), 'p': (0, 1)}
    # loose tolerances: the optimizer is stochastic and data is finite
    atol = 5e-2
    rtol = 1e-2
    tols = {'atol': atol, 'rtol': rtol}
    def opt(self, *args, **kwds):
        """Deterministic global optimizer passed to `stats.fit`."""
        return differential_evolution(*args, seed=0, **kwds)
    def test_dist_iv(self):
        """`dist` argument validation."""
        message = "`dist` must be an instance of..."
        with pytest.raises(ValueError, match=message):
            stats.fit(10, self.data, self.shape_bounds_a)
    def test_data_iv(self):
        """`data` argument validation: 1-D, finite, numeric."""
        message = "`data` must be exactly one-dimensional."
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, [[1, 2, 3]], self.shape_bounds_a)
        message = "All elements of `data` must be finite numbers."
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, [1, 2, 3, np.nan], self.shape_bounds_a)
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, [1, 2, 3, np.inf], self.shape_bounds_a)
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, ['1', '2', '3'], self.shape_bounds_a)
    def test_bounds_iv(self):
        """`bounds` argument validation: shapes, lengths, and feasibility."""
        message = "Bounds provided for the following unrecognized..."
        shape_bounds = {'n': (1, 10), 'p': (0, 1), '1': (0, 10)}
        with pytest.warns(RuntimeWarning, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        message = "Each element of a `bounds` sequence must be a tuple..."
        shape_bounds = [(1, 10, 3), (0, 1)]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        message = "Each element of `bounds` must be a tuple specifying..."
        shape_bounds = [(1, 10, 3), (0, 1, 0.5)]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        shape_bounds = [1, 0]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        message = "A `bounds` sequence must contain at least 2 elements..."
        shape_bounds = [(1, 10)]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        message = "A `bounds` sequence may not contain more than 3 elements..."
        bounds = [(1, 10), (1, 10), (1, 10), (1, 10)]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, bounds)
        message = "There are no values for `p` on the interval..."
        shape_bounds = {'n': (1, 10), 'p': (1, 0)}
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        message = "There are no values for `n` on the interval..."
        shape_bounds = [(10, 1), (0, 1)]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        message = "There are no integer values for `n` on the interval..."
        shape_bounds = [(1.4, 1.6), (0, 1)]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
        message = "The intersection of user-provided bounds for `n`"
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data)
        shape_bounds = [(-np.inf, np.inf), (0, 1)]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, shape_bounds)
    def test_guess_iv(self):
        """`guess` argument validation: unknown names, types, rounding,
        clipping."""
        message = "Guesses provided for the following unrecognized..."
        guess = {'n': 1, 'p': 0.5, '1': 255}
        with pytest.warns(RuntimeWarning, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        message = "Each element of `guess` must be a scalar..."
        guess = {'n': 1, 'p': 'hi'}
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        guess = [1, 'f']
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        guess = [[1, 2]]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        message = "A `guess` sequence must contain at least 2..."
        guess = [1]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        message = "A `guess` sequence may not contain more than 3..."
        guess = [1, 2, 3, 4]
        with pytest.raises(ValueError, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        message = "Guess for parameter `n` rounded..."
        guess = {'n': 4.5, 'p': -0.5}
        with pytest.warns(RuntimeWarning, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        message = "Guess for parameter `loc` rounded..."
        guess = [5, 0.5, 0.5]
        with pytest.warns(RuntimeWarning, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        message = "Guess for parameter `p` clipped..."
        guess = {'n': 5, 'p': -0.5}
        with pytest.warns(RuntimeWarning, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
        message = "Guess for parameter `loc` clipped..."
        guess = [5, 0.5, 1]
        with pytest.warns(RuntimeWarning, match=message):
            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
    def basic_fit_test(self, dist_name, method):
        """Fit `dist_name` to random data generated at known parameters and
        check that the fitted objective value is at least as good as the
        objective at the true parameters."""
        N = 5000
        dist_data = dict(distcont + distdiscrete)
        rng = np.random.default_rng(self.seed)
        dist = getattr(stats, dist_name)
        shapes = np.array(dist_data[dist_name])
        bounds = np.empty((len(shapes) + 2, 2), dtype=np.float64)
        # bracket each true shape within a factor of 10 on either side
        bounds[:-2, 0] = shapes/10.**np.sign(shapes)
        bounds[:-2, 1] = shapes*10.**np.sign(shapes)
        bounds[-2] = (0, 10)
        bounds[-1] = (1e-16, 10)
        loc = rng.uniform(*bounds[-2])
        scale = rng.uniform(*bounds[-1])
        ref = list(dist_data[dist_name]) + [loc, scale]
        if getattr(dist, 'pmf', False):
            # discrete distributions have no scale; loc must be an integer
            ref = ref[:-1]
            ref[-1] = np.floor(loc)
            data = dist.rvs(*ref, size=N, random_state=rng)
            bounds = bounds[:-1]
        if getattr(dist, 'pdf', False):
            data = dist.rvs(*ref, size=N, random_state=rng)
        with npt.suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "overflow encountered")
            res = stats.fit(dist, data, bounds, method=method,
                            optimizer=self.opt)
        # map fit method to the corresponding negative log fit function
        nlff_names = {'mle': 'nnlf', 'mse': '_penalized_nlpsf'}
        nlff_name = nlff_names[method]
        assert_nlff_less_or_close(dist, data, res.params, ref, **self.tols,
                                  nlff_name=nlff_name)
    @pytest.mark.parametrize("dist_name", cases_test_fit_mle())
    def test_basic_fit_mle(self, dist_name):
        """Run the basic fit test with maximum likelihood estimation."""
        self.basic_fit_test(dist_name, "mle")
    @pytest.mark.parametrize("dist_name", cases_test_fit_mse())
    def test_basic_fit_mse(self, dist_name):
        """Run the basic fit test with maximum spacing estimation."""
        self.basic_fit_test(dist_name, "mse")
    def test_arcsine(self):
        # Can't guarantee that all distributions will fit all data with
        # arbitrary bounds. This distribution just happens to fail above.
        # Try something slightly different.
        N = 1000
        rng = np.random.default_rng(self.seed)
        dist = stats.arcsine
        shapes = (1., 2.)
        data = dist.rvs(*shapes, size=N, random_state=rng)
        shape_bounds = {'loc': (0.1, 10), 'scale': (0.1, 10)}
        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
    def test_argus(self):
        # Can't guarantee that all distributions will fit all data with
        # arbitrary bounds. This distribution just happens to fail above.
        # Try something slightly different.
        N = 1000
        rng = np.random.default_rng(self.seed)
        dist = stats.argus
        shapes = (1., 2., 3.)
        data = dist.rvs(*shapes, size=N, random_state=rng)
        shape_bounds = {'chi': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
    def test_foldnorm(self):
        # Can't guarantee that all distributions will fit all data with
        # arbitrary bounds. This distribution just happens to fail above.
        # Try something slightly different.
        N = 1000
        rng = np.random.default_rng(self.seed)
        dist = stats.foldnorm
        shapes = (1.952125337355587, 2., 3.)
        data = dist.rvs(*shapes, size=N, random_state=rng)
        shape_bounds = {'c': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
    def test_truncpareto(self):
        # Can't guarantee that all distributions will fit all data with
        # arbitrary bounds. This distribution just happens to fail above.
        # Try something slightly different.
        N = 1000
        rng = np.random.default_rng(self.seed)
        dist = stats.truncpareto
        shapes = (1.8, 5.3, 2.3, 4.1)
        data = dist.rvs(*shapes, size=N, random_state=rng)
        shape_bounds = [(0.1, 10)]*4
        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
    def test_truncweibull_min(self):
        # Can't guarantee that all distributions will fit all data with
        # arbitrary bounds. This distribution just happens to fail above.
        # Try something slightly different.
        N = 1000
        rng = np.random.default_rng(self.seed)
        dist = stats.truncweibull_min
        shapes = (2.5, 0.25, 1.75, 2., 3.)
        data = dist.rvs(*shapes, size=N, random_state=rng)
        shape_bounds = [(0.1, 10)]*5
        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
    def test_missing_shape_bounds(self):
        # some distributions have a small domain w.r.t. a parameter, e.g.
        # $p \in [0, 1]$ for binomial distribution
        # User does not need to provide these because the intersection of the
        # user's bounds (none) and the distribution's domain is finite
        N = 1000
        rng = np.random.default_rng(self.seed)
        dist = stats.binom
        n, p, loc = 10, 0.65, 0
        data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
        shape_bounds = {'n': np.array([0, 20])}  # check arrays are OK, too
        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
        assert_allclose(res.params, (n, p, loc), **self.tols)
        dist = stats.bernoulli
        p, loc = 0.314159, 0
        data = dist.rvs(p, loc=loc, size=N, random_state=rng)
        res = stats.fit(dist, data, optimizer=self.opt)
        assert_allclose(res.params, (p, loc), **self.tols)
    def test_fit_only_loc_scale(self):
        """`fit` works when only loc, only scale, or both are free."""
        # fit only loc
        N = 5000
        rng = np.random.default_rng(self.seed)
        dist = stats.norm
        loc, scale = 1.5, 1
        data = dist.rvs(loc=loc, size=N, random_state=rng)
        loc_bounds = (0, 5)
        bounds = {'loc': loc_bounds}
        res = stats.fit(dist, data, bounds, optimizer=self.opt)
        assert_allclose(res.params, (loc, scale), **self.tols)
        # fit only scale
        loc, scale = 0, 2.5
        data = dist.rvs(scale=scale, size=N, random_state=rng)
        scale_bounds = (0.01, 5)
        bounds = {'scale': scale_bounds}
        res = stats.fit(dist, data, bounds, optimizer=self.opt)
        assert_allclose(res.params, (loc, scale), **self.tols)
        # fit only loc and scale
        dist = stats.norm
        loc, scale = 1.5, 2.5
        data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
        bounds = {'loc': loc_bounds, 'scale': scale_bounds}
        res = stats.fit(dist, data, bounds, optimizer=self.opt)
        assert_allclose(res.params, (loc, scale), **self.tols)
    def test_everything_fixed(self):
        """With all parameters fixed by degenerate bounds, `fit` returns
        those fixed values."""
        N = 5000
        rng = np.random.default_rng(self.seed)
        dist = stats.norm
        loc, scale = 1.5, 2.5
        data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
        # loc, scale fixed to 0, 1 by default
        res = stats.fit(dist, data)
        assert_allclose(res.params, (0, 1), **self.tols)
        # loc, scale explicitly fixed
        bounds = {'loc': (loc, loc), 'scale': (scale, scale)}
        res = stats.fit(dist, data, bounds)
        assert_allclose(res.params, (loc, scale), **self.tols)
        # `n` gets fixed during polishing
        dist = stats.binom
        n, p, loc = 10, 0.65, 0
        data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
        shape_bounds = {'n': (0, 20), 'p': (0.65, 0.65)}
        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
        assert_allclose(res.params, (n, p, loc), **self.tols)
    def test_failure(self):
        """When bounds exclude any feasible fit, `res.success` is False and
        the message explains why."""
        N = 5000
        rng = np.random.default_rng(self.seed)
        dist = stats.nbinom
        shapes = (5, 0.5)
        data = dist.rvs(*shapes, size=N, random_state=rng)
        assert data.min() == 0
        # With lower bounds on location at 0.5, likelihood is zero
        bounds = [(0, 30), (0, 1), (0.5, 10)]
        res = stats.fit(dist, data, bounds)
        message = "Optimization converged to parameter values that are"
        assert res.message.startswith(message)
        assert res.success is False
    @pytest.mark.xslow
    def test_guess(self):
        # Test that guess helps DE find the desired solution
        N = 2000
        # With some seeds, `fit` doesn't need a guess
        rng = np.random.default_rng(1963904448561)
        dist = stats.nhypergeom
        params = (20, 7, 12, 0)
        bounds = [(2, 200), (0.7, 70), (1.2, 120), (0, 10)]
        data = dist.rvs(*params, size=N, random_state=rng)
        res = stats.fit(dist, data, bounds, optimizer=self.opt)
        assert not np.allclose(res.params, params, **self.tols)
        res = stats.fit(dist, data, bounds, guess=params, optimizer=self.opt)
        assert_allclose(res.params, params, **self.tols)
    def test_mse_accuracy_1(self):
        # Test maximum spacing estimation against example from Wikipedia
        # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples
        data = [2, 4]
        dist = stats.expon
        bounds = {'loc': (0, 0), 'scale': (1e-8, 10)}
        res_mle = stats.fit(dist, data, bounds=bounds, method='mle')
        assert_allclose(res_mle.params.scale, 3, atol=1e-3)
        res_mse = stats.fit(dist, data, bounds=bounds, method='mse')
        assert_allclose(res_mse.params.scale, 3.915, atol=1e-3)
    def test_mse_accuracy_2(self):
        # Test maximum spacing estimation against example from Wikipedia
        # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples
        rng = np.random.default_rng(9843212616816518964)
        dist = stats.uniform
        n = 10
        data = dist(3, 6).rvs(size=n, random_state=rng)
        bounds = {'loc': (0, 10), 'scale': (1e-8, 10)}
        res = stats.fit(dist, data, bounds=bounds, method='mse')
        # (loc=3.608118420015416, scale=5.509323262055043)
        x = np.sort(data)
        a = (n*x[0] - x[-1])/(n - 1)
        b = (n*x[-1] - x[0])/(n - 1)
        ref = a, b-a  # (3.6081133632151503, 5.509328130317254)
        assert_allclose(res.params, ref, rtol=1e-4)
# Data from Matlab: https://www.mathworks.com/help/stats/lillietest.html
# Shared fixture of 120 exam scores used by the goodness-of-fit tests below.
examgrades = [65, 61, 81, 88, 69, 89, 55, 84, 86, 84, 71, 81, 84, 81, 78, 67,
              96, 66, 73, 75, 59, 71, 69, 63, 79, 76, 63, 85, 87, 88, 80, 71,
              65, 84, 71, 75, 81, 79, 64, 65, 84, 77, 70, 75, 84, 75, 73, 92,
              90, 79, 80, 71, 73, 71, 58, 79, 73, 64, 77, 82, 81, 59, 54, 82,
              57, 79, 79, 73, 74, 82, 63, 64, 73, 69, 87, 68, 81, 73, 83, 73,
              80, 73, 73, 71, 66, 78, 64, 74, 68, 67, 75, 75, 80, 85, 74, 76,
              80, 77, 93, 70, 86, 80, 81, 83, 68, 60, 85, 64, 74, 82, 81, 77,
              66, 85, 75, 81, 69, 60, 83, 72]
class TestGoodnessOfFit:
    """Tests for `stats.goodness_of_fit`, a Monte Carlo generalization of
    one-sample goodness-of-fit tests (KS, CvM, AD, Filliben)."""
    def test_gof_iv(self):
        """Input validation: informative errors for bad arguments."""
        dist = stats.norm
        x = [1, 2, 3]
        message = r"`dist` must be a \(non-frozen\) instance of..."
        with pytest.raises(TypeError, match=message):
            goodness_of_fit(stats.norm(), x)
        message = "`data` must be a one-dimensional array of numbers."
        with pytest.raises(ValueError, match=message):
            goodness_of_fit(dist, [[1, 2, 3]])
        message = "`statistic` must be one of..."
        with pytest.raises(ValueError, match=message):
            goodness_of_fit(dist, x, statistic='mm')
        message = "`n_mc_samples` must be an integer."
        with pytest.raises(TypeError, match=message):
            goodness_of_fit(dist, x, n_mc_samples=1000.5)
        message = "'herring' cannot be used to seed a"
        with pytest.raises(ValueError, match=message):
            goodness_of_fit(dist, x, random_state='herring')
    def test_against_ks(self):
        """With all parameters known, agrees with the exact KS test."""
        rng = np.random.default_rng(8517426291317196949)
        x = examgrades
        known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
        res = goodness_of_fit(stats.norm, x, known_params=known_params,
                              statistic='ks', random_state=rng)
        ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
        assert_allclose(res.statistic, ref.statistic)  # ~0.0848
        assert_allclose(res.pvalue, ref.pvalue, atol=5e-3)  # ~0.335
    def test_against_lilliefors(self):
        """With parameters estimated, reproduces the Lilliefors p-value."""
        rng = np.random.default_rng(2291803665717442724)
        x = examgrades
        res = goodness_of_fit(stats.norm, x, statistic='ks', random_state=rng)
        known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
        ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
        assert_allclose(res.statistic, ref.statistic)  # ~0.0848
        assert_allclose(res.pvalue, 0.0348, atol=5e-3)
    def test_against_cvm(self):
        """With all parameters known, agrees with `stats.cramervonmises`."""
        rng = np.random.default_rng(8674330857509546614)
        x = examgrades
        known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
        res = goodness_of_fit(stats.norm, x, known_params=known_params,
                              statistic='cvm', random_state=rng)
        ref = stats.cramervonmises(x, stats.norm(**known_params).cdf)
        assert_allclose(res.statistic, ref.statistic)  # ~0.090
        assert_allclose(res.pvalue, ref.pvalue, atol=5e-3)  # ~0.636
    def test_against_anderson_case_0(self):
        # "Case 0" is where loc and scale are known [1]
        rng = np.random.default_rng(7384539336846690410)
        x = np.arange(1, 101)
        # loc that produced critical value of statistic found w/ root_scalar
        known_params = {'loc': 45.01575354024957, 'scale': 30}
        res = goodness_of_fit(stats.norm, x, known_params=known_params,
                              statistic='ad', random_state=rng)
        assert_allclose(res.statistic, 2.492)  # See [1] Table 1A 1.0
        assert_allclose(res.pvalue, 0.05, atol=5e-3)
    def test_against_anderson_case_1(self):
        # "Case 1" is where scale is known and loc is fit [1]
        rng = np.random.default_rng(5040212485680146248)
        x = np.arange(1, 101)
        # scale that produced critical value of statistic found w/ root_scalar
        known_params = {'scale': 29.957112639101933}
        res = goodness_of_fit(stats.norm, x, known_params=known_params,
                              statistic='ad', random_state=rng)
        assert_allclose(res.statistic, 0.908)  # See [1] Table 1B 1.1
        assert_allclose(res.pvalue, 0.1, atol=5e-3)
    def test_against_anderson_case_2(self):
        # "Case 2" is where loc is known and scale is fit [1]
        rng = np.random.default_rng(726693985720914083)
        x = np.arange(1, 101)
        # loc that produced critical value of statistic found w/ root_scalar
        known_params = {'loc': 44.5680212261933}
        res = goodness_of_fit(stats.norm, x, known_params=known_params,
                              statistic='ad', random_state=rng)
        assert_allclose(res.statistic, 2.904)  # See [1] Table 1B 1.2
        assert_allclose(res.pvalue, 0.025, atol=5e-3)
    def test_against_anderson_case_3(self):
        # "Case 3" is where both loc and scale are fit [1]
        rng = np.random.default_rng(6763691329830218206)
        # c that produced critical value of statistic found w/ root_scalar
        x = stats.skewnorm.rvs(1.4477847789132101, loc=1, scale=2, size=100,
                               random_state=rng)
        res = goodness_of_fit(stats.norm, x, statistic='ad', random_state=rng)
        assert_allclose(res.statistic, 0.559)  # See [1] Table 1B 1.2
        assert_allclose(res.pvalue, 0.15, atol=5e-3)
    @pytest.mark.slow
    def test_against_anderson_gumbel_r(self):
        """AD statistic/p-value agree with `stats.anderson` for gumbel_r."""
        rng = np.random.default_rng(7302761058217743)
        # c that produced critical value of statistic found w/ root_scalar
        x = stats.genextreme(0.051896837188595134, loc=0.5,
                             scale=1.5).rvs(size=1000, random_state=rng)
        res = goodness_of_fit(stats.gumbel_r, x, statistic='ad',
                              random_state=rng)
        ref = stats.anderson(x, dist='gumbel_r')
        assert_allclose(res.statistic, ref.critical_values[0])
        assert_allclose(res.pvalue, ref.significance_level[0]/100, atol=5e-3)
    def test_against_filliben_norm(self):
        # Test against `stats.fit` ref. [7] Section 8 "Example"
        rng = np.random.default_rng(8024266430745011915)
        y = [6, 1, -4, 8, -2, 5, 0]
        known_params = {'loc': 0, 'scale': 1}
        res = stats.goodness_of_fit(stats.norm, y, known_params=known_params,
                                    statistic="filliben", random_state=rng)
        # Slight discrepancy presumably due to roundoff in Filliben's
        # calculation. Using exact order statistic medians instead of
        # Filliben's approximation doesn't account for it.
        assert_allclose(res.statistic, 0.98538, atol=1e-4)
        assert 0.75 < res.pvalue < 0.9
        # Using R's ppcc library:
        # library(ppcc)
        # options(digits=16)
        # x < - c(6, 1, -4, 8, -2, 5, 0)
        # set.seed(100)
        # ppccTest(x, "qnorm", ppos="Filliben")
        # Discrepancy with
        assert_allclose(res.statistic, 0.98540957187084, rtol=2e-5)
        assert_allclose(res.pvalue, 0.8875, rtol=2e-3)
    def test_filliben_property(self):
        # Filliben's statistic should be independent of data location and scale
        rng = np.random.default_rng(8535677809395478813)
        x = rng.normal(loc=10, scale=0.5, size=100)
        res = stats.goodness_of_fit(stats.norm, x,
                                    statistic="filliben", random_state=rng)
        known_params = {'loc': 0, 'scale': 1}
        ref = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
                                    statistic="filliben", random_state=rng)
        assert_allclose(res.statistic, ref.statistic, rtol=1e-15)
    @pytest.mark.parametrize('case', [(25, [.928, .937, .950, .958, .966]),
                                      (50, [.959, .965, .972, .977, .981]),
                                      (95, [.977, .979, .983, .986, .989])])
    def test_against_filliben_norm_table(self, case):
        # Test against `stats.fit` ref. [7] Table 1
        rng = np.random.default_rng(504569995557928957)
        n, ref = case
        x = rng.random(n)
        known_params = {'loc': 0, 'scale': 1}
        res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
                                    statistic="filliben", random_state=rng)
        percentiles = np.array([0.005, 0.01, 0.025, 0.05, 0.1])
        res = stats.scoreatpercentile(res.null_distribution, percentiles*100)
        assert_allclose(res, ref, atol=2e-3)
    @pytest.mark.slow
    @pytest.mark.parametrize('case', [(5, 0.95772790260469, 0.4755),
                                      (6, 0.95398832257958, 0.3848),
                                      (7, 0.9432692889277, 0.2328)])
    def test_against_ppcc(self, case):
        # Test against R ppcc, e.g.
        # library(ppcc)
        # options(digits=16)
        # x < - c(0.52325412, 1.06907699, -0.36084066, 0.15305959, 0.99093194)
        # set.seed(100)
        # ppccTest(x, "qrayleigh", ppos="Filliben")
        n, ref_statistic, ref_pvalue = case
        rng = np.random.default_rng(7777775561439803116)
        x = rng.normal(size=n)
        res = stats.goodness_of_fit(stats.rayleigh, x, statistic="filliben",
                                    random_state=rng)
        assert_allclose(res.statistic, ref_statistic, rtol=1e-4)
        assert_allclose(res.pvalue, ref_pvalue, atol=1.5e-2)
    def test_params_effects(self):
        # Ensure that `guessed_params`, `fit_params`, and `known_params` have
        # the intended effects.
        rng = np.random.default_rng(9121950977643805391)
        x = stats.skewnorm.rvs(-5.044559778383153, loc=1, scale=2, size=50,
                               random_state=rng)
        # Show that `guessed_params` don't fit to the guess,
        # but `fit_params` and `known_params` respect the provided fit
        guessed_params = {'c': 13.4}
        fit_params = {'scale': 13.73}
        known_params = {'loc': -13.85}
        rng = np.random.default_rng(9121950977643805391)
        res1 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
                               guessed_params=guessed_params,
                               fit_params=fit_params,
                               known_params=known_params, random_state=rng)
        assert not np.allclose(res1.fit_result.params.c, 13.4)
        assert_equal(res1.fit_result.params.scale, 13.73)
        assert_equal(res1.fit_result.params.loc, -13.85)
        # Show that changing the guess changes the parameter that gets fit,
        # and it changes the null distribution
        guessed_params = {'c': 2}
        rng = np.random.default_rng(9121950977643805391)
        res2 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
                               guessed_params=guessed_params,
                               fit_params=fit_params,
                               known_params=known_params, random_state=rng)
        assert not np.allclose(res2.fit_result.params.c,
                               res1.fit_result.params.c, rtol=1e-8)
        assert not np.allclose(res2.null_distribution,
                               res1.null_distribution, rtol=1e-8)
        assert_equal(res2.fit_result.params.scale, 13.73)
        assert_equal(res2.fit_result.params.loc, -13.85)
        # If we set all parameters as fit_params and known_params,
        # they're all fixed to those values, but the null distribution
        # varies.
        fit_params = {'c': 13.4, 'scale': 13.73}
        rng = np.random.default_rng(9121950977643805391)
        res3 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
                               guessed_params=guessed_params,
                               fit_params=fit_params,
                               known_params=known_params, random_state=rng)
        assert_equal(res3.fit_result.params.c, 13.4)
        assert_equal(res3.fit_result.params.scale, 13.73)
        assert_equal(res3.fit_result.params.loc, -13.85)
        assert not np.allclose(res3.null_distribution, res1.null_distribution)
class TestFitResult:
    """Tests for the `FitResult` object returned by `stats.fit`."""
    def test_plot_iv(self):
        """`plot` raises an informative error for an invalid `plot_type`,
        whether or not matplotlib is installed."""
        gen = np.random.default_rng(1769658657308472721)
        sample = stats.norm.rvs(0, 1, size=100, random_state=gen)
        def optimizer(*args, **kwargs):
            # seed DE with the same generator so the fit is reproducible
            return differential_evolution(*args, **kwargs, seed=gen)
        res = stats.fit(stats.norm, sample, [(0, 30), (0, 1)],
                        optimizer=optimizer)
        try:
            import matplotlib  # noqa
            with pytest.raises(ValueError,
                               match=r"`plot_type` must be one of \{'..."):
                res.plot(plot_type='llama')
        except (ModuleNotFoundError, ImportError):
            # matplotlib unavailable: `plot` itself must raise
            message = r"matplotlib must be installed to use method `plot`."
            with pytest.raises(ModuleNotFoundError, match=message):
                res.plot(plot_type='llama')
# --- End of embedded test_fit.py content.
# --- The lines below originate from a second concatenated file:
# --- scipy-main/scipy/stats/tests/test_rank.py
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import rankdata, tiecorrect
import pytest
class TestTieCorrect:
    """Tests for `scipy.stats.tiecorrect`, the tie-correction factor
    1 - sum(T^3 - T) / (N^3 - N) used by rank-based tests."""

    def test_empty(self):
        """An empty array requires no correction, should return 1.0."""
        assert_equal(tiecorrect(np.array([], dtype=np.float64)), 1.0)

    def test_one(self):
        """A single element requires no correction, should return 1.0."""
        assert_equal(tiecorrect(np.array([1.0], dtype=np.float64)), 1.0)

    def test_no_correction(self):
        """Arrays with no ties require no correction."""
        for n in (2.0, 3.0):
            assert_equal(tiecorrect(np.arange(n)), 1.0)

    def test_basic(self):
        """Check a few basic examples of the tie correction factor."""
        def expected_factor(ranks, tie_sizes):
            # 1 - sum(T^3 - T) / (N^3 - N), where N is the sample size and
            # each T is the size of one group of tied values.
            N = ranks.size
            total = sum(T**3 - T for T in tie_sizes)
            return 1.0 - total / (N**3 - N)

        cases = [
            # one tie of two elements
            (np.array([1.0, 2.5, 2.5]), [2.0]),
            # one tie of two elements (tie is not at the end)
            (np.array([1.5, 1.5, 3.0]), [2.0]),
            # one tie of three elements
            (np.array([1.0, 3.0, 3.0, 3.0]), [3.0]),
            # two ties, lengths 2 and 3
            (np.array([1.5, 1.5, 4.0, 4.0, 4.0]), [2.0, 3.0]),
        ]
        for ranks, tie_sizes in cases:
            assert_equal(tiecorrect(ranks), expected_factor(ranks, tie_sizes))

    def test_overflow(self):
        """The computation must not overflow for large tied groups."""
        ntie, k = 2000, 5
        a = np.repeat(np.arange(k), ntie)
        n = a.size  # ntie * k
        out = tiecorrect(rankdata(a))
        assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
class TestRankData:
def test_empty(self):
"""stats.rankdata([]) should return an empty array."""
a = np.array([], dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([], dtype=np.float64))
r = rankdata([])
assert_array_equal(r, np.array([], dtype=np.float64))
def test_one(self):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
r = rankdata(data)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
def test_basic(self):
"""Basic tests of stats.rankdata."""
data = [100, 10, 50]
expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
# The docstring states explicitly that the argument is flattened.
a2d = a.reshape(2, 3)
r = rankdata(a2d)
assert_array_equal(r, expected)
def test_rankdata_object_string(self):
def min_rank(a):
return [1 + sum(i < j for i in a) for j in a]
def max_rank(a):
return [sum(i <= j for i in a) for j in a]
def ordinal_rank(a):
return min_rank([(x, i) for i, x in enumerate(a)])
def average_rank(a):
return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
def dense_rank(a):
b = np.unique(a)
return [1 + sum(i < j for i in b) for j in a]
rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
average=average_rank, dense=dense_rank)
def check_ranks(a):
for method in 'min', 'max', 'dense', 'ordinal', 'average':
out = rankdata(a, method=method)
assert_array_equal(out, rankf[method](a))
val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
check_ranks(np.random.choice(val, 200))
check_ranks(np.random.choice(val, 200).astype('object'))
val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
check_ranks(np.random.choice(val, 200).astype('object'))
def test_large_int(self):
data = np.array([2**60, 2**60+1], dtype=np.uint64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, 2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, -2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [2.0, 1.0])
def test_big_tie(self):
for n in [10000, 100000, 1000000]:
data = np.ones(n, dtype=int)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
assert_array_equal(r, expected_rank * data,
"test failed with n=%d" % n)
def test_axis(self):
data = [[0, 2, 1],
[4, 2, 2]]
expected0 = [[1., 1.5, 1.],
[2., 1.5, 2.]]
r0 = rankdata(data, axis=0)
assert_array_equal(r0, expected0)
expected1 = [[1., 3., 2.],
[3., 1.5, 1.5]]
r1 = rankdata(data, axis=1)
assert_array_equal(r1, expected1)
methods = ["average", "min", "max", "dense", "ordinal"]
dtypes = [np.float64] + [np.int_]*4
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("method, dtype", zip(methods, dtypes))
def test_size_0_axis(self, axis, method, dtype):
shape = (3, 0)
data = np.zeros(shape)
r = rankdata(data, method=method, axis=axis)
assert_equal(r.shape, shape)
assert_equal(r.dtype, dtype)
@pytest.mark.parametrize('axis', range(3))
@pytest.mark.parametrize('method', methods)
def test_nan_policy_omit_3d(self, axis, method):
shape = (20, 21, 22)
rng = np.random.default_rng(abs(hash('falafel')))
a = rng.random(size=shape)
i = rng.random(size=shape) < 0.4
j = rng.random(size=shape) < 0.1
k = rng.random(size=shape) < 0.1
a[i] = np.nan
a[j] = -np.inf
a[k] - np.inf
def rank_1d_omit(a, method):
out = np.zeros_like(a)
i = np.isnan(a)
a_compressed = a[~i]
res = rankdata(a_compressed, method)
out[~i] = res
out[i] = np.nan
return out
def rank_omit(a, method, axis):
return np.apply_along_axis(lambda a: rank_1d_omit(a, method),
axis, a)
res = rankdata(a, method, axis=axis, nan_policy='omit')
res0 = rank_omit(a, method, axis=axis)
assert_array_equal(res, res0)
def test_nan_policy_2d_axis_none(self):
# 2 2d-array test with axis=None
data = [[0, np.nan, 3],
[4, 2, np.nan],
[1, 2, 2]]
assert_array_equal(rankdata(data, axis=None, nan_policy='omit'),
[1., np.nan, 6., 7., 4., np.nan, 2., 4., 4.])
assert_array_equal(rankdata(data, axis=None, nan_policy='propagate'),
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan])
def test_nan_policy_raise(self):
# 1 1d-array test
data = [0, 2, 3, -2, np.nan, np.nan]
with pytest.raises(ValueError, match="The input contains nan"):
rankdata(data, nan_policy='raise')
# 2 2d-array test
data = [[0, np.nan, 3],
[4, 2, np.nan],
[np.nan, 2, 2]]
with pytest.raises(ValueError, match="The input contains nan"):
rankdata(data, axis=0, nan_policy="raise")
with pytest.raises(ValueError, match="The input contains nan"):
rankdata(data, axis=1, nan_policy="raise")
def test_nan_policy_propagate(self):
# 1 1d-array test
data = [0, 2, 3, -2, np.nan, np.nan]
assert_array_equal(rankdata(data, nan_policy='propagate'),
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
# 2 2d-array test
data = [[0, np.nan, 3],
[4, 2, np.nan],
[1, 2, 2]]
assert_array_equal(rankdata(data, axis=0, nan_policy='propagate'),
[[1, np.nan, np.nan],
[3, np.nan, np.nan],
[2, np.nan, np.nan]])
assert_array_equal(rankdata(data, axis=1, nan_policy='propagate'),
[[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[1, 2.5, 2.5]])
_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
def test_cases():
for values, method, expected in _cases:
r = rankdata(values, method=method)
assert_array_equal(r, expected)
| 11,321
| 33.623853
| 77
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_stats.py
|
""" Test functions for stats module
WRITTEN BY LOUIS LUANGKESORN <lluang@yahoo.com> FOR THE STATS MODULE
BASED ON WILKINSON'S STATISTICS QUIZ
https://www.stanford.edu/~clint/bench/wilk.txt
Additional tests by a host of SciPy developers.
"""
import os
import re
import warnings
from collections import namedtuple
from itertools import product
from numpy.testing import (assert_, assert_equal,
assert_almost_equal, assert_array_almost_equal,
assert_array_equal, assert_approx_equal,
assert_allclose, assert_warns, suppress_warnings,
assert_array_less)
import pytest
from pytest import raises as assert_raises
import numpy.ma.testutils as mat
from numpy import array, arange, float32, float64, power
import numpy as np
import scipy.stats as stats
import scipy.stats.mstats as mstats
import scipy.stats._mstats_basic as mstats_basic
from scipy.stats._ksstats import kolmogn
from scipy.special._testutils import FuncData
from scipy.special import binom
from scipy import optimize
from .common_tests import check_named_results
from scipy.spatial.distance import cdist
from numpy.lib import NumpyVersion
from scipy.stats._axis_nan_policy import _broadcast_concatenate
from scipy.stats._stats_py import _permutation_distribution_t
""" Numbers in docstrings beginning with 'W' refer to the section numbers
and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are
considered to be essential functionality. True testing and
evaluation of a statistics package requires use of the
NIST Statistical test data. See McCoullough(1999) Assessing The Reliability
of Statistical Software for a test methodology and its
implementation in testing SAS, SPSS, and S-Plus
"""
# Datasets
# These data sets are from the nasty.dat sets used by Wilkinson
# For completeness, I should write the relevant tests and count them as failures
# Somewhat acceptable, since this is still beta software. It would count as a
# good target for 1.0 status
# Reference datasets from Wilkinson's nasty.dat: a simple ramp plus
# variants designed to stress numerical precision (huge offsets, values
# near 1.0, extreme magnitudes, and exact-half rounding cases).
X = array([1, 2, 3, 4, 5, 6, 7, 8, 9], float)
ZERO = array([0, 0, 0, 0, 0, 0, 0, 0, 0], float)
BIG = array([99999991, 99999992, 99999993, 99999994, 99999995, 99999996,
             99999997, 99999998, 99999999], float)
LITTLE = array([0.99999991, 0.99999992, 0.99999993, 0.99999994, 0.99999995,
                0.99999996, 0.99999997, 0.99999998, 0.99999999], float)
HUGE = array([1e+12, 2e+12, 3e+12, 4e+12, 5e+12, 6e+12, 7e+12, 8e+12, 9e+12],
             float)
TINY = array([1e-12, 2e-12, 3e-12, 4e-12, 5e-12, 6e-12, 7e-12, 8e-12, 9e-12],
             float)
ROUND = array([0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5], float)
class TestTrimmedStats:
# TODO: write these tests to handle missing values properly
dprec = np.finfo(np.float64).precision
def test_tmean(self):
y = stats.tmean(X, (2, 8), (True, True))
assert_approx_equal(y, 5.0, significant=self.dprec)
y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False))
y2 = stats.tmean(X, limits=None)
assert_approx_equal(y1, y2, significant=self.dprec)
x_2d = arange(63, dtype=float64).reshape(9, 7)
y = stats.tmean(x_2d, axis=None)
assert_approx_equal(y, x_2d.mean(), significant=self.dprec)
y = stats.tmean(x_2d, axis=0)
assert_array_almost_equal(y, x_2d.mean(axis=0), decimal=8)
y = stats.tmean(x_2d, axis=1)
assert_array_almost_equal(y, x_2d.mean(axis=1), decimal=8)
y = stats.tmean(x_2d, limits=(2, 61), axis=None)
assert_approx_equal(y, 31.5, significant=self.dprec)
y = stats.tmean(x_2d, limits=(2, 21), axis=0)
y_true = [14, 11.5, 9, 10, 11, 12, 13]
assert_array_almost_equal(y, y_true, decimal=8)
y = stats.tmean(x_2d, limits=(2, 21), inclusive=(True, False), axis=0)
y_true = [10.5, 11.5, 9, 10, 11, 12, 13]
assert_array_almost_equal(y, y_true, decimal=8)
x_2d_with_nan = np.array(x_2d)
x_2d_with_nan[-1, -3:] = np.nan
y = stats.tmean(x_2d_with_nan, limits=(1, 13), axis=0)
y_true = [7, 4.5, 5.5, 6.5, np.nan, np.nan, np.nan]
assert_array_almost_equal(y, y_true, decimal=8)
with suppress_warnings() as sup:
sup.record(RuntimeWarning, "Mean of empty slice")
y = stats.tmean(x_2d, limits=(2, 21), axis=1)
y_true = [4, 10, 17, 21, np.nan, np.nan, np.nan, np.nan, np.nan]
assert_array_almost_equal(y, y_true, decimal=8)
y = stats.tmean(x_2d, limits=(2, 21),
inclusive=(False, True), axis=1)
y_true = [4.5, 10, 17, 21, np.nan, np.nan, np.nan, np.nan, np.nan]
assert_array_almost_equal(y, y_true, decimal=8)
def test_tvar(self):
y = stats.tvar(X, limits=(2, 8), inclusive=(True, True))
assert_approx_equal(y, 4.6666666666666661, significant=self.dprec)
y = stats.tvar(X, limits=None)
assert_approx_equal(y, X.var(ddof=1), significant=self.dprec)
x_2d = arange(63, dtype=float64).reshape((9, 7))
y = stats.tvar(x_2d, axis=None)
assert_approx_equal(y, x_2d.var(ddof=1), significant=self.dprec)
y = stats.tvar(x_2d, axis=0)
assert_array_almost_equal(y[0], np.full((1, 7), 367.50000000), decimal=8)
y = stats.tvar(x_2d, axis=1)
assert_array_almost_equal(y[0], np.full((1, 9), 4.66666667), decimal=8)
y = stats.tvar(x_2d[3, :])
assert_approx_equal(y, 4.666666666666667, significant=self.dprec)
with suppress_warnings() as sup:
sup.record(RuntimeWarning, "Degrees of freedom <= 0 for slice.")
# Limiting some values along one axis
y = stats.tvar(x_2d, limits=(1, 5), axis=1, inclusive=(True, True))
assert_approx_equal(y[0], 2.5, significant=self.dprec)
# Limiting all values along one axis
y = stats.tvar(x_2d, limits=(0, 6), axis=1, inclusive=(True, True))
assert_approx_equal(y[0], 4.666666666666667, significant=self.dprec)
assert_equal(y[1], np.nan)
def test_tstd(self):
y = stats.tstd(X, (2, 8), (True, True))
assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)
y = stats.tstd(X, limits=None)
assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)
def test_tmin(self):
assert_equal(stats.tmin(4), 4)
x = np.arange(10)
assert_equal(stats.tmin(x), 0)
assert_equal(stats.tmin(x, lowerlimit=0), 0)
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)
x = x.reshape((5, 2))
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])
assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])
assert_equal(stats.tmin(x, axis=None), 0)
x = np.arange(10.)
x[9] = np.nan
with suppress_warnings() as sup:
sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmin(x), np.nan)
assert_equal(stats.tmin(x, nan_policy='omit'), 0.)
assert_raises(ValueError, stats.tmin, x, nan_policy='raise')
assert_raises(ValueError, stats.tmin, x, nan_policy='foobar')
msg = "'propagate', 'raise', 'omit'"
with assert_raises(ValueError, match=msg):
stats.tmin(x, nan_policy='foo')
def test_tmax(self):
assert_equal(stats.tmax(4), 4)
x = np.arange(10)
assert_equal(stats.tmax(x), 9)
assert_equal(stats.tmax(x, upperlimit=9), 9)
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)
x = x.reshape((5, 2))
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])
assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])
assert_equal(stats.tmax(x, axis=None), 9)
x = np.arange(10.)
x[6] = np.nan
with suppress_warnings() as sup:
sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmax(x), np.nan)
assert_equal(stats.tmax(x, nan_policy='omit'), 9.)
assert_raises(ValueError, stats.tmax, x, nan_policy='raise')
assert_raises(ValueError, stats.tmax, x, nan_policy='foobar')
def test_tsem(self):
y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))
y_ref = np.array([4, 5, 6, 7, 8])
assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),
significant=self.dprec)
assert_approx_equal(stats.tsem(X, limits=[-1, 10]),
stats.tsem(X, limits=None),
significant=self.dprec)
class TestCorrPearsonr:
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_pXX(self):
y = stats.pearsonr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXBIG(self):
y = stats.pearsonr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXLITTLE(self):
y = stats.pearsonr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXHUGE(self):
y = stats.pearsonr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXTINY(self):
y = stats.pearsonr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXROUND(self):
y = stats.pearsonr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGBIG(self):
y = stats.pearsonr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGLITTLE(self):
y = stats.pearsonr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGHUGE(self):
y = stats.pearsonr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGTINY(self):
y = stats.pearsonr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGROUND(self):
y = stats.pearsonr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLELITTLE(self):
y = stats.pearsonr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEHUGE(self):
y = stats.pearsonr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLETINY(self):
y = stats.pearsonr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEROUND(self):
y = stats.pearsonr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEHUGE(self):
y = stats.pearsonr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGETINY(self):
y = stats.pearsonr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEROUND(self):
y = stats.pearsonr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYTINY(self):
y = stats.pearsonr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYROUND(self):
y = stats.pearsonr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pROUNDROUND(self):
y = stats.pearsonr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pearsonr_result_attributes(self):
res = stats.pearsonr(X, X)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes)
assert_equal(res.correlation, res.statistic)
def test_r_almost_exactly_pos1(self):
a = arange(3.0)
r, prob = stats.pearsonr(a, a)
assert_allclose(r, 1.0, atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
def test_r_almost_exactly_neg1(self):
a = arange(3.0)
r, prob = stats.pearsonr(a, -a)
assert_allclose(r, -1.0, atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
def test_basic(self):
# A basic test, with a correlation coefficient
# that is not 1 or -1.
a = array([-1, 0, 1])
b = array([0, 0, 3])
r, prob = stats.pearsonr(a, b)
assert_approx_equal(r, np.sqrt(3)/2)
assert_approx_equal(prob, 1/3)
def test_constant_input(self):
# Zero variance input
# See https://github.com/scipy/scipy/issues/3728
msg = "An input array is constant"
with assert_warns(stats.ConstantInputWarning, match=msg):
r, p = stats.pearsonr([0.667, 0.667, 0.667], [0.123, 0.456, 0.789])
assert_equal(r, np.nan)
assert_equal(p, np.nan)
def test_near_constant_input(self):
# Near constant input (but not constant):
x = [2, 2, 2 + np.spacing(2)]
y = [3, 3, 3 + 6*np.spacing(3)]
msg = "An input array is nearly constant; the computed"
with assert_warns(stats.NearConstantInputWarning, match=msg):
# r and p are garbage, so don't bother checking them in this case.
# (The exact value of r would be 1.)
r, p = stats.pearsonr(x, y)
def test_very_small_input_values(self):
# Very small values in an input. A naive implementation will
# suffer from underflow.
# See https://github.com/scipy/scipy/issues/9353
x = [0.004434375, 0.004756007, 0.003911996, 0.0038005, 0.003409971]
y = [2.48e-188, 7.41e-181, 4.09e-208, 2.08e-223, 2.66e-245]
r, p = stats.pearsonr(x,y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.7272930540750450)
assert_allclose(p, 0.1637805429533202)
def test_very_large_input_values(self):
# Very large values in an input. A naive implementation will
# suffer from overflow.
# See https://github.com/scipy/scipy/issues/8980
x = 1e90*np.array([0, 0, 0, 1, 1, 1, 1])
y = 1e90*np.arange(7)
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.8660254037844386)
assert_allclose(p, 0.011724811003954638)
def test_extremely_large_input_values(self):
# Extremely large values in x and y. These values would cause the
# product sigma_x * sigma_y to overflow if the two factors were
# computed independently.
x = np.array([2.3e200, 4.5e200, 6.7e200, 8e200])
y = np.array([1.2e199, 5.5e200, 3.3e201, 1.0e200])
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.351312332103289)
assert_allclose(p, 0.648687667896711)
def test_length_two_pos1(self):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
res = stats.pearsonr([1, 2], [3, 5])
r, p = res
assert_equal(r, 1)
assert_equal(p, 1)
assert_equal(res.confidence_interval(), (-1, 1))
def test_length_two_neg2(self):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
r, p = stats.pearsonr([2, 1], [3, 5])
assert_equal(r, -1)
assert_equal(p, 1)
# Expected values computed with R 3.6.2 cor.test, e.g.
# options(digits=16)
# x <- c(1, 2, 3, 4)
# y <- c(0, 1, 0.5, 1)
# cor.test(x, y, method = "pearson", alternative = "g")
# correlation coefficient and p-value for alternative='two-sided'
# calculated with mpmath agree to 16 digits.
@pytest.mark.parametrize('alternative, pval, rlow, rhigh, sign',
[('two-sided', 0.325800137536, -0.814938968841, 0.99230697523, 1), # noqa
('less', 0.8370999312316, -1, 0.985600937290653, 1),
('greater', 0.1629000687684, -0.6785654158217636, 1, 1),
('two-sided', 0.325800137536, -0.992306975236, 0.81493896884, -1),
('less', 0.1629000687684, -1.0, 0.6785654158217636, -1),
('greater', 0.8370999312316, -0.985600937290653, 1.0, -1)])
def test_basic_example(self, alternative, pval, rlow, rhigh, sign):
x = [1, 2, 3, 4]
y = np.array([0, 1, 0.5, 1]) * sign
result = stats.pearsonr(x, y, alternative=alternative)
assert_allclose(result.statistic, 0.6741998624632421*sign, rtol=1e-12)
assert_allclose(result.pvalue, pval, rtol=1e-6)
ci = result.confidence_interval()
assert_allclose(ci, (rlow, rhigh), rtol=1e-6)
def test_negative_correlation_pvalue_gh17795(self):
x = np.arange(10)
y = -x
test_greater = stats.pearsonr(x, y, alternative='greater')
test_less = stats.pearsonr(x, y, alternative='less')
assert_allclose(test_greater.pvalue, 1)
assert_allclose(test_less.pvalue, 0, atol=1e-20)
def test_length3_r_exactly_negative_one(self):
x = [1, 2, 3]
y = [5, -4, -13]
res = stats.pearsonr(x, y)
# The expected r and p are exact.
r, p = res
assert_allclose(r, -1.0)
assert_allclose(p, 0.0, atol=1e-7)
assert_equal(res.confidence_interval(), (-1, 1))
def test_unequal_lengths(self):
x = [1, 2, 3]
y = [4, 5]
assert_raises(ValueError, stats.pearsonr, x, y)
def test_len1(self):
x = [1]
y = [2]
assert_raises(ValueError, stats.pearsonr, x, y)
def test_complex_data(self):
x = [-1j, -2j, -3.0j]
y = [-1j, -2j, -3.0j]
message = 'This function does not support complex data'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, y)
@pytest.mark.xslow
@pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
@pytest.mark.parametrize('method', ('permutation', 'monte_carlo'))
def test_resampling_pvalue(self, method, alternative):
rng = np.random.default_rng(24623935790378923)
size = 100 if method == 'permutation' else 1000
x = rng.normal(size=size)
y = rng.normal(size=size)
methods = {'permutation': stats.PermutationMethod(random_state=rng),
'monte_carlo': stats.MonteCarloMethod(rvs=(rng.normal,)*2)}
method = methods[method]
res = stats.pearsonr(x, y, alternative=alternative, method=method)
ref = stats.pearsonr(x, y, alternative=alternative)
assert_allclose(res.statistic, ref.statistic, rtol=1e-15)
assert_allclose(res.pvalue, ref.pvalue, rtol=1e-2, atol=1e-3)
@pytest.mark.xslow
@pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
def test_bootstrap_ci(self, alternative):
rng = np.random.default_rng(24623935790378923)
x = rng.normal(size=100)
y = rng.normal(size=100)
res = stats.pearsonr(x, y, alternative=alternative)
method = stats.BootstrapMethod(random_state=rng)
res_ci = res.confidence_interval(method=method)
ref_ci = res.confidence_interval()
assert_allclose(res_ci, ref_ci, atol=1e-2)
def test_invalid_method(self):
message = "`method` must be an instance of..."
with pytest.raises(ValueError, match=message):
stats.pearsonr([1, 2], [3, 4], method="asymptotic")
res = stats.pearsonr([1, 2], [3, 4])
with pytest.raises(ValueError, match=message):
res.confidence_interval(method="exact")
class TestFisherExact:
    """Some tests to show that fisher_exact() works correctly.

    Note that in SciPy 0.9.0 this was not working well for large numbers due to
    inaccuracy of the hypergeom distribution (see #1218). Fixed now.

    Also note that R and SciPy have different argument formats for their
    hypergeometric distribution functions.

    R:
    > phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
    [1] 1.701815e-09
    """

    def test_basic(self):
        """p-values for a batch of small 2x2 tables."""
        fisher_exact = stats.fisher_exact

        checks = [
            # (table, expected p-value, significant digits)
            ([[14500, 20000], [30000, 40000]], 0.01106, 4),
            ([[100, 2], [1000, 5]], 0.1301, 4),
            ([[2, 7], [8, 2]], 0.0230141, 6),
            ([[5, 1], [10, 10]], 0.1973244, 6),
            ([[5, 15], [20, 20]], 0.0958044, 6),
            ([[5, 16], [20, 25]], 0.1725862, 6),
            ([[10, 5], [10, 1]], 0.1973244, 6),
            ([[5, 0], [1, 4]], 0.04761904, 6),
        ]
        for table, expected, significant in checks:
            assert_approx_equal(fisher_exact(table)[1], expected,
                                significant=significant)

        assert_approx_equal(fisher_exact([[0, 1], [3, 2]])[1], 1.0)
        assert_approx_equal(fisher_exact([[0, 2], [6, 4]])[1], 0.4545454545)

        # The odds ratio is returned alongside the p-value.
        res = fisher_exact([[2, 7], [8, 2]])
        assert_approx_equal(res[1], 0.0230141, significant=6)
        assert_approx_equal(res[0], 4.0 / 56)

    def test_precise(self):
        # results from R
        #
        # R defines oddsratio differently (see Notes section of fisher_exact
        # docstring), so those will not match. We leave them in anyway, in
        # case they will be useful later on. We test only the p-value.
        tablist = [
            ([[100, 2], [1000, 5]],
             (2.505583993422285e-001, 1.300759363430016e-001)),
            ([[2, 7], [8, 2]],
             (8.586235135736206e-002, 2.301413756522114e-002)),
            ([[5, 1], [10, 10]],
             (4.725646047336584e+000, 1.973244147157190e-001)),
            ([[5, 15], [20, 20]],
             (3.394396617440852e-001, 9.580440012477637e-002)),
            ([[5, 16], [20, 25]],
             (3.960558326183334e-001, 1.725864953812994e-001)),
            ([[10, 5], [10, 1]],
             (2.116112781158483e-001, 1.973244147157190e-001)),
            ([[10, 5], [10, 0]],
             (0.000000000000000e+000, 6.126482213438734e-002)),
            ([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
            ([[0, 5], [1, 4]],
             (0.000000000000000e+000, 1.000000000000000e+000)),
            ([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
            ([[0, 1], [3, 2]],
             (0.000000000000000e+000, 1.000000000000000e+000)),
        ]
        for table, res_r in tablist:
            res = stats.fisher_exact(np.asarray(table))
            np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
                                           verbose=True)

    def test_gh4130(self):
        # Previously, a fudge factor used to distinguish between theoeretically
        # and numerically different probability masses was 1e-4; it has been
        # tightened to fix gh4130. Accuracy checked against R fisher.test.
        # options(digits=16)
        # table <- matrix(c(6, 108, 37, 200), nrow = 2)
        # fisher.test(table, alternative = "t")
        res = stats.fisher_exact([[6, 37], [108, 200]])
        assert_allclose(res[1], 0.005092697748126)

        # case from https://github.com/brentp/fishers_exact_test/issues/27
        # That package has an (absolute?) fudge factor of 1e-6; too big
        res = stats.fisher_exact([[22, 0], [0, 102]])
        assert_allclose(res[1], 7.175066786244549e-25)

        # case from https://github.com/brentp/fishers_exact_test/issues/1
        res = stats.fisher_exact([[94, 48], [3577, 16988]])
        assert_allclose(res[1], 2.069356340993818e-37)

    def test_gh9231(self):
        # Previously, fisher_exact was extremely slow for this table
        # As reported in gh-9231, the p-value should be very nearly zero
        res = stats.fisher_exact([[5829225, 5692693], [5760959, 5760959]])
        assert_allclose(res[1], 0, atol=1e-170)

    @pytest.mark.slow
    def test_large_numbers(self):
        # Test with some large numbers. Regression test for #1401
        pvals = [5.56e-11, 2.666e-11, 1.363e-11]  # from R
        for pval, num in zip(pvals, [75, 76, 77]):
            res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
            assert_approx_equal(res, pval, significant=4)

        res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
        assert_approx_equal(res, 0.2751, significant=4)

    def test_raises(self):
        # test we raise an error for wrong shape of input.
        assert_raises(ValueError, stats.fisher_exact,
                      np.arange(6).reshape(2, 3))

    def test_row_or_col_zero(self):
        """A table with an all-zero row or column: p = 1, odds ratio NaN."""
        tables = ([[0, 0], [5, 10]],
                  [[5, 10], [0, 0]],
                  [[0, 5], [0, 10]],
                  [[5, 0], [10, 0]])
        for table in tables:
            oddsratio, pval = stats.fisher_exact(table)
            assert_equal(pval, 1.0)
            assert_equal(oddsratio, np.nan)

    def test_less_greater(self):
        tables = (
            # Some tables to compare with R:
            [[2, 7], [8, 2]],
            [[200, 7], [8, 300]],
            [[28, 21], [6, 1957]],
            [[190, 800], [200, 900]],
            # Some tables with simple exact values
            # (includes regression test for ticket #1568):
            [[0, 2], [3, 0]],
            [[1, 1], [2, 1]],
            [[2, 0], [1, 2]],
            [[0, 1], [2, 3]],
            [[1, 0], [1, 4]],
        )
        pvals = (
            # from R:
            [0.018521725952066501, 0.9990149169715733],
            [1.0, 2.0056578803889148e-122],
            [1.0, 5.7284374608319831e-44],
            [0.7416227, 0.2959826],
            # Exact:
            [0.1, 1.0],
            [0.7, 0.9],
            [1.0, 0.3],
            [2./3, 1.0],
            [1.0, 1./3],
        )
        for table, pval in zip(tables, pvals):
            res = [stats.fisher_exact(table, alternative="less")[1],
                   stats.fisher_exact(table, alternative="greater")[1]]
            assert_allclose(res, pval, atol=0, rtol=1e-7)

    def test_gh3014(self):
        # check if issue #3014 has been fixed.
        # before, this would have risen a ValueError
        odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])

    @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
    def test_result(self, alternative):
        """The result object unpacks as (statistic, pvalue)."""
        table = np.array([[14500, 20000], [30000, 40000]])
        res = stats.fisher_exact(table, alternative=alternative)
        assert_equal((res.statistic, res.pvalue), res)
class TestCorrSpearmanr:
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_scalar(self):
y = stats.spearmanr(4., 2.)
assert_(np.isnan(y).all())
def test_uneven_lengths(self):
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)
def test_uneven_2d_shapes(self):
# Different number of columns should work - those just get concatenated.
np.random.seed(232324)
x = np.random.randn(4, 3)
y = np.random.randn(4, 2)
assert stats.spearmanr(x, y).statistic.shape == (5, 5)
assert stats.spearmanr(x.T, y.T, axis=1).pvalue.shape == (5, 5)
assert_raises(ValueError, stats.spearmanr, x, y, axis=1)
assert_raises(ValueError, stats.spearmanr, x.T, y.T)
def test_ndim_too_high(self):
np.random.seed(232324)
x = np.random.randn(4, 3, 2)
assert_raises(ValueError, stats.spearmanr, x)
assert_raises(ValueError, stats.spearmanr, x, x)
assert_raises(ValueError, stats.spearmanr, x, None, None)
# But should work with axis=None (raveling axes) for two input arrays
assert_allclose(stats.spearmanr(x, x, axis=None),
stats.spearmanr(x.flatten(), x.flatten(), axis=0))
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'),
(1.0, 0.0))
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
def test_nan_policy_bug_12458(self):
np.random.seed(5)
x = np.random.rand(5, 10)
k = 6
x[:, k] = np.nan
y = np.delete(x, k, axis=1)
corx, px = stats.spearmanr(x, nan_policy='omit')
cory, py = stats.spearmanr(y)
corx = np.delete(np.delete(corx, k, axis=1), k, axis=0)
px = np.delete(np.delete(px, k, axis=1), k, axis=0)
assert_allclose(corx, cory, atol=1e-14)
assert_allclose(px, py, atol=1e-14)
def test_nan_policy_bug_12411(self):
np.random.seed(5)
m = 5
n = 10
x = np.random.randn(m, n)
x[1, 0] = np.nan
x[3, -1] = np.nan
corr, pvalue = stats.spearmanr(x, axis=1, nan_policy="propagate")
res = [[stats.spearmanr(x[i, :], x[j, :]).statistic for i in range(m)]
for j in range(m)]
assert_allclose(corr, res)
def test_sXX(self):
y = stats.spearmanr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXBIG(self):
y = stats.spearmanr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXLITTLE(self):
y = stats.spearmanr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXHUGE(self):
y = stats.spearmanr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXTINY(self):
y = stats.spearmanr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXROUND(self):
y = stats.spearmanr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGBIG(self):
y = stats.spearmanr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGLITTLE(self):
y = stats.spearmanr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGHUGE(self):
y = stats.spearmanr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGTINY(self):
y = stats.spearmanr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGROUND(self):
y = stats.spearmanr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLELITTLE(self):
y = stats.spearmanr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEHUGE(self):
y = stats.spearmanr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLETINY(self):
y = stats.spearmanr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEROUND(self):
y = stats.spearmanr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEHUGE(self):
y = stats.spearmanr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGETINY(self):
y = stats.spearmanr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEROUND(self):
y = stats.spearmanr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYTINY(self):
y = stats.spearmanr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYROUND(self):
y = stats.spearmanr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sROUNDROUND(self):
y = stats.spearmanr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_spearmanr_result_attributes(self):
res = stats.spearmanr(X, X)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes)
assert_equal(res.correlation, res.statistic)
def test_1d_vs_2d(self):
x1 = [1, 2, 3, 4, 5, 6]
x2 = [1, 2, 3, 4, 6, 5]
res1 = stats.spearmanr(x1, x2)
res2 = stats.spearmanr(np.asarray([x1, x2]).T)
assert_allclose(res1, res2)
def test_1d_vs_2d_nans(self):
# Now the same with NaNs present. Regression test for gh-9103.
for nan_policy in ['propagate', 'omit']:
x1 = [1, np.nan, 3, 4, 5, 6]
x2 = [1, 2, 3, 4, 6, np.nan]
res1 = stats.spearmanr(x1, x2, nan_policy=nan_policy)
res2 = stats.spearmanr(np.asarray([x1, x2]).T, nan_policy=nan_policy)
assert_allclose(res1, res2)
def test_3cols(self):
x1 = np.arange(6)
x2 = -x1
x3 = np.array([0, 1, 2, 3, 5, 4])
x = np.asarray([x1, x2, x3]).T
actual = stats.spearmanr(x)
expected_corr = np.array([[1, -1, 0.94285714],
[-1, 1, -0.94285714],
[0.94285714, -0.94285714, 1]])
expected_pvalue = np.zeros((3, 3), dtype=float)
expected_pvalue[2, 0:2] = 0.00480466472
expected_pvalue[0:2, 2] = 0.00480466472
assert_allclose(actual.statistic, expected_corr)
assert_allclose(actual.pvalue, expected_pvalue)
def test_gh_9103(self):
# Regression test for gh-9103.
x = np.array([[np.nan, 3.0, 4.0, 5.0, 5.1, 6.0, 9.2],
[5.0, np.nan, 4.1, 4.8, 4.9, 5.0, 4.1],
[0.5, 4.0, 7.1, 3.8, 8.0, 5.1, 7.6]]).T
corr = np.array([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 1.]])
assert_allclose(stats.spearmanr(x, nan_policy='propagate').statistic,
corr)
res = stats.spearmanr(x, nan_policy='omit').statistic
assert_allclose((res[0][1], res[0][2], res[1][2]),
(0.2051957, 0.4857143, -0.4707919), rtol=1e-6)
def test_gh_8111(self):
# Regression test for gh-8111 (different result for float/int/bool).
n = 100
np.random.seed(234568)
x = np.random.rand(n)
m = np.random.rand(n) > 0.7
# bool against float, no nans
a = (x > .5)
b = np.array(x)
res1 = stats.spearmanr(a, b, nan_policy='omit').statistic
# bool against float with NaNs
b[m] = np.nan
res2 = stats.spearmanr(a, b, nan_policy='omit').statistic
# int against float with NaNs
a = a.astype(np.int32)
res3 = stats.spearmanr(a, b, nan_policy='omit').statistic
expected = [0.865895477, 0.866100381, 0.866100381]
assert_allclose([res1, res2, res3], expected)
class TestCorrSpearmanr2:
    """Some further tests of the spearmanr function."""
    def test_spearmanr_vs_r(self):
        # Cross-check with R:
        # cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearmanr")
        x1 = [1, 2, 3, 4, 5]
        x2 = [5, 6, 7, 8, 7]
        expected = (0.82078268166812329, 0.088587005313543798)
        res = stats.spearmanr(x1, x2)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    def test_empty_arrays(self):
        # Empty samples give (nan, nan) rather than raising.
        assert_equal(stats.spearmanr([], []), (np.nan, np.nan))
    def test_normal_draws(self):
        np.random.seed(7546)
        x = np.array([np.random.normal(loc=1, scale=1, size=500),
                      np.random.normal(loc=1, scale=1, size=500)])
        corr = [[1.0, 0.3],
                [0.3, 1.0]]
        # Induce Pearson correlation 0.3 between the rows via Cholesky mixing.
        x = np.dot(np.linalg.cholesky(corr), x)
        expected = (0.28659685838743354, 6.579862219051161e-11)
        res = stats.spearmanr(x[0], x[1])
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    def test_corr_1(self):
        # Tied values do not spoil a perfect monotone relationship.
        assert_approx_equal(stats.spearmanr([1, 1, 2], [1, 1, 2])[0], 1.0)
    def test_nan_policies(self):
        x = np.arange(10.)
        x[9] = np.nan
        # 'propagate' (default) yields NaNs; 'omit' drops the NaN pair.
        assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
        assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),
                        (1.0, 0))
        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
    def test_unequal_lengths(self):
        x = np.arange(10.)
        y = np.arange(20.)
        assert_raises(ValueError, stats.spearmanr, x, y)
    def test_omit_paired_value(self):
        # 'omit' must drop the whole (x, y) pair containing the NaN.
        x1 = [1, 2, 3, 4]
        x2 = [8, 7, 6, np.nan]
        res1 = stats.spearmanr(x1, x2, nan_policy='omit')
        res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')
        assert_equal(res1, res2)
    def test_gh_issue_6061_windows_overflow(self):
        x = list(range(2000))
        y = list(range(2000))
        # Swap three pairs of ranks to produce a known rho just below 1.
        y[0], y[9] = y[9], y[0]
        y[10], y[434] = y[434], y[10]
        y[435], y[1509] = y[1509], y[435]
        # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
        # = 1 - (1 / 500)
        # = 0.998
        x.append(np.nan)
        y.append(3.0)
        assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)
    def test_tie0(self):
        # with only ties in one or both inputs
        warn_msg = "An input array is constant"
        with assert_warns(stats.ConstantInputWarning, match=warn_msg):
            r, p = stats.spearmanr([2, 2, 2], [2, 2, 2])
            assert_equal(r, np.nan)
            assert_equal(p, np.nan)
            r, p = stats.spearmanr([2, 0, 2], [2, 2, 2])
            assert_equal(r, np.nan)
            assert_equal(p, np.nan)
            r, p = stats.spearmanr([2, 2, 2], [2, 0, 2])
            assert_equal(r, np.nan)
            assert_equal(p, np.nan)
    def test_tie1(self):
        # Data
        x = [1.0, 2.0, 3.0, 4.0]
        y = [1.0, 2.0, 2.0, 3.0]
        # Ranks of the data, with tie-handling.
        xr = [1.0, 2.0, 3.0, 4.0]
        yr = [1.0, 2.5, 2.5, 4.0]
        # Result of spearmanr should be the same as applying
        # pearsonr to the ranks.
        sr = stats.spearmanr(x, y)
        pr = stats.pearsonr(xr, yr)
        assert_almost_equal(sr, pr)
    def test_tie2(self):
        # Test tie-handling if inputs contain nan's
        # Data without nan's
        x1 = [1, 2, 2.5, 2]
        y1 = [1, 3, 2.5, 4]
        # Same data with nan's
        x2 = [1, 2, 2.5, 2, np.nan]
        y2 = [1, 3, 2.5, 4, np.nan]
        # Results for two data sets should be the same if nan's are ignored
        sr1 = stats.spearmanr(x1, y1)
        sr2 = stats.spearmanr(x2, y2, nan_policy='omit')
        assert_almost_equal(sr1, sr2)
    def test_ties_axis_1(self):
        # A constant row along the chosen axis must warn and give NaNs.
        z1 = np.array([[1, 1, 1, 1], [1, 2, 3, 4]])
        z2 = np.array([[1, 2, 3, 4], [1, 1, 1, 1]])
        z3 = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])
        warn_msg = "An input array is constant"
        with assert_warns(stats.ConstantInputWarning, match=warn_msg):
            r, p = stats.spearmanr(z1, axis=1)
            assert_equal(r, np.nan)
            assert_equal(p, np.nan)
            r, p = stats.spearmanr(z2, axis=1)
            assert_equal(r, np.nan)
            assert_equal(p, np.nan)
            r, p = stats.spearmanr(z3, axis=1)
            assert_equal(r, np.nan)
            assert_equal(p, np.nan)
    def test_gh_11111(self):
        # gh-11111: a constant input must warn, not crash.
        x = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587,
                      0.0007535430349118562, 0.0002661781514710257, 0, 0,
                      0.0007835762419683435])
        warn_msg = "An input array is constant"
        with assert_warns(stats.ConstantInputWarning, match=warn_msg):
            r, p = stats.spearmanr(x, y)
            assert_equal(r, np.nan)
            assert_equal(p, np.nan)
    def test_index_error(self):
        # axis=2 is out of bounds for 2-D-at-most input.
        x = np.array([1.0, 7.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587,
                      0.0007535430349118562, 0.0002661781514710257, 0, 0,
                      0.0007835762419683435])
        assert_raises(ValueError, stats.spearmanr, x, y, axis=2)
    def test_alternative(self):
        # Test alternative parameter
        # Simple test - Based on the above ``test_spearmanr_vs_r``
        x1 = [1, 2, 3, 4, 5]
        x2 = [5, 6, 7, 8, 7]
        # strong positive correlation
        expected = (0.82078268166812329, 0.088587005313543798)
        # correlation > 0 -> large "less" p-value
        res = stats.spearmanr(x1, x2, alternative="less")
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], 1 - (expected[1] / 2))
        # correlation > 0 -> small "greater" p-value
        res = stats.spearmanr(x1, x2, alternative="greater")
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1] / 2)
        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            stats.spearmanr(x1, x2, alternative="ekki-ekki")
    @pytest.mark.parametrize("alternative", ('two-sided', 'less', 'greater'))
    def test_alternative_nan_policy(self, alternative):
        # Test nan policies
        x1 = [1, 2, 3, 4, 5]
        x2 = [5, 6, 7, 8, 7]
        x1nan = x1 + [np.nan]
        x2nan = x2 + [np.nan]
        # test nan_policy="propagate"
        assert_array_equal(stats.spearmanr(x1nan, x2nan), (np.nan, np.nan))
        # test nan_policy="omit"
        res_actual = stats.spearmanr(x1nan, x2nan, nan_policy='omit',
                                     alternative=alternative)
        res_expected = stats.spearmanr(x1, x2, alternative=alternative)
        assert_allclose(res_actual, res_expected)
        # test nan_policy="raise"
        message = 'The input contains nan values'
        with pytest.raises(ValueError, match=message):
            stats.spearmanr(x1nan, x2nan, nan_policy='raise',
                            alternative=alternative)
        # test invalid nan_policy
        message = "nan_policy must be one of..."
        with pytest.raises(ValueError, match=message):
            stats.spearmanr(x1nan, x2nan, nan_policy='ekki-ekki',
                            alternative=alternative)
# W.II.E. Tabulate X against X, using BIG as a case weight. The values
# should appear on the diagonal and the total should be 899999955.
# If the table cannot hold these values, forget about working with
# census data. You can also tabulate HUGE against TINY. There is no
# reason a tabulation program should not be able to distinguish
# different values regardless of their magnitude.
# I need to figure out how to do this one.
def test_kendalltau():
    """Exercise `stats.kendalltau` on hand-checked cases.

    Statistics and p-values are cross-checked against R's
    ``cor.test(x, y, method="kendall", exact=1)`` where noted.
    """
    # For the cases without ties, both variants should give the same
    # result.
    variants = ('b', 'c')
    # case without ties, con-dis equal zero
    x = [5, 2, 1, 3, 6, 4, 7, 8]
    y = [5, 2, 6, 3, 1, 8, 7, 4]
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (0.0, 1.0)
    for taux in variants:
        # Fix: previously `taux` was unused here, so variant 'c' was never
        # exercised for the tie-free cases (same for the three loops below).
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # case without ties, con-dis equal zero
    x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
    y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (0.0, 1.0)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # case without ties, con-dis close to zero
    x = [5, 2, 1, 3, 6, 4, 7]
    y = [5, 2, 6, 3, 1, 7, 4]
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (-0.14285714286, 0.77261904762)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # case without ties, con-dis close to zero
    x = [2, 1, 3, 6, 4, 7, 8]
    y = [2, 6, 3, 1, 8, 7, 4]
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (0.047619047619, 1.0)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # simple case without ties
    x = np.arange(10)
    y = np.arange(10)
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (1.0, 5.511463844797e-07)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # swap a couple of values
    b = y[1]
    y[1] = y[2]
    y[2] = b
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (0.9555555555555556, 5.511463844797e-06)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # swap a couple more
    b = y[5]
    y[5] = y[6]
    y[6] = b
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (0.9111111111111111, 2.976190476190e-05)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # same in opposite direction
    x = np.arange(10)
    y = np.arange(10)[::-1]
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (-1.0, 5.511463844797e-07)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # swap a couple of values
    b = y[1]
    y[1] = y[2]
    y[2] = b
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (-0.9555555555555556, 5.511463844797e-06)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # swap a couple more
    b = y[5]
    y[5] = y[6]
    y[6] = b
    # Cross-check with exact result from R:
    # cor.test(x,y,method="kendall",exact=1)
    expected = (-0.9111111111111111, 2.976190476190e-05)
    for taux in variants:
        res = stats.kendalltau(x, y, variant=taux)
        assert_approx_equal(res[0], expected[0])
        assert_approx_equal(res[1], expected[1])
    # Check a case where variants are different
    # Example values found from Kendall (1970).
    # P-value is the same for the both variants
    x = np.array([1, 2, 2, 4, 4, 6, 6, 8, 9, 9])
    y = np.array([1, 2, 4, 4, 4, 4, 8, 8, 8, 10])
    expected = 0.85895569
    assert_approx_equal(stats.kendalltau(x, y, variant='b')[0], expected)
    expected = 0.825
    assert_approx_equal(stats.kendalltau(x, y, variant='c')[0], expected)
    # check exception in case of ties and method='exact' requested
    y[2] = y[1]
    assert_raises(ValueError, stats.kendalltau, x, y, method='exact')
    # check exception in case of invalid method keyword
    assert_raises(ValueError, stats.kendalltau, x, y, method='banana')
    # check exception in case of invalid variant keyword
    assert_raises(ValueError, stats.kendalltau, x, y, variant='rms')
    # tau-b with some ties
    # Cross-check with R:
    # cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE)
    x1 = [12, 2, 1, 12, 2]
    x2 = [1, 4, 7, 1, 0]
    expected = (-0.47140452079103173, 0.28274545993277478)
    res = stats.kendalltau(x1, x2)
    assert_approx_equal(res[0], expected[0])
    assert_approx_equal(res[1], expected[1])
    # test for namedtuple attribute results
    attributes = ('correlation', 'pvalue')
    for taux in variants:
        res = stats.kendalltau(x1, x2, variant=taux)
        check_named_results(res, attributes)
        assert_equal(res.correlation, res.statistic)
    # with only ties in one or both inputs in tau-b or tau-c
    for taux in variants:
        assert_equal(stats.kendalltau([2, 2, 2], [2, 2, 2], variant=taux),
                     (np.nan, np.nan))
        assert_equal(stats.kendalltau([2, 0, 2], [2, 2, 2], variant=taux),
                     (np.nan, np.nan))
        assert_equal(stats.kendalltau([2, 2, 2], [2, 0, 2], variant=taux),
                     (np.nan, np.nan))
    # empty arrays provided as input
    assert_equal(stats.kendalltau([], []), (np.nan, np.nan))
    # check with larger arrays
    np.random.seed(7546)
    x = np.array([np.random.normal(loc=1, scale=1, size=500),
                  np.random.normal(loc=1, scale=1, size=500)])
    corr = [[1.0, 0.3],
            [0.3, 1.0]]
    # Induce correlation 0.3 between the rows via Cholesky mixing.
    x = np.dot(np.linalg.cholesky(corr), x)
    expected = (0.19291382765531062, 1.1337095377742629e-10)
    res = stats.kendalltau(x[0], x[1])
    assert_approx_equal(res[0], expected[0])
    assert_approx_equal(res[1], expected[1])
    # this should result in 1 for taub but not tau-c
    assert_approx_equal(stats.kendalltau([1, 1, 2], [1, 1, 2], variant='b')[0],
                        1.0)
    assert_approx_equal(stats.kendalltau([1, 1, 2], [1, 1, 2], variant='c')[0],
                        0.88888888)
    # test nan_policy
    x = np.arange(10.)
    x[9] = np.nan
    assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan))
    assert_allclose(stats.kendalltau(x, x, nan_policy='omit'),
                    (1.0, 5.5114638e-6), rtol=1e-06)
    assert_allclose(stats.kendalltau(x, x, nan_policy='omit', method='asymptotic'),
                    (1.0, 0.00017455009626808976), rtol=1e-06)
    assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise')
    assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar')
    # test unequal length inputs
    x = np.arange(10.)
    y = np.arange(20.)
    assert_raises(ValueError, stats.kendalltau, x, y)
    # test all ties
    tau, p_value = stats.kendalltau([], [])
    assert_equal(np.nan, tau)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.kendalltau([0], [0])
    assert_equal(np.nan, tau)
    assert_equal(np.nan, p_value)
    # Regression test for GitHub issue #6061 - Overflow on Windows
    x = np.arange(2000, dtype=float)
    x = np.ma.masked_greater(x, 1995)
    y = np.arange(2000, dtype=float)
    y = np.concatenate((y[1000:], y[:1000]))
    assert_(np.isfinite(stats.kendalltau(x, y)[1]))
def test_kendalltau_vs_mstats_basic():
    # The plain and masked-array implementations must agree on tied data.
    np.random.seed(42)
    for size in range(2, 10):
        # Rankings with ties: the value i occurs i times.
        sample_a = [v for v in range(size) for _ in range(v)]
        sample_b = list(sample_a)
        np.random.shuffle(sample_a)
        np.random.shuffle(sample_b)
        expected = mstats_basic.kendalltau(sample_a, sample_b)
        actual = stats.kendalltau(sample_a, sample_b)
        assert_approx_equal(actual[0], expected[0])
        assert_approx_equal(actual[1], expected[1])
def test_kendalltau_nan_2nd_arg():
    # gh-6134: NaNs in the second argument were not handled by 'omit'.
    x = [1., 2., 3., 4.]
    y = [np.nan, 2.4, 3.4, 3.4]
    omitted = stats.kendalltau(x, y, nan_policy='omit')
    trimmed = stats.kendalltau(x[1:], y[1:])
    assert_allclose(omitted.statistic, trimmed.statistic, atol=1e-15)
def test_kendalltau_dep_initial_lexsort():
    # Passing the obsolete 'initial_lexsort' keyword must emit a
    # DeprecationWarning.
    match = "'kendalltau' keyword argument 'initial_lexsort'"
    with pytest.warns(DeprecationWarning, match=match):
        stats.kendalltau([], [], initial_lexsort=True)
def test_kendalltau_gh18139_overflow():
    # gh-18139: an integer overflow crept into kendalltau after
    # SciPy 0.15.1; check that this case stays finite and correct.
    # (The test would fail if a warning were emitted.)
    import random
    random.seed(6272161)
    labels = [1, 2, 3, 4, 5, 6, 7]
    size = 2 * 10 ** 5
    u = random.choices(labels, k=size)
    v = random.choices(labels, k=size)
    res = stats.kendalltau(u, v)
    # Statistic cross-checked against SciPy 0.15.1.
    assert_allclose(res.statistic, 0.0011816493905730343)
    # p-value from `permutation_test` with n_resamples=9999 (default);
    # expected to be accurate to at least two digits.
    assert_allclose(res.pvalue, 0.4894, atol=2e-3)
class TestKendallTauAlternative:
    """Tests of `kendalltau`'s `alternative` parameter for both methods."""
    def test_kendalltau_alternative_asymptotic(self):
        # Test alternative parameter, asymptotic method (due to tie)
        # Based on TestCorrSpearman2::test_alternative
        x1 = [1, 2, 3, 4, 5]
        x2 = [5, 6, 7, 8, 7]
        # strong positive correlation
        expected = stats.kendalltau(x1, x2, alternative="two-sided")
        assert expected[0] > 0
        # rank correlation > 0 -> large "less" p-value
        res = stats.kendalltau(x1, x2, alternative="less")
        assert_equal(res[0], expected[0])
        assert_allclose(res[1], 1 - (expected[1] / 2))
        # rank correlation > 0 -> small "greater" p-value
        res = stats.kendalltau(x1, x2, alternative="greater")
        assert_equal(res[0], expected[0])
        assert_allclose(res[1], expected[1] / 2)
        # reverse the direction of rank correlation
        x2.reverse()
        # strong negative correlation
        expected = stats.kendalltau(x1, x2, alternative="two-sided")
        assert expected[0] < 0
        # rank correlation < 0 -> large "greater" p-value
        res = stats.kendalltau(x1, x2, alternative="greater")
        assert_equal(res[0], expected[0])
        assert_allclose(res[1], 1 - (expected[1] / 2))
        # rank correlation < 0 -> small "less" p-value
        res = stats.kendalltau(x1, x2, alternative="less")
        assert_equal(res[0], expected[0])
        assert_allclose(res[1], expected[1] / 2)
        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            stats.kendalltau(x1, x2, alternative="ekki-ekki")
    # There are a lot of special cases considered in the calculation of the
    # exact p-value, so we test each separately. We also need to test
    # separately when the observed statistic is in the left tail vs the right
    # tail because the code leverages symmetry of the null distribution; to
    # do that we use the same test case but negate one of the samples.
    # Reference values computed using R cor.test, e.g.
    # options(digits=16)
    # x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)
    # y <- c( 2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8)
    # cor.test(x, y, method = "kendall", alternative = "g")
    alternatives = ('less', 'two-sided', 'greater')
    # Expected p-values, one per alternative, for each special case below.
    p_n1 = [np.nan, np.nan, np.nan]
    p_n2 = [1, 1, 0.5]
    p_c0 = [1, 0.3333333333333, 0.1666666666667]
    p_c1 = [0.9583333333333, 0.3333333333333, 0.1666666666667]
    p_no_correlation = [0.5916666666667, 1, 0.5916666666667]
    p_no_correlationb = [0.5475694444444, 1, 0.5475694444444]
    p_n_lt_171 = [0.9624118165785, 0.1194389329806, 0.0597194664903]
    p_n_lt_171b = [0.246236925303, 0.4924738506059, 0.755634083327]
    p_n_lt_171c = [0.9847475308925, 0.03071385306533, 0.01535692653267]
    def exact_test(self, x, y, alternative, rev, stat_expected, p_expected):
        # Helper: optionally negate `y` (flipping the statistic's sign,
        # to hit the other tail) and compare the exact-method result
        # against the reference values.
        if rev:
            y = -np.asarray(y)
            stat_expected *= -1
        res = stats.kendalltau(x, y, method='exact', alternative=alternative)
        res_expected = stat_expected, p_expected
        assert_allclose(res, res_expected)
    case_R_n1 = (list(zip(alternatives, p_n1, [False]*3))
                 + list(zip(alternatives, reversed(p_n1), [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_n1)
    def test_against_R_n1(self, alternative, p_expected, rev):
        # n == 1: statistic and p-value are undefined.
        x, y = [1], [2]
        stat_expected = np.nan
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_R_n2 = (list(zip(alternatives, p_n2, [False]*3))
                 + list(zip(alternatives, reversed(p_n2), [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_n2)
    def test_against_R_n2(self, alternative, p_expected, rev):
        # n == 2: degenerate null distribution.
        x, y = [1, 2], [3, 4]
        stat_expected = 0.9999999999999998
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_R_c0 = (list(zip(alternatives, p_c0, [False]*3))
                 + list(zip(alternatives, reversed(p_c0), [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_c0)
    def test_against_R_c0(self, alternative, p_expected, rev):
        # Zero discordant pairs.
        x, y = [1, 2, 3], [1, 2, 3]
        stat_expected = 1
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_R_c1 = (list(zip(alternatives, p_c1, [False]*3))
                 + list(zip(alternatives, reversed(p_c1), [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_c1)
    def test_against_R_c1(self, alternative, p_expected, rev):
        # Exactly one discordant pair.
        x, y = [1, 2, 3, 4], [1, 2, 4, 3]
        stat_expected = 0.6666666666666667
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_R_no_corr = (list(zip(alternatives, p_no_correlation, [False]*3))
                      + list(zip(alternatives, reversed(p_no_correlation),
                                 [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_no_corr)
    def test_against_R_no_correlation(self, alternative, p_expected, rev):
        # Statistic exactly zero.
        x, y = [1, 2, 3, 4, 5], [1, 5, 4, 2, 3]
        stat_expected = 0
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_no_cor_b = (list(zip(alternatives, p_no_correlationb, [False]*3))
                     + list(zip(alternatives, reversed(p_no_correlationb),
                                [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_no_cor_b)
    def test_against_R_no_correlationb(self, alternative, p_expected, rev):
        # Statistic exactly zero, larger sample.
        x, y = [1, 2, 3, 4, 5, 6, 7, 8], [8, 6, 1, 3, 2, 5, 4, 7]
        stat_expected = 0
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_R_lt_171 = (list(zip(alternatives, p_n_lt_171, [False]*3))
                     + list(zip(alternatives, reversed(p_n_lt_171), [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171)
    def test_against_R_lt_171(self, alternative, p_expected, rev):
        # Data from Hollander & Wolfe (1973), p. 187f.
        # Used from https://rdrr.io/r/stats/cor.test.html
        x = [44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1]
        y = [2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8]
        stat_expected = 0.4444444444444445
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_R_lt_171b = (list(zip(alternatives, p_n_lt_171b, [False]*3))
                      + list(zip(alternatives, reversed(p_n_lt_171b),
                                 [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171b)
    def test_against_R_lt_171b(self, alternative, p_expected, rev):
        # Random data with n = 100 (still below the exact-method cutoff).
        np.random.seed(0)
        x = np.random.rand(100)
        y = np.random.rand(100)
        stat_expected = -0.04686868686868687
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_R_lt_171c = (list(zip(alternatives, p_n_lt_171c, [False]*3))
                      + list(zip(alternatives, reversed(p_n_lt_171c),
                                 [True]*3)))
    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171c)
    def test_against_R_lt_171c(self, alternative, p_expected, rev):
        # Random data with n = 170, just below the cutoff.
        np.random.seed(0)
        x = np.random.rand(170)
        y = np.random.rand(170)
        stat_expected = 0.1115906717716673
        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
    case_gt_171 = (list(zip(alternatives, [False]*3)) +
                   list(zip(alternatives, [True]*3)))
    @pytest.mark.parametrize("alternative, rev", case_gt_171)
    def test_gt_171(self, alternative, rev):
        # Above the cutoff: exact and asymptotic p-values should be close.
        np.random.seed(0)
        x = np.random.rand(400)
        y = np.random.rand(400)
        res0 = stats.kendalltau(x, y, method='exact',
                                alternative=alternative)
        res1 = stats.kendalltau(x, y, method='asymptotic',
                                alternative=alternative)
        assert_equal(res0[0], res1[0])
        assert_allclose(res0[1], res1[1], rtol=1e-3)
    @pytest.mark.parametrize("method", ('exact', 'asymptotic'))
    @pytest.mark.parametrize("alternative", ('two-sided', 'less', 'greater'))
    def test_nan_policy(self, method, alternative):
        # Test nan policies
        x1 = [1, 2, 3, 4, 5]
        x2 = [5, 6, 7, 8, 9]
        x1nan = x1 + [np.nan]
        x2nan = x2 + [np.nan]
        # test nan_policy="propagate"
        res_actual = stats.kendalltau(x1nan, x2nan,
                                      method=method, alternative=alternative)
        res_expected = (np.nan, np.nan)
        assert_allclose(res_actual, res_expected)
        # test nan_policy="omit"
        res_actual = stats.kendalltau(x1nan, x2nan, nan_policy='omit',
                                      method=method, alternative=alternative)
        res_expected = stats.kendalltau(x1, x2, method=method,
                                        alternative=alternative)
        assert_allclose(res_actual, res_expected)
        # test nan_policy="raise"
        message = 'The input contains nan values'
        with pytest.raises(ValueError, match=message):
            stats.kendalltau(x1nan, x2nan, nan_policy='raise',
                             method=method, alternative=alternative)
        # test invalid nan_policy
        message = "nan_policy must be one of..."
        with pytest.raises(ValueError, match=message):
            stats.kendalltau(x1nan, x2nan, nan_policy='ekki-ekki',
                             method=method, alternative=alternative)
def test_weightedtau():
    """Smoke/value tests of `stats.weightedtau` (p-value is always NaN)."""
    x = [12, 2, 1, 12, 2]
    y = [1, 4, 7, 1, 0]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.56694968153682723)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.weightedtau(x, y, additive=False)
    assert_approx_equal(tau, -0.62205716951801038)
    assert_equal(np.nan, p_value)
    # This must be exactly Kendall's tau
    tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1)
    assert_approx_equal(tau, -0.47140452079103173)
    assert_equal(np.nan, p_value)
    # test for namedtuple attribute results
    res = stats.weightedtau(x, y)
    attributes = ('correlation', 'pvalue')
    check_named_results(res, attributes)
    assert_equal(res.correlation, res.statistic)
    # Asymmetric, ranked version
    tau, p_value = stats.weightedtau(x, y, rank=None)
    assert_approx_equal(tau, -0.4157652301037516)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.weightedtau(y, x, rank=None)
    assert_approx_equal(tau, -0.7181341329699029)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.weightedtau(x, y, rank=None, additive=False)
    assert_approx_equal(tau, -0.40644850966246893)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.weightedtau(y, x, rank=None, additive=False)
    assert_approx_equal(tau, -0.83766582937355172)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.weightedtau(x, y, rank=False)
    assert_approx_equal(tau, -0.51604397940261848)
    assert_equal(np.nan, p_value)
    # This must be exactly Kendall's tau
    tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1)
    assert_approx_equal(tau, -0.47140452079103173)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1)
    assert_approx_equal(tau, -0.47140452079103173)
    assert_equal(np.nan, p_value)
    # Test argument conversion
    tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y)
    assert_approx_equal(tau, -0.56694968153682723)
    tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y)
    assert_approx_equal(tau, -0.56694968153682723)
    tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64))
    assert_approx_equal(tau, -0.56694968153682723)
    # All ties
    tau, p_value = stats.weightedtau([], [])
    assert_equal(np.nan, tau)
    assert_equal(np.nan, p_value)
    tau, p_value = stats.weightedtau([0], [0])
    assert_equal(np.nan, tau)
    assert_equal(np.nan, p_value)
    # Size mismatches
    assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2])
    assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0])
    # NaNs
    # NOTE(review): these cases rely on the NaN being ranked consistently
    # so that the statistic matches the NaN-free value above.
    x = [12, 2, 1, 12, 2]
    y = [1, 4, 7, 1, np.nan]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.56694968153682723)
    x = [12, 2, np.nan, 12, 2]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.56694968153682723)
    # NaNs when the dtype of x and y are all np.float64
    x = [12.0, 2.0, 1.0, 12.0, 2.0]
    y = [1.0, 4.0, 7.0, 1.0, np.nan]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.56694968153682723)
    x = [12.0, 2.0, np.nan, 12.0, 2.0]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.56694968153682723)
    # NaNs when there are more than one NaN in x or y
    x = [12.0, 2.0, 1.0, 12.0, 1.0]
    y = [1.0, 4.0, 7.0, 1.0, 1.0]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.6615242347139803)
    x = [12.0, 2.0, np.nan, 12.0, np.nan]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.6615242347139803)
    y = [np.nan, 4.0, 7.0, np.nan, np.nan]
    tau, p_value = stats.weightedtau(x, y)
    assert_approx_equal(tau, -0.6615242347139803)
def test_segfault_issue_9710():
    # https://github.com/scipy/scipy/issues/9710
    # Optimized builds segfaulted on the *second* call with a
    # single-element input, and also on a lone NaN -- just exercise both.
    stats.weightedtau([1], [1.0])
    stats.weightedtau([1], [1.0])
    stats.weightedtau([np.nan], [52])
def test_kendall_tau_large():
    # Exact method combined with nan_policy='omit' on more than 171 points.
    n = 172
    x = np.arange(n + 1, dtype=float)
    y = np.arange(n + 1, dtype=float)
    y[-1] = np.nan
    _, pval = stats.kendalltau(x, y, method='exact', nan_policy='omit')
    assert_equal(pval, 0.0)
def test_weightedtau_vs_quadratic():
    # Compare weightedtau against a brute-force O(n^2) reference
    # implementation with all parameters mandatory.
    def reference(x, y, rank, weigher, add):
        tot = conc = disc = u = v = 0
        for i, j in product(range(len(x)), repeat=2):
            if add:
                w = weigher(rank[i]) + weigher(rank[j])
            else:
                w = weigher(rank[i]) * weigher(rank[j])
            tot += w
            if x[i] == x[j]:
                u += w
            if y[i] == y[j]:
                v += w
            if x[i] != x[j] and y[i] != y[j]:
                # Concordant iff both pairs are ordered the same way.
                if (x[i] < x[j]) == (y[i] < y[j]):
                    conc += w
                else:
                    disc += w
        return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v)

    def weigher(r):
        return 1. / (r + 1)

    np.random.seed(42)
    for s in range(3, 10):
        # Rankings with ties: the value i occurs i times.
        a = [v for v in range(s) for _ in range(v)]
        b = list(a)
        np.random.shuffle(a)
        np.random.shuffle(b)
        # First pass uses element indices as ranks, second a shuffled rank.
        rank = np.arange(len(a), dtype=np.intp)
        for _ in range(2):
            for add in (True, False):
                want = reference(a, b, rank, weigher, add)
                got = stats.weightedtau(a, b, rank, weigher, add).statistic
                assert_approx_equal(want, got)
            np.random.shuffle(rank)
class TestFindRepeats:
    """Tests for stats.find_repeats."""

    def test_basic(self):
        # Repeated values and their multiplicities are returned in order.
        data = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]
        values, counts = stats.find_repeats(data)
        assert_array_equal(values, [1, 2, 3, 4])
        assert_array_equal(counts, [3, 3, 2, 2])

    def test_empty_result(self):
        # Without repeats -- and for empty input -- both arrays are empty.
        for data in ([10, 20, 50, 30, 40], []):
            values, counts = stats.find_repeats(data)
            assert_array_equal(values, [])
            assert_array_equal(counts, [])
class TestRegression:
    """Tests for `stats.linregress`.

    Several cases follow Wilkinson's statistics-package accuracy tests (the
    "W.x.y" labels); others check against R's ``lm``, NIST reference data,
    ``np.polyfit``, and edge cases (empty, NaN, constant input).
    """

    def test_linregressBIGX(self):
        # W.II.F. Regress BIG on X.
        result = stats.linregress(X, BIG)
        assert_almost_equal(result.intercept, 99999990)
        assert_almost_equal(result.rvalue, 1.0)
        # The uncertainty ought to be almost zero
        # since all points lie on a line
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

    def test_regressXX(self):
        # W.IV.B. Regress X on X.
        # The constant should be exactly 0 and the regression coefficient
        # should be 1. This is a perfectly valid regression and the
        # program should not complain.
        result = stats.linregress(X, X)
        assert_almost_equal(result.intercept, 0.0)
        assert_almost_equal(result.rvalue, 1.0)
        # The uncertainty on regression through two points ought to be 0
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

        # W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
        # should tell you that this model is "singular" because BIG and
        # LITTLE are linear combinations of each other. Cryptic error
        # messages are unacceptable here. Singularity is the most
        # fundamental regression error.
        #
        # Need to figure out how to handle multiple linear regression.
        # This is not obvious

    def test_regressZEROX(self):
        # W.IV.D. Regress ZERO on X.
        # The program should inform you that ZERO has no variance or it should
        # go ahead and compute the regression and report a correlation and
        # total sum of squares of exactly 0.
        result = stats.linregress(X, ZERO)
        assert_almost_equal(result.intercept, 0.0)
        assert_almost_equal(result.rvalue, 0.0)

    def test_regress_simple(self):
        # Regress a line with sinusoidal noise.
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))

        result = stats.linregress(x, y)
        lr = stats._stats_mstats_common.LinregressResult
        assert_(isinstance(result, lr))
        assert_almost_equal(result.stderr, 2.3957814497838803e-3)

    def test_regress_alternative(self):
        # test alternative parameter
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10  # slope is greater than zero
        y += np.sin(np.linspace(0, 20, 100))

        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            stats.linregress(x, y, alternative="ekki-ekki")

        res1 = stats.linregress(x, y, alternative="two-sided")

        # slope is greater than zero, so "less" p-value should be large
        res2 = stats.linregress(x, y, alternative="less")
        assert_allclose(res2.pvalue, 1 - (res1.pvalue / 2))

        # slope is greater than zero, so "greater" p-value should be small
        res3 = stats.linregress(x, y, alternative="greater")
        assert_allclose(res3.pvalue, res1.pvalue / 2)

        assert res1.rvalue == res2.rvalue == res3.rvalue

    def test_regress_against_R(self):
        # test against R `lm`
        # options(digits=16)
        # x <- c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
        # y <- c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
        # relation <- lm(y~x)
        # print(summary(relation))
        x = [151, 174, 138, 186, 128, 136, 179, 163, 152, 131]
        y = [63, 81, 56, 91, 47, 57, 76, 72, 62, 48]
        res = stats.linregress(x, y, alternative="two-sided")
        # expected values from R's `lm` above
        assert_allclose(res.slope, 0.6746104491292)
        assert_allclose(res.intercept, -38.4550870760770)
        assert_allclose(res.rvalue, np.sqrt(0.95478224775))
        assert_allclose(res.pvalue, 1.16440531074e-06)
        assert_allclose(res.stderr, 0.0519051424731)
        assert_allclose(res.intercept_stderr, 8.0490133029927)

    def test_regress_simple_onearg_rows(self):
        # Regress a line with sinusoidal noise,
        # with a single input of shape (2, N)
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))
        rows = np.vstack((x, y))

        result = stats.linregress(rows)
        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
        assert_almost_equal(result.intercept_stderr, 1.3866936078570702e-1)

    def test_regress_simple_onearg_cols(self):
        # Same as above but with a single (N, 2) input.
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))
        columns = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))

        result = stats.linregress(columns)
        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
        assert_almost_equal(result.intercept_stderr, 1.3866936078570702e-1)

    def test_regress_shape_error(self):
        # Check that a single input argument to linregress with wrong shape
        # results in a ValueError.
        assert_raises(ValueError, stats.linregress, np.ones((3, 3)))

    def test_linregress(self):
        # compared with multivariate ols with pinv
        x = np.arange(11)
        y = np.arange(5, 16)
        y[[(1), (-2)]] -= 1
        y[[(0), (-1)]] += 1

        result = stats.linregress(x, y)

        # This test used to use 'assert_array_almost_equal' but its
        # formulation got confusing since LinregressResult became
        # _lib._bunch._make_tuple_bunch instead of namedtuple
        # (for backwards compatibility, see PR #12983)
        def assert_ae(x, y):
            return assert_almost_equal(x, y, decimal=14)
        assert_ae(result.slope, 1.0)
        assert_ae(result.intercept, 5.0)
        assert_ae(result.rvalue, 0.98229948625750)
        assert_ae(result.pvalue, 7.45259691e-008)
        assert_ae(result.stderr, 0.063564172616372733)
        assert_ae(result.intercept_stderr, 0.37605071654517686)

    def test_regress_simple_negative_cor(self):
        # If the slope of the regression is negative the factor R tend
        # to -1 not 1. Sometimes rounding errors makes it < -1
        # leading to stderr being NaN.
        a, n = 1e-71, 100000
        x = np.linspace(a, 2 * a, n)
        y = np.linspace(2 * a, a, n)

        result = stats.linregress(x, y)

        # Make sure propagated numerical errors
        # did not bring rvalue below -1 (or were coerced)
        assert_(result.rvalue >= -1)
        assert_almost_equal(result.rvalue, -1)

        # slope and intercept stderror should stay numeric
        assert_(not np.isnan(result.stderr))
        assert_(not np.isnan(result.intercept_stderr))

    def test_linregress_result_attributes(self):
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))

        result = stats.linregress(x, y)

        # Result is of a correct class
        lr = stats._stats_mstats_common.LinregressResult
        assert_(isinstance(result, lr))

        # LinregressResult elements have correct names
        attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
        check_named_results(result, attributes)
        # Also check that the extra attribute (intercept_stderr) is present
        assert 'intercept_stderr' in dir(result)

    def test_regress_two_inputs(self):
        # Regress a simple line formed by two points.
        x = np.arange(2)
        y = np.arange(3, 5)

        result = stats.linregress(x, y)

        # Non-horizontal line
        assert_almost_equal(result.pvalue, 0.0)

        # Zero error through two points
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

    def test_regress_two_inputs_horizontal_line(self):
        # Regress a horizontal line formed by two points.
        x = np.arange(2)
        y = np.ones(2)

        result = stats.linregress(x, y)

        # Horizontal line
        assert_almost_equal(result.pvalue, 1.0)

        # Zero error through two points
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

    def test_nist_norris(self):
        # NIST StRD "Norris" linear regression certification dataset.
        x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0,
             558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1,
             995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0,
             11.1, 118.3, 229.2, 669.1, 448.9, 0.5]
        y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9,
             559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3,
             998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9,
             10.2, 117.6, 228.9, 668.4, 449.2, 0.2]

        result = stats.linregress(x, y)

        assert_almost_equal(result.slope, 1.00211681802045)
        assert_almost_equal(result.intercept, -0.262323073774029)
        assert_almost_equal(result.rvalue**2, 0.999993745883712)
        assert_almost_equal(result.pvalue, 0.0)
        assert_almost_equal(result.stderr, 0.00042979684820)
        assert_almost_equal(result.intercept_stderr, 0.23281823430153)

    def test_compare_to_polyfit(self):
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))

        result = stats.linregress(x, y)
        poly = np.polyfit(x, y, 1)  # Fit 1st degree polynomial

        # Make sure linear regression slope and intercept
        # match with results from numpy polyfit
        assert_almost_equal(result.slope, poly[0])
        assert_almost_equal(result.intercept, poly[1])

    def test_empty_input(self):
        assert_raises(ValueError, stats.linregress, [], [])

    def test_nan_input(self):
        x = np.arange(10.)
        x[9] = np.nan

        with np.errstate(invalid="ignore"):
            result = stats.linregress(x, x)

        # Make sure the result still comes back as `LinregressResult`
        lr = stats._stats_mstats_common.LinregressResult
        assert_(isinstance(result, lr))
        assert_array_equal(result, (np.nan,)*5)
        assert_equal(result.intercept_stderr, np.nan)

    def test_identical_x(self):
        # All-constant x has no defined slope; linregress must raise.
        x = np.zeros(10)
        y = np.random.random(10)
        msg = "Cannot calculate a linear regression"
        with assert_raises(ValueError, match=msg):
            stats.linregress(x, y)
def test_theilslopes():
    """Tests for stats.theilslopes: slope, intercept, confidence limits."""
    # Basic slope/intercept estimate (default method='separate').
    m, b, lo, hi = stats.theilslopes([0, 1, 1])
    assert_almost_equal(m, 0.5)
    assert_almost_equal(b, 0.5)

    # An unrecognized method must raise with an informative message.
    msg = ("method must be either 'joint' or 'separate'."
           "'joint_separate' is invalid.")
    with pytest.raises(ValueError, match=msg):
        stats.theilslopes([0, 1, 1], method='joint_separate')

    # The 'joint' method estimates the intercept differently.
    m, b, lo, hi = stats.theilslopes([0, 1, 1], method='joint')
    assert_almost_equal(m, 0.5)
    assert_almost_equal(b, 0.0)

    # Confidence intervals: both methods share slope and bounds but
    # differ in the intercept estimate.
    x = [1, 2, 3, 4, 10, 12, 18]
    y = [9, 15, 19, 20, 45, 55, 78]
    for method, expected_intercept in (('separate', 4.0), ('joint', 6.0)):
        m, b, lo, hi = stats.theilslopes(y, x, 0.07, method=method)
        assert_almost_equal(m, 4)
        assert_almost_equal(b, expected_intercept)
        assert_almost_equal(hi, 4.38, decimal=2)
        assert_almost_equal(lo, 3.71, decimal=2)
def test_cumfreq():
    """Tests for stats.cumfreq: cumulative counts, limits, extra points."""
    data = [1, 4, 2, 1, 3, 1]

    # Default limits: all points fall inside the bins.
    res = stats.cumfreq(data, numbins=4)
    assert_array_almost_equal(res[0], np.array([3., 4., 5., 6.]))

    # Restricting the limits pushes three points outside.
    cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(
        data, numbins=4, defaultreallimits=(1.5, 5))
    assert_(extrapoints == 3)

    # Result is a namedtuple with these fields.
    attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')
    res = stats.cumfreq(data, numbins=4, defaultreallimits=(1.5, 5))
    check_named_results(res, attributes)
def test_relfreq():
    """Tests for stats.relfreq: relative frequencies and result fields."""
    data = np.array([1, 4, 2, 1, 3, 1])
    relfreqs, lowlim, binsize, extrapoints = stats.relfreq(data, numbins=4)
    assert_array_almost_equal(
        relfreqs, np.array([0.5, 0.16666667, 0.16666667, 0.16666667]))

    # Result is a namedtuple with these fields.
    attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints')
    res = stats.relfreq(data, numbins=4)
    check_named_results(res, attributes)

    # array_like input (a plain list) is accepted and gives the same result.
    relfreqs2, lowlim, binsize, extrapoints = stats.relfreq(
        [1, 4, 2, 1, 3, 1], numbins=4)
    assert_array_almost_equal(relfreqs, relfreqs2)
class TestScoreatpercentile:
    """Tests for `stats.scoreatpercentile`: interpolation methods, limits,
    sequence percentiles, axis handling, and error/empty-input behavior."""

    def setup_method(self):
        # Shared sample data (not every test uses these).
        self.a1 = [3, 4, 5, 10, -3, -5, 6]
        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]

    def test_basic(self):
        x = arange(8) * 0.5
        assert_equal(stats.scoreatpercentile(x, 0), 0.)
        assert_equal(stats.scoreatpercentile(x, 100), 3.5)
        assert_equal(stats.scoreatpercentile(x, 50), 1.75)

    def test_fraction(self):
        scoreatperc = stats.scoreatpercentile

        # Test defaults
        assert_equal(scoreatperc(list(range(10)), 50), 4.5)
        assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)
        assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)

        # explicitly specify interpolation_method 'fraction' (the default)
        assert_equal(scoreatperc(list(range(10)), 50,
                                 interpolation_method='fraction'),
                     4.5)
        assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
                                 interpolation_method='fraction'),
                     4.5)
        assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
                                 interpolation_method='fraction'),
                     4.5)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),
                                 interpolation_method='fraction'),
                     55)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),
                                 interpolation_method='fraction'),
                     5.5)

    def test_lower_higher(self):
        scoreatperc = stats.scoreatpercentile

        # interpolation_method 'lower'/'higher'
        assert_equal(scoreatperc(list(range(10)), 50,
                                 interpolation_method='lower'), 4)
        assert_equal(scoreatperc(list(range(10)), 50,
                                 interpolation_method='higher'), 5)
        assert_equal(scoreatperc(list(range(10)), 50, (2,7),
                                 interpolation_method='lower'), 4)
        assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),
                                 interpolation_method='higher'), 5)
        assert_equal(scoreatperc(list(range(100)), 50, (1,8),
                                 interpolation_method='lower'), 4)
        assert_equal(scoreatperc(list(range(100)), 50, (1,8),
                                 interpolation_method='higher'), 5)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
                                 interpolation_method='lower'), 10)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
                                 interpolation_method='higher'), 100)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
                                 interpolation_method='lower'), 1)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
                                 interpolation_method='higher'), 10)

    def test_sequence_per(self):
        # A sequence of percentiles yields an ndarray of scores.
        x = arange(8) * 0.5
        expected = np.array([0, 3.5, 1.75])
        res = stats.scoreatpercentile(x, [0, 100, 50])
        assert_allclose(res, expected)
        assert_(isinstance(res, np.ndarray))
        # Test with ndarray. Regression test for gh-2861
        assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
                        expected)
        # Also test combination of 2-D array, axis not None and array-like per
        res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),
                                       np.array([0, 1, 100, 100]), axis=1)
        expected2 = array([[0, 4, 8],
                           [0.03, 4.03, 8.03],
                           [3, 7, 11],
                           [3, 7, 11]])
        assert_allclose(res2, expected2)

    def test_axis(self):
        scoreatperc = stats.scoreatpercentile
        x = arange(12).reshape(3, 4)

        # axis=None (default): percentiles over the flattened array.
        assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])

        r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
        assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)

        r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
        assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)

        x = array([[1, 1, 1],
                   [1, 1, 1],
                   [4, 4, 3],
                   [1, 1, 1],
                   [1, 1, 1]])
        # Scalar percentile: 0-d result for axis=None, 1-d per column.
        score = stats.scoreatpercentile(x, 50)
        assert_equal(score.shape, ())
        assert_equal(score, 1.0)
        score = stats.scoreatpercentile(x, 50, axis=0)
        assert_equal(score.shape, (3,))
        assert_equal(score, [1, 1, 1])

    def test_exception(self):
        # Invalid interpolation method and out-of-range percentiles raise.
        assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,
                      interpolation_method='foobar')
        assert_raises(ValueError, stats.scoreatpercentile, [1], 101)
        assert_raises(ValueError, stats.scoreatpercentile, [1], -1)

    def test_empty(self):
        # Empty input produces NaN(s), not an error.
        assert_equal(stats.scoreatpercentile([], 50), np.nan)
        assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)
        assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])
@pytest.mark.filterwarnings('ignore::FutureWarning')
class TestMode:
    """Tests for `stats.mode`: basic behavior, axis/keepdims handling,
    nan_policy settings, and several GitHub-issue regression cases."""

    def test_empty(self):
        vals, counts = stats.mode([])
        assert_equal(vals, np.array([]))
        assert_equal(counts, np.array([]))

    def test_scalar(self):
        vals, counts = stats.mode(4.)
        assert_equal(vals, np.array([4.]))
        assert_equal(counts, np.array([1]))

    def test_basic(self):
        data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
        vals = stats.mode(data1)
        assert_equal(vals[0], 6)
        assert_equal(vals[1], 3)

    def test_axes(self):
        data1 = [10, 10, 30, 40]
        data2 = [10, 10, 10, 10]
        data3 = [20, 10, 20, 20]
        data4 = [30, 30, 30, 30]
        data5 = [40, 30, 30, 30]
        arr = np.array([data1, data2, data3, data4, data5])

        # axis=None: mode over the flattened array.
        vals = stats.mode(arr, axis=None, keepdims=True)
        assert_equal(vals[0], np.array([[30]]))
        assert_equal(vals[1], np.array([[8]]))

        vals = stats.mode(arr, axis=0, keepdims=True)
        assert_equal(vals[0], np.array([[10, 10, 30, 30]]))
        assert_equal(vals[1], np.array([[2, 3, 3, 2]]))

        vals = stats.mode(arr, axis=1, keepdims=True)
        assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]]))
        assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]]))

    @pytest.mark.parametrize('axis', np.arange(-4, 0))
    def test_negative_axes_gh_15375(self, axis):
        # Negative axis must agree with the equivalent non-negative axis.
        np.random.seed(984213899)
        a = np.random.rand(10, 11, 12, 13)
        res0 = stats.mode(a, axis=a.ndim+axis)
        res1 = stats.mode(a, axis=axis)
        np.testing.assert_array_equal(res0, res1)

    def test_mode_result_attributes(self):
        data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
        data2 = []
        actual = stats.mode(data1)
        attributes = ('mode', 'count')
        check_named_results(actual, attributes)
        actual2 = stats.mode(data2)
        check_named_results(actual2, attributes)

    def test_mode_nan(self):
        data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
        actual = stats.mode(data1)
        assert_equal(actual, (6, 3))

        actual = stats.mode(data1, nan_policy='omit')
        assert_equal(actual, (6, 3))
        assert_raises(ValueError, stats.mode, data1, nan_policy='raise')
        assert_raises(ValueError, stats.mode, data1, nan_policy='foobar')

    @pytest.mark.parametrize("data", [
        [3, 5, 1, 1, 3],
        [3, np.nan, 5, 1, 1, 3],
        [3, 5, 1],
        [3, np.nan, 5, 1],
    ])
    @pytest.mark.parametrize('keepdims', [False, True])
    def test_smallest_equal(self, data, keepdims):
        # With tied counts, the smallest tied value is the reported mode.
        result = stats.mode(data, nan_policy='omit', keepdims=keepdims)
        if keepdims:
            assert_equal(result[0][0], 1)
        else:
            assert_equal(result[0], 1)

    @pytest.mark.parametrize('axis', np.arange(-3, 3))
    def test_mode_shape_gh_9955(self, axis, dtype=np.float64):
        # With keepdims=False, the reduced axis is dropped from the shape.
        rng = np.random.default_rng(984213899)
        a = rng.uniform(size=(3, 4, 5)).astype(dtype)
        res = stats.mode(a, axis=axis, keepdims=False)
        reference_shape = list(a.shape)
        reference_shape.pop(axis)
        np.testing.assert_array_equal(res.mode.shape, reference_shape)
        np.testing.assert_array_equal(res.count.shape, reference_shape)

    def test_nan_policy_propagate_gh_9815(self):
        # mode should treat np.nan as it would any other object when
        # nan_policy='propagate'
        a = [2, np.nan, 1, np.nan]
        if NumpyVersion(np.__version__) >= '1.21.0':
            res = stats.mode(a)
            assert np.isnan(res.mode) and res.count == 2

    def test_keepdims(self):
        # test empty arrays (handled by `np.mean`)
        a = np.zeros((1, 2, 3, 0))

        res = stats.mode(a, axis=1, keepdims=False)
        assert res.mode.shape == res.count.shape == (1, 3, 0)

        res = stats.mode(a, axis=1, keepdims=True)
        assert res.mode.shape == res.count.shape == (1, 1, 3, 0)

        # test nan_policy='propagate'
        a = [[1, 3, 3, np.nan], [1, 1, np.nan, 1]]

        res = stats.mode(a, axis=1, keepdims=False)
        assert_array_equal(res.mode, [3, 1])
        assert_array_equal(res.count, [2, 3])

        res = stats.mode(a, axis=1, keepdims=True)
        assert_array_equal(res.mode, [[3], [1]])
        assert_array_equal(res.count, [[2], [3]])

        a = np.array(a)
        res = stats.mode(a, axis=None, keepdims=False)
        ref = stats.mode(a.ravel(), keepdims=False)
        assert_array_equal(res, ref)
        assert res.mode.shape == ref.mode.shape == ()

        res = stats.mode(a, axis=None, keepdims=True)
        ref = stats.mode(a.ravel(), keepdims=True)
        assert_equal(res.mode.ravel(), ref.mode.ravel())
        assert res.mode.shape == (1, 1)
        assert_equal(res.count.ravel(), ref.count.ravel())
        assert res.count.shape == (1, 1)

        # test nan_policy='omit'
        a = [[1, np.nan, np.nan, np.nan, 1],
             [np.nan, np.nan, np.nan, np.nan, 2],
             [1, 2, np.nan, 5, 5]]

        res = stats.mode(a, axis=1, keepdims=False, nan_policy='omit')
        assert_array_equal(res.mode, [1, 2, 5])
        assert_array_equal(res.count, [2, 1, 2])

        res = stats.mode(a, axis=1, keepdims=True, nan_policy='omit')
        assert_array_equal(res.mode, [[1], [2], [5]])
        assert_array_equal(res.count, [[2], [1], [2]])

        a = np.array(a)
        res = stats.mode(a, axis=None, keepdims=False, nan_policy='omit')
        ref = stats.mode(a.ravel(), keepdims=False, nan_policy='omit')
        assert_array_equal(res, ref)
        assert res.mode.shape == ref.mode.shape == ()

        res = stats.mode(a, axis=None, keepdims=True, nan_policy='omit')
        ref = stats.mode(a.ravel(), keepdims=True, nan_policy='omit')
        assert_equal(res.mode.ravel(), ref.mode.ravel())
        assert res.mode.shape == (1, 1)
        assert_equal(res.count.ravel(), ref.count.ravel())
        assert res.count.shape == (1, 1)

    @pytest.mark.parametrize("nan_policy", ['propagate', 'omit'])
    def test_gh16955(self, nan_policy):
        # Check that bug reported in gh-16955 is resolved
        shape = (4, 3)
        data = np.ones(shape)
        data[0, 0] = np.nan
        res = stats.mode(a=data, axis=1, keepdims=False, nan_policy=nan_policy)
        assert_array_equal(res.mode, [1, 1, 1, 1])
        assert_array_equal(res.count, [2, 3, 3, 3])

        # Test with input from gh-16595. Support for non-numeric input
        # was deprecated, so check for the appropriate error.
        my_dtype = np.dtype([('asdf', np.uint8), ('qwer', np.float64, (3,))])
        test = np.zeros(10, dtype=my_dtype)
        with pytest.raises(TypeError, match="Argument `a` is not..."):
            stats.mode(test, nan_policy=nan_policy)

    def test_gh9955(self):
        # The behavior of mode with empty slices (whether the input was empty
        # or all elements were omitted) was inconsistent. Test that this is
        # resolved: the mode of an empty slice is NaN and the count is zero.
        res = stats.mode([])
        ref = (np.nan, 0)
        assert_equal(res, ref)

        res = stats.mode([np.nan], nan_policy='omit')
        assert_equal(res, ref)

        a = [[10., 20., 20.], [np.nan, np.nan, np.nan]]
        res = stats.mode(a, axis=1, nan_policy='omit')
        ref = ([20, np.nan], [2, 0])
        assert_equal(res, ref)

        if NumpyVersion(np.__version__) >= '1.21.0':
            res = stats.mode(a, axis=1, nan_policy='propagate')
            ref = ([20, np.nan], [2, 3])
            assert_equal(res, ref)

        z = np.array([[], []])
        res = stats.mode(z, axis=1)
        ref = ([np.nan, np.nan], [0, 0])
        assert_equal(res, ref)

    @pytest.mark.filterwarnings('ignore::RuntimeWarning')  # np.mean warns
    @pytest.mark.parametrize('z', [np.empty((0, 1, 2)), np.empty((1, 1, 2))])
    def test_gh17214(self, z):
        # axis=None with keepdims=True keeps all dimensions (like np.mean).
        res = stats.mode(z, axis=None, keepdims=True)
        ref = np.mean(z, axis=None, keepdims=True)
        assert res[0].shape == res[1].shape == ref.shape == (1, 1, 1)

    def test_raise_non_numeric_gh18254(self):
        # Non-numeric input must raise a TypeError.
        message = "Argument `a` is not recognized as numeric."

        class ArrLike():
            def __init__(self, x):
                self._x = x

            def __array__(self):
                return self._x.astype(object)

        with pytest.raises(TypeError, match=message):
            stats.mode(ArrLike(np.arange(3)))
        with pytest.raises(TypeError, match=message):
            stats.mode(np.arange(3, dtype=object))
class TestSEM:
    """Tests for stats.sem (standard error of the mean)."""
    testcase = [1, 2, 3, 4]
    scalar_testcase = 4.

    def test_sem(self):
        # A scalar input has no spread, so sem is NaN; suppress the
        # "Degrees of freedom <= 0" RuntimeWarning this provokes.
        with suppress_warnings() as sup, np.errstate(invalid="ignore"):
            sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
            sem_scalar = stats.sem(self.scalar_testcase)
        assert_(np.isnan(sem_scalar))

        # Reference value computed as sqrt(var(testcase)*3/4)/sqrt(3)
        # (this statistic is not available in R).
        assert_approx_equal(stats.sem(self.testcase), 0.6454972244)

        # Relationship between different ddof choices.
        n = len(self.testcase)
        assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
                        stats.sem(self.testcase, ddof=2))

        # NaN handling under the various nan_policy settings.
        data = np.arange(10.)
        data[9] = np.nan
        assert_equal(stats.sem(data), np.nan)
        assert_equal(stats.sem(data, nan_policy='omit'), 0.9128709291752769)
        assert_raises(ValueError, stats.sem, data, nan_policy='raise')
        assert_raises(ValueError, stats.sem, data, nan_policy='foobar')
class TestZmapZscore:
    """Tests for `stats.zmap`, `stats.zscore`, and `stats.gzscore`:
    axis/ddof handling, nan_policy settings, constant and empty input."""

    @pytest.mark.parametrize(
        'x, y',
        [([1, 2, 3, 4], [1, 2, 3, 4]),
         ([1, 2, 3], [0, 1, 2, 3, 4])]
    )
    def test_zmap(self, x, y):
        z = stats.zmap(x, y)
        # For these simple cases, calculate the expected result directly
        # by using the formula for the z-score.
        expected = (x - np.mean(y))/np.std(y)
        assert_allclose(z, expected, rtol=1e-12)

    def test_zmap_axis(self):
        # Test use of 'axis' keyword in zmap.
        x = np.array([[0.0, 0.0, 1.0, 1.0],
                      [1.0, 1.0, 1.0, 2.0],
                      [2.0, 0.0, 2.0, 0.0]])

        t1 = 1.0/np.sqrt(2.0/3)
        t2 = np.sqrt(3.)/3
        t3 = np.sqrt(2.)

        z0 = stats.zmap(x, x, axis=0)
        z1 = stats.zmap(x, x, axis=1)

        z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
                       [0.0, t3, -t3/2, t1],
                       [t1, -t3/2, t3, -t1]]
        z1_expected = [[-1.0, -1.0, 1.0, 1.0],
                       [-t2, -t2, -t2, np.sqrt(3.)],
                       [1.0, -1.0, 1.0, -1.0]]

        assert_array_almost_equal(z0, z0_expected)
        assert_array_almost_equal(z1, z1_expected)

    def test_zmap_ddof(self):
        # Test use of 'ddof' keyword in zmap.
        x = np.array([[0.0, 0.0, 1.0, 1.0],
                      [0.0, 1.0, 2.0, 3.0]])

        z = stats.zmap(x, x, axis=1, ddof=1)

        z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
        z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
        assert_array_almost_equal(z[0], z0_expected)
        assert_array_almost_equal(z[1], z1_expected)

    @pytest.mark.parametrize('ddof', [0, 2])
    def test_zmap_nan_policy_omit(self, ddof):
        # nans in `scores` are propagated, regardless of `nan_policy`.
        # `nan_policy` only affects how nans in `compare` are handled.
        scores = np.array([-3, -1, 2, np.nan])
        compare = np.array([-8, -3, 2, 7, 12, np.nan])
        z = stats.zmap(scores, compare, ddof=ddof, nan_policy='omit')
        assert_allclose(z, stats.zmap(scores, compare[~np.isnan(compare)],
                                      ddof=ddof))

    @pytest.mark.parametrize('ddof', [0, 2])
    def test_zmap_nan_policy_omit_with_axis(self, ddof):
        # Row-wise 'omit' must match per-row zmap on the NaN-stripped rows.
        scores = np.arange(-5.0, 9.0).reshape(2, -1)
        compare = np.linspace(-8, 6, 24).reshape(2, -1)
        compare[0, 4] = np.nan
        compare[0, 6] = np.nan
        compare[1, 1] = np.nan
        z = stats.zmap(scores, compare, nan_policy='omit', axis=1, ddof=ddof)
        expected = np.array([stats.zmap(scores[0],
                                        compare[0][~np.isnan(compare[0])],
                                        ddof=ddof),
                             stats.zmap(scores[1],
                                        compare[1][~np.isnan(compare[1])],
                                        ddof=ddof)])
        assert_allclose(z, expected, rtol=1e-14)

    def test_zmap_nan_policy_raise(self):
        scores = np.array([1, 2, 3])
        compare = np.array([-8, -3, 2, 7, 12, np.nan])
        with pytest.raises(ValueError, match='input contains nan'):
            stats.zmap(scores, compare, nan_policy='raise')

    def test_zscore(self):
        # not in R, so tested by using:
        # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
        y = stats.zscore([1, 2, 3, 4])
        desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996,
                    1.3416407864999])
        assert_array_almost_equal(desired, y, decimal=12)

    def test_zscore_axis(self):
        # Test use of 'axis' keyword in zscore.
        x = np.array([[0.0, 0.0, 1.0, 1.0],
                      [1.0, 1.0, 1.0, 2.0],
                      [2.0, 0.0, 2.0, 0.0]])

        t1 = 1.0/np.sqrt(2.0/3)
        t2 = np.sqrt(3.)/3
        t3 = np.sqrt(2.)

        z0 = stats.zscore(x, axis=0)
        z1 = stats.zscore(x, axis=1)

        z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
                       [0.0, t3, -t3/2, t1],
                       [t1, -t3/2, t3, -t1]]
        z1_expected = [[-1.0, -1.0, 1.0, 1.0],
                       [-t2, -t2, -t2, np.sqrt(3.)],
                       [1.0, -1.0, 1.0, -1.0]]

        assert_array_almost_equal(z0, z0_expected)
        assert_array_almost_equal(z1, z1_expected)

    def test_zscore_ddof(self):
        # Test use of 'ddof' keyword in zscore.
        x = np.array([[0.0, 0.0, 1.0, 1.0],
                      [0.0, 1.0, 2.0, 3.0]])

        z = stats.zscore(x, axis=1, ddof=1)

        z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
        z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
        assert_array_almost_equal(z[0], z0_expected)
        assert_array_almost_equal(z[1], z1_expected)

    def test_zscore_nan_propagate(self):
        # A single NaN makes every output element NaN under 'propagate'.
        x = np.array([1, 2, np.nan, 4, 5])
        z = stats.zscore(x, nan_policy='propagate')
        assert all(np.isnan(z))

    def test_zscore_nan_omit(self):
        # 'omit' computes the statistics without the NaN but keeps it in place.
        x = np.array([1, 2, np.nan, 4, 5])

        z = stats.zscore(x, nan_policy='omit')

        expected = np.array([-1.2649110640673518,
                             -0.6324555320336759,
                             np.nan,
                             0.6324555320336759,
                             1.2649110640673518
                             ])
        assert_array_almost_equal(z, expected)

    def test_zscore_nan_omit_with_ddof(self):
        x = np.array([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0])
        z = stats.zscore(x, ddof=1, nan_policy='omit')
        expected = np.r_[np.nan, stats.zscore(x[1:], ddof=1)]
        assert_allclose(z, expected, rtol=1e-13)

    def test_zscore_nan_raise(self):
        x = np.array([1, 2, np.nan, 4, 5])
        assert_raises(ValueError, stats.zscore, x, nan_policy='raise')

    def test_zscore_constant_input_1d(self):
        # Constant input has zero variance -> all-NaN output.
        x = [-0.087] * 3
        z = stats.zscore(x)
        assert_equal(z, np.full(len(x), np.nan))

    def test_zscore_constant_input_2d(self):
        x = np.array([[10.0, 10.0, 10.0, 10.0],
                      [10.0, 11.0, 12.0, 13.0]])
        z0 = stats.zscore(x, axis=0)
        assert_equal(z0, np.array([[np.nan, -1.0, -1.0, -1.0],
                                   [np.nan, 1.0, 1.0, 1.0]]))
        z1 = stats.zscore(x, axis=1)
        assert_equal(z1, np.array([[np.nan, np.nan, np.nan, np.nan],
                                   stats.zscore(x[1])]))
        z = stats.zscore(x, axis=None)
        assert_equal(z, stats.zscore(x.ravel()).reshape(x.shape))

        y = np.ones((3, 6))
        z = stats.zscore(y, axis=None)
        assert_equal(z, np.full(y.shape, np.nan))

    def test_zscore_constant_input_2d_nan_policy_omit(self):
        x = np.array([[10.0, 10.0, 10.0, 10.0],
                      [10.0, 11.0, 12.0, np.nan],
                      [10.0, 12.0, np.nan, 10.0]])
        z0 = stats.zscore(x, nan_policy='omit', axis=0)
        s = np.sqrt(3/2)
        s2 = np.sqrt(2)
        assert_allclose(z0, np.array([[np.nan, -s, -1.0, np.nan],
                                      [np.nan, 0, 1.0, np.nan],
                                      [np.nan, s, np.nan, np.nan]]))
        z1 = stats.zscore(x, nan_policy='omit', axis=1)
        assert_allclose(z1, np.array([[np.nan, np.nan, np.nan, np.nan],
                                      [-s, 0, s, np.nan],
                                      [-s2/2, s2, np.nan, -s2/2]]))

    def test_zscore_2d_all_nan_row(self):
        # A row is all nan, and we use axis=1.
        x = np.array([[np.nan, np.nan, np.nan, np.nan],
                      [10.0, 10.0, 12.0, 12.0]])
        z = stats.zscore(x, nan_policy='omit', axis=1)
        assert_equal(z, np.array([[np.nan, np.nan, np.nan, np.nan],
                                  [-1.0, -1.0, 1.0, 1.0]]))

    def test_zscore_2d_all_nan(self):
        # The entire 2d array is nan, and we use axis=None.
        y = np.full((2, 3), np.nan)
        z = stats.zscore(y, nan_policy='omit', axis=None)
        assert_equal(z, y)

    @pytest.mark.parametrize('x', [np.array([]), np.zeros((3, 0, 5))])
    def test_zscore_empty_input(self, x):
        # Empty input passes through unchanged.
        z = stats.zscore(x)
        assert_equal(z, x)

    def test_gzscore_normal_array(self):
        z = stats.gzscore([1, 2, 3, 4])
        desired = ([-1.526072095151, -0.194700599824, 0.584101799472,
                    1.136670895503])
        assert_allclose(desired, z)

    def test_gzscore_masked_array(self):
        # Masked entries are excluded from the geometric statistics.
        x = np.array([1, 2, -1, 3, 4])
        mx = np.ma.masked_array(x, mask=[0, 0, 1, 0, 0])
        z = stats.gzscore(mx)
        desired = ([-1.526072095151, -0.194700599824, np.inf, 0.584101799472,
                    1.136670895503])
        assert_allclose(desired, z)
class TestMedianAbsDeviation:
    """Tests for `stats.median_abs_deviation`: basic values, nan_policy
    settings, axis handling, and custom center functions."""

    def setup_class(self):
        # `dat_nan` ends with a NaN; `dat` is the same sample with the NaN
        # replaced by outliers (5.28 kept, 28.95 appended).
        self.dat_nan = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9,
                                 3.03, 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5,
                                 3.6, 3.7, 3.7, 3.7, 3.7, 3.77, 5.28, np.nan])
        self.dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
                             3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7,
                             3.7, 3.7, 3.7, 3.77, 5.28, 28.95])

    def test_median_abs_deviation(self):
        assert_almost_equal(stats.median_abs_deviation(self.dat, axis=None),
                            0.355)
        dat = self.dat.reshape(6, 4)
        mad = stats.median_abs_deviation(dat, axis=0)
        mad_expected = np.asarray([0.435, 0.5, 0.45, 0.4])
        assert_array_almost_equal(mad, mad_expected)

    def test_mad_nan_omit(self):
        mad = stats.median_abs_deviation(self.dat_nan, nan_policy='omit')
        assert_almost_equal(mad, 0.34)

    def test_axis_and_nan(self):
        # Default nan_policy ('propagate'): the row containing NaN yields NaN.
        x = np.array([[1.0, 2.0, 3.0, 4.0, np.nan],
                      [1.0, 4.0, 5.0, 8.0, 9.0]])
        mad = stats.median_abs_deviation(x, axis=1)
        assert_equal(mad, np.array([np.nan, 3.0]))

    # BUG FIX: first parameter was misspelled `sef`; it only worked because
    # pytest passes the instance positionally.
    def test_nan_policy_omit_with_inf(self):
        # 'omit' drops NaN but keeps inf, which the MAD is robust to.
        z = np.array([1, 3, 4, 6, 99, np.nan, np.inf])
        mad = stats.median_abs_deviation(z, nan_policy='omit')
        assert_equal(mad, 3.0)

    @pytest.mark.parametrize('axis', [0, 1, 2, None])
    def test_size_zero_with_axis(self, axis):
        # Empty slices produce NaN rather than raising.
        x = np.zeros((3, 0, 4))
        mad = stats.median_abs_deviation(x, axis=axis)
        assert_equal(mad, np.full_like(x.sum(axis=axis), fill_value=np.nan))

    @pytest.mark.parametrize('nan_policy, expected',
                             [('omit', np.array([np.nan, 1.5, 1.5])),
                              ('propagate', np.array([np.nan, np.nan, 1.5]))])
    def test_nan_policy_with_axis(self, nan_policy, expected):
        # All-NaN rows are NaN in both policies; partially-NaN rows differ.
        x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
                      [1, 5, 3, 6, np.nan, np.nan],
                      [5, 6, 7, 9, 9, 10]])
        mad = stats.median_abs_deviation(x, nan_policy=nan_policy, axis=1)
        assert_equal(mad, expected)

    @pytest.mark.parametrize('axis, expected',
                             [(1, [2.5, 2.0, 12.0]), (None, 4.5)])
    def test_center_mean_with_nan(self, axis, expected):
        # A custom `center` callable (here np.mean) combined with 'omit'.
        x = np.array([[1, 2, 4, 9, np.nan],
                      [0, 1, 1, 1, 12],
                      [-10, -10, -10, 20, 20]])
        mad = stats.median_abs_deviation(x, center=np.mean, nan_policy='omit',
                                         axis=axis)
        assert_allclose(mad, expected, rtol=1e-15, atol=1e-15)

    def test_center_not_callable(self):
        # `center` must be a callable, not a value.
        with pytest.raises(TypeError, match='callable'):
            stats.median_abs_deviation([1, 2, 3, 5], center=99)
def _check_warnings(warn_list, expected_type, expected_len):
"""
Checks that all of the warnings from a list returned by
`warnings.catch_all(record=True)` are of the required type and that the list
contains expected number of warnings.
"""
assert_equal(len(warn_list), expected_len, "number of warnings")
for warn_ in warn_list:
assert_(warn_.category is expected_type)
class TestIQR:
    """Tests for `stats.iqr` (interquartile range along an axis)."""

    def test_basic(self):
        # IQR is invariant to the order of the data.
        x = np.arange(8) * 0.5
        np.random.shuffle(x)
        assert_equal(stats.iqr(x), 1.75)

    def test_api(self):
        # Smoke-test the positional/keyword API surface; no value checks.
        d = np.ones((5, 5))
        stats.iqr(d)
        stats.iqr(d, None)
        stats.iqr(d, 1)
        stats.iqr(d, (0, 1))
        stats.iqr(d, None, (10, 90))
        stats.iqr(d, None, (30, 20), 1.0)
        stats.iqr(d, None, (25, 75), 1.5, 'propagate')
        stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')
        stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)

    def test_empty(self):
        # Empty input yields NaN rather than raising.
        assert_equal(stats.iqr([]), np.nan)
        assert_equal(stats.iqr(np.arange(0)), np.nan)

    def test_constant(self):
        # Constant array always gives 0
        x = np.ones((7, 4))
        assert_equal(stats.iqr(x), 0.0)
        assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))
        assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))
        # Every interpolation mode agrees on a constant input.
        assert_equal(stats.iqr(x, interpolation='linear'), 0.0)
        assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)
        assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)
        assert_equal(stats.iqr(x, interpolation='lower'), 0.0)
        assert_equal(stats.iqr(x, interpolation='higher'), 0.0)
        # 0 only along constant dimensions
        # This also tests much of `axis`
        y = np.ones((4, 5, 6)) * np.arange(6)
        assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))
        assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))
        assert_array_equal(stats.iqr(y, axis=2), np.full((4, 5), 2.5))
        assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))
        assert_array_equal(stats.iqr(y, axis=(0, 2)), np.full(5, 3.))
        assert_array_equal(stats.iqr(y, axis=(1, 2)), np.full(4, 3.))

    def test_scalarlike(self):
        # Scalar-like input: IQR of a single value is 0.
        x = np.arange(1) + 7.0
        assert_equal(stats.iqr(x[0]), 0.0)
        assert_equal(stats.iqr(x), 0.0)
        assert_array_equal(stats.iqr(x, keepdims=True), [0.0])

    def test_2D(self):
        x = np.arange(15).reshape((3, 5))
        assert_equal(stats.iqr(x), 7.0)
        assert_array_equal(stats.iqr(x, axis=0), np.full(5, 5.))
        assert_array_equal(stats.iqr(x, axis=1), np.full(3, 2.))
        # Tuple axes covering all dims match the flattened result.
        assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0)
        assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0)

    def test_axis(self):
        # The `axis` keyword is also put through its paces in `test_keepdims`.
        o = np.random.normal(size=(71, 23))
        x = np.dstack([o] * 10)  # x.shape = (71, 23, 10)
        q = stats.iqr(o)
        assert_equal(stats.iqr(x, axis=(0, 1)), q)
        x = np.moveaxis(x, -1, 0)  # x.shape = (10, 71, 23)
        assert_equal(stats.iqr(x, axis=(2, 1)), q)
        x = x.swapaxes(0, 1)  # x.shape = (71, 10, 23)
        assert_equal(stats.iqr(x, axis=(0, 2)), q)
        x = x.swapaxes(0, 1)  # x.shape = (10, 71, 23)
        assert_equal(stats.iqr(x, axis=(0, 1, 2)),
                     stats.iqr(x, axis=None))
        assert_equal(stats.iqr(x, axis=(0,)),
                     stats.iqr(x, axis=0))
        d = np.arange(3 * 5 * 7 * 11)
        # Older versions of numpy only shuffle along axis=0.
        # Not sure about newer, don't care.
        np.random.shuffle(d)
        d = d.reshape((3, 5, 7, 11))
        # Multi-axis reductions must match the IQR of the flattened slice.
        assert_equal(stats.iqr(d, axis=(0, 1, 2))[0],
                     stats.iqr(d[:,:,:, 0].ravel()))
        assert_equal(stats.iqr(d, axis=(0, 1, 3))[1],
                     stats.iqr(d[:,:, 1,:].ravel()))
        assert_equal(stats.iqr(d, axis=(3, 1, -4))[2],
                     stats.iqr(d[:,:, 2,:].ravel()))
        assert_equal(stats.iqr(d, axis=(3, 1, 2))[2],
                     stats.iqr(d[2,:,:,:].ravel()))
        assert_equal(stats.iqr(d, axis=(3, 2))[2, 1],
                     stats.iqr(d[2, 1,:,:].ravel()))
        assert_equal(stats.iqr(d, axis=(1, -2))[2, 1],
                     stats.iqr(d[2, :, :, 1].ravel()))
        assert_equal(stats.iqr(d, axis=(1, 3))[2, 2],
                     stats.iqr(d[2, :, 2,:].ravel()))
        # NOTE(review): np.AxisError moved to np.exceptions in NumPy 2.0 —
        # confirm against the supported NumPy range.
        assert_raises(np.AxisError, stats.iqr, d, axis=4)
        assert_raises(ValueError, stats.iqr, d, axis=(0, 0))

    def test_rng(self):
        # `rng` selects the percentile range; invalid ranges must raise.
        x = np.arange(5)
        assert_equal(stats.iqr(x), 2)
        assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5)
        assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5)
        assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6)  # 3-1.4
        assert_raises(ValueError, stats.iqr, x, rng=(0, 101))
        assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25))
        assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60))

    def test_interpolation(self):
        x = np.arange(5)
        y = np.arange(4)
        # Default
        assert_equal(stats.iqr(x), 2)
        assert_equal(stats.iqr(y), 1.5)
        # Linear
        assert_equal(stats.iqr(x, interpolation='linear'), 2)
        assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
        # Higher
        assert_equal(stats.iqr(x, interpolation='higher'), 2)
        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3)
        assert_equal(stats.iqr(y, interpolation='higher'), 2)
        # Lower (will generally, but not always be the same as higher)
        assert_equal(stats.iqr(x, interpolation='lower'), 2)
        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2)
        assert_equal(stats.iqr(y, interpolation='lower'), 2)
        # Nearest
        assert_equal(stats.iqr(x, interpolation='nearest'), 2)
        assert_equal(stats.iqr(y, interpolation='nearest'), 1)
        # Midpoint
        assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5)
        assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
        # Check all method= values new in numpy 1.22.0 are accepted
        if NumpyVersion(np.__version__) >= '1.22.0':
            for method in ('inverted_cdf', 'averaged_inverted_cdf',
                           'closest_observation', 'interpolated_inverted_cdf',
                           'hazen', 'weibull', 'median_unbiased',
                           'normal_unbiased'):
                stats.iqr(y, interpolation=method)
        assert_raises(ValueError, stats.iqr, x, interpolation='foobar')

    def test_keepdims(self):
        # Also tests most of `axis`
        x = np.ones((3, 5, 7, 11))
        assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ())
        assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11))
        assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11))
        assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7))
        assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11))
        assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ())
        assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,))
        # With keepdims=True, reduced axes survive as length-1 dimensions.
        assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1))
        assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11))
        assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
        assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1))
        assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11))
        assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
        assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))

    def test_nanpolicy(self):
        x = np.arange(15.0).reshape((3, 5))
        # No NaNs
        assert_equal(stats.iqr(x, nan_policy='propagate'), 7)
        assert_equal(stats.iqr(x, nan_policy='omit'), 7)
        assert_equal(stats.iqr(x, nan_policy='raise'), 7)
        # Yes NaNs
        x[1, 2] = np.nan
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan)
            assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])
            assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            assert_equal(stats.iqr(x, nan_policy='omit'), 7.5)
            assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), np.full(5, 5))
            assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2])
        assert_raises(ValueError, stats.iqr, x, nan_policy='raise')
        assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise')
        assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise')
        # Bad policy
        assert_raises(ValueError, stats.iqr, x, nan_policy='barfood')

    def test_scale(self):
        x = np.arange(15.0).reshape((3, 5))
        # No NaNs
        assert_equal(stats.iqr(x, scale=1.0), 7)
        # 1.3489795 is the IQR of the standard normal distribution.
        assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795)
        assert_equal(stats.iqr(x, scale=2.0), 3.5)
        # Yes NaNs
        x[1, 2] = np.nan
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            assert_equal(stats.iqr(x, scale=1.0, nan_policy='propagate'), np.nan)
            assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)
            assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)
            # axis=1 chosen to show behavior with both nans and without
            assert_equal(stats.iqr(x, axis=1, scale=1.0,
                                   nan_policy='propagate'), [2, np.nan, 2])
            assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
                                          nan_policy='propagate'),
                                np.array([2, np.nan, 2]) / 1.3489795)
            assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),
                         [1, np.nan, 1])
            # Since NumPy 1.17.0.dev, warnings are no longer emitted by
            # np.percentile with nans, so we don't check the number of
            # warnings here. See https://github.com/numpy/numpy/pull/12679.
            assert_equal(stats.iqr(x, scale=1.0, nan_policy='omit'), 7.5)
            assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
                                7.5 / 1.3489795)
            assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)
        # Bad scale
        assert_raises(ValueError, stats.iqr, x, scale='foobar')
class TestMoments:
    """
    Comparison numbers are found using R v.1.5.1
    note that length(testcase) = 4
    testmathworks comes from documentation for the
    Statistics Toolbox for Matlab and can be found at both
    https://www.mathworks.com/help/stats/kurtosis.html
    https://www.mathworks.com/help/stats/skewness.html
    Note that both test cases came from here.
    """
    testcase = [1, 2, 3, 4]
    scalar_testcase = 4.
    np.random.seed(1234)
    testcase_moment_accuracy = np.random.rand(42)
    testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]

    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
        # Helper: compare values and (optionally broadcast) shape and dtype.
        expect = np.asarray(expect)
        if shape is not None:
            expect = np.broadcast_to(expect, shape)
        assert_array_equal(actual, expect)
        if dtype is None:
            dtype = expect.dtype
        assert actual.dtype == dtype

    @pytest.mark.parametrize('size', [10, (10, 2)])
    @pytest.mark.parametrize('m, c', product((0, 1, 2, 3), (None, 0, 1)))
    def test_moment_center_scalar_moment(self, size, m, c):
        # A scalar `center` replaces the default mean-centering.
        rng = np.random.default_rng(6581432544381372042)
        x = rng.random(size=size)
        res = stats.moment(x, m, center=c)
        c = np.mean(x, axis=0) if c is None else c
        ref = np.sum((x - c)**m, axis=0)/len(x)
        assert_allclose(res, ref, atol=1e-16)

    @pytest.mark.parametrize('size', [10, (10, 2)])
    @pytest.mark.parametrize('c', (None, 0, 1))
    def test_moment_center_array_moment(self, size, c):
        # A list of moment orders equals computing each order separately.
        rng = np.random.default_rng(1706828300224046506)
        x = rng.random(size=size)
        m = [0, 1, 2, 3]
        res = stats.moment(x, m, center=c)
        ref = [stats.moment(x, i, center=c) for i in m]
        assert_equal(res, ref)

    def test_moment(self):
        # mean((testcase-mean(testcase))**power,axis=0),axis=0))**power))
        y = stats.moment(self.scalar_testcase)
        assert_approx_equal(y, 0.0)
        y = stats.moment(self.testcase, 0)
        assert_approx_equal(y, 1.0)
        y = stats.moment(self.testcase, 1)
        assert_approx_equal(y, 0.0, 10)
        y = stats.moment(self.testcase, 2)
        assert_approx_equal(y, 1.25)
        y = stats.moment(self.testcase, 3)
        assert_approx_equal(y, 0.0)
        y = stats.moment(self.testcase, 4)
        assert_approx_equal(y, 2.5625)
        # check array_like input for moment
        y = stats.moment(self.testcase, [1, 2, 3, 4])
        assert_allclose(y, [0, 1.25, 0, 2.5625])
        # check moment input consists only of integers
        y = stats.moment(self.testcase, 0.0)
        assert_approx_equal(y, 1.0)
        assert_raises(ValueError, stats.moment, self.testcase, 1.2)
        y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])
        assert_allclose(y, [0, 1.25, 0, 2.5625])
        # test empty input
        message = "Mean of empty slice."
        with pytest.warns(RuntimeWarning, match=message):
            y = stats.moment([])
            self._assert_equal(y, np.nan, dtype=np.float64)
            y = stats.moment(np.array([], dtype=np.float32))
            self._assert_equal(y, np.nan, dtype=np.float32)
            y = stats.moment(np.zeros((1, 0)), axis=0)
            self._assert_equal(y, [], shape=(0,), dtype=np.float64)
            y = stats.moment([[]], axis=1)
            self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
            y = stats.moment([[]], moment=[0, 1], axis=0)
            self._assert_equal(y, [], shape=(2, 0))
        # NaN handling under each nan_policy.
        x = np.arange(10.)
        x[9] = np.nan
        assert_equal(stats.moment(x, 2), np.nan)
        assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0)
        assert_raises(ValueError, stats.moment, x, nan_policy='raise')
        assert_raises(ValueError, stats.moment, x, nan_policy='foobar')

    @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.complex128])
    @pytest.mark.parametrize('expect, moment', [(0, 1), (1, 0)])
    def test_constant_moments(self, dtype, expect, moment):
        # Moments 0 and 1 have exact values (1 and 0) preserving the dtype.
        x = np.random.rand(5).astype(dtype)
        y = stats.moment(x, moment=moment)
        self._assert_equal(y, expect, dtype=dtype)
        y = stats.moment(np.broadcast_to(x, (6, 5)), axis=0, moment=moment)
        self._assert_equal(y, expect, shape=(5,), dtype=dtype)
        y = stats.moment(np.broadcast_to(x, (1, 2, 3, 4, 5)), axis=2,
                         moment=moment)
        self._assert_equal(y, expect, shape=(1, 2, 4, 5), dtype=dtype)
        y = stats.moment(np.broadcast_to(x, (1, 2, 3, 4, 5)), axis=None,
                         moment=moment)
        self._assert_equal(y, expect, shape=(), dtype=dtype)

    def test_moment_propagate_nan(self):
        # Check that the shape of the result is the same for inputs
        # with and without nans, cf gh-5817
        a = np.arange(8).reshape(2, -1).astype(float)
        a[1, 0] = np.nan
        mm = stats.moment(a, 2, axis=1, nan_policy="propagate")
        np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15)

    def test_moment_empty_moment(self):
        # tests moment with empty `moment` list
        with pytest.raises(ValueError, match=r"'moment' must be a scalar or a"
                                             r" non-empty 1D list/array."):
            stats.moment([1, 2, 3, 4], moment=[])

    def test_skewness(self):
        # Scalar test case
        y = stats.skew(self.scalar_testcase)
        assert np.isnan(y)
        # sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) /
        #     ((sqrt(var(testmathworks)*4/5))**3)/5
        y = stats.skew(self.testmathworks)
        assert_approx_equal(y, -0.29322304336607, 10)
        y = stats.skew(self.testmathworks, bias=0)
        assert_approx_equal(y, -0.437111105023940, 10)
        y = stats.skew(self.testcase)
        assert_approx_equal(y, 0.0, 10)
        # NaN handling under each nan_policy.
        x = np.arange(10.)
        x[9] = np.nan
        with np.errstate(invalid='ignore'):
            assert_equal(stats.skew(x), np.nan)
        assert_equal(stats.skew(x, nan_policy='omit'), 0.)
        assert_raises(ValueError, stats.skew, x, nan_policy='raise')
        assert_raises(ValueError, stats.skew, x, nan_policy='foobar')

    def test_skewness_scalar(self):
        # `skew` must return a scalar for 1-dim input
        assert_equal(stats.skew(arange(10)), 0.0)

    def test_skew_propagate_nan(self):
        # Check that the shape of the result is the same for inputs
        # with and without nans, cf gh-5817
        a = np.arange(8).reshape(2, -1).astype(float)
        a[1, 0] = np.nan
        with np.errstate(invalid='ignore'):
            s = stats.skew(a, axis=1, nan_policy="propagate")
        np.testing.assert_allclose(s, [0, np.nan], atol=1e-15)

    def test_skew_constant_value(self):
        # Skewness of a constant input should be zero even when the mean is not
        # exact (gh-13245)
        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
            a = np.repeat(-0.27829495, 10)
            assert np.isnan(stats.skew(a))
            assert np.isnan(stats.skew(a * float(2**50)))
            assert np.isnan(stats.skew(a / float(2**50)))
            assert np.isnan(stats.skew(a, bias=False))
            # similarly, from gh-11086:
            assert np.isnan(stats.skew([14.3]*7))
            assert np.isnan(stats.skew(1 + np.arange(-3, 4)*1e-16))

    def test_kurtosis(self):
        # Scalar test case
        y = stats.kurtosis(self.scalar_testcase)
        assert np.isnan(y)
        # sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
        # sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5
        # Set flags for axis = 0 and
        # fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab)
        y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
        assert_approx_equal(y, 2.1658856802973, 10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
        # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
        # The MATLAB docs imply that both should give Fisher's
        y = stats.kurtosis(self.testmathworks, fisher=0, bias=0)
        assert_approx_equal(y, 3.663542721189047, 10)
        y = stats.kurtosis(self.testcase, 0, 0)
        assert_approx_equal(y, 1.64)
        # NaN handling under each nan_policy.
        x = np.arange(10.)
        x[9] = np.nan
        assert_equal(stats.kurtosis(x), np.nan)
        assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000)
        assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise')
        assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar')

    def test_kurtosis_array_scalar(self):
        # 1-D input yields a scalar (np.float64), not a 0-d array.
        assert_equal(type(stats.kurtosis([1, 2, 3])), np.float64)

    def test_kurtosis_propagate_nan(self):
        # Check that the shape of the result is the same for inputs
        # with and without nans, cf gh-5817
        a = np.arange(8).reshape(2, -1).astype(float)
        a[1, 0] = np.nan
        k = stats.kurtosis(a, axis=1, nan_policy="propagate")
        np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15)

    def test_kurtosis_constant_value(self):
        # Kurtosis of a constant input should be zero, even when the mean is not
        # exact (gh-13245)
        a = np.repeat(-0.27829495, 10)
        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
            assert np.isnan(stats.kurtosis(a, fisher=False))
            assert np.isnan(stats.kurtosis(a * float(2**50), fisher=False))
            assert np.isnan(stats.kurtosis(a / float(2**50), fisher=False))
            assert np.isnan(stats.kurtosis(a, fisher=False, bias=False))

    def test_moment_accuracy(self):
        # 'moment' must have a small enough error compared to the slower
        #  but very accurate numpy.power() implementation.
        tc_no_mean = self.testcase_moment_accuracy - \
                     np.mean(self.testcase_moment_accuracy)
        assert_allclose(np.power(tc_no_mean, 42).mean(),
                        stats.moment(self.testcase_moment_accuracy, 42))

    def test_precision_loss_gh15554(self):
        # gh-15554 was one of several issues that have reported problems with
        # constant or near-constant input. We can't always fix these, but
        # make sure there's a warning.
        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
            rng = np.random.default_rng(34095309370)
            a = rng.random(size=(100, 10))
            a[:, 0] = 1.01
            stats.skew(a)[0]

    def test_empty_1d(self):
        # Empty input warns (mean of empty slice) rather than raising.
        message = "Mean of empty slice."
        with pytest.warns(RuntimeWarning, match=message):
            stats.skew([])
        with pytest.warns(RuntimeWarning, match=message):
            stats.kurtosis([])
class TestStudentTest:
    """Tests for `stats.ttest_1samp` (one-sample t-test).

    Reference statistics/p-values below were computed externally for the
    two small fixtures X1 and X2.
    """
    X1 = np.array([-1, 0, 1])
    X2 = np.array([0, 1, 2])
    T1_0 = 0          # t for X1 vs popmean 0
    P1_0 = 1
    T1_1 = -1.7320508075   # t for X1 vs popmean 1
    P1_1 = 0.22540333075
    T1_2 = -3.464102       # t for X1 vs popmean 2
    P1_2 = 0.0741799
    T2_0 = 1.732051        # t for X2 vs popmean 0
    P2_0 = 0.2254033
    # One-sided p-values derived from the two-sided P1_1.
    P1_1_l = P1_1 / 2
    P1_1_g = 1 - (P1_1 / 2)

    def test_onesample(self):
        with suppress_warnings() as sup, \
                np.errstate(invalid="ignore", divide="ignore"):
            sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
            # A single scalar observation has zero degrees of freedom -> NaN.
            t, p = stats.ttest_1samp(4., 3.)
        assert_(np.isnan(t))
        assert_(np.isnan(p))
        t, p = stats.ttest_1samp(self.X1, 0)
        assert_array_almost_equal(t, self.T1_0)
        assert_array_almost_equal(p, self.P1_0)
        res = stats.ttest_1samp(self.X1, 0)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes)
        t, p = stats.ttest_1samp(self.X2, 0)
        assert_array_almost_equal(t, self.T2_0)
        assert_array_almost_equal(p, self.P2_0)
        t, p = stats.ttest_1samp(self.X1, 1)
        assert_array_almost_equal(t, self.T1_1)
        assert_array_almost_equal(p, self.P1_1)
        t, p = stats.ttest_1samp(self.X1, 2)
        assert_array_almost_equal(t, self.T1_2)
        assert_array_almost_equal(p, self.P1_2)
        # check nan policy
        x = stats.norm.rvs(loc=5, scale=10, size=51, random_state=7654567)
        x[50] = np.nan
        with np.errstate(invalid="ignore"):
            assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan))
            assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'),
                                      (-1.6412624074367159, 0.107147027334048005))
            assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise')
            assert_raises(ValueError, stats.ttest_1samp, x, 5.0,
                          nan_policy='foobar')

    def test_1samp_alternative(self):
        # 'less'/'greater' halve the two-sided p; t is unchanged.
        assert_raises(ValueError, stats.ttest_1samp, self.X1, 0,
                      alternative="error")
        t, p = stats.ttest_1samp(self.X1, 1, alternative="less")
        assert_allclose(p, self.P1_1_l)
        assert_allclose(t, self.T1_1)
        t, p = stats.ttest_1samp(self.X1, 1, alternative="greater")
        assert_allclose(p, self.P1_1_g)
        assert_allclose(t, self.T1_1)

    @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
    def test_1samp_ci_1d(self, alternative):
        # test confidence interval method against reference values
        rng = np.random.default_rng(8066178009154342972)
        n = 10
        x = rng.normal(size=n, loc=1.5, scale=2)
        popmean = rng.normal()  # this shouldn't affect confidence interval
        # Reference values generated with R t.test:
        # options(digits=16)
        # x = c(2.75532884, 0.93892217, 0.94835861, 1.49489446, -0.62396595,
        #      -1.88019867, -1.55684465, 4.88777104, 5.15310979, 4.34656348)
        # t.test(x, conf.level=0.85, alternative='l')
        ref = {'two-sided': [0.3594423211709136, 2.9333455028290860],
               'greater': [0.7470806207371626, np.inf],
               'less': [-np.inf, 2.545707203262837]}
        res = stats.ttest_1samp(x, popmean=popmean, alternative=alternative)
        ci = res.confidence_interval(confidence_level=0.85)
        assert_allclose(ci, ref[alternative])
        assert_equal(res.df, n-1)

    def test_1samp_ci_iv(self):
        # test `confidence_interval` method input validation
        res = stats.ttest_1samp(np.arange(10), 0)
        message = '`confidence_level` must be a number between 0 and 1.'
        with pytest.raises(ValueError, match=message):
            res.confidence_interval(confidence_level=10)
class TestPercentileOfScore:
    """Tests for `stats.percentileofscore` across all `kind` modes,
    tie handling, NaN policies, and n-dimensional scores."""

    def f(self, *args, **kwargs):
        # Thin wrapper so every test routes through one call site.
        return stats.percentileofscore(*args, **kwargs)

    @pytest.mark.parametrize("kind, result", [("rank", 40),
                                              ("mean", 35),
                                              ("strict", 30),
                                              ("weak", 40)])
    def test_unique(self, kind, result):
        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        assert_equal(self.f(a, 4, kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", 45),
                                              ("mean", 40),
                                              ("strict", 30),
                                              ("weak", 50)])
    def test_multiple2(self, kind, result):
        # Score value appears twice in the data.
        a = [1, 2, 3, 4, 4, 5, 6, 7, 8, 9]
        assert_equal(self.f(a, 4, kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", 50),
                                              ("mean", 45),
                                              ("strict", 30),
                                              ("weak", 60)])
    def test_multiple3(self, kind, result):
        # Score value appears three times in the data.
        a = [1, 2, 3, 4, 4, 4, 5, 6, 7, 8]
        assert_equal(self.f(a, 4, kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", 30),
                                              ("mean", 30),
                                              ("strict", 30),
                                              ("weak", 30)])
    def test_missing(self, kind, result):
        # Score absent from the data: all kinds agree.
        a = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11]
        assert_equal(self.f(a, 4, kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", 40),
                                              ("mean", 35),
                                              ("strict", 30),
                                              ("weak", 40)])
    def test_large_numbers(self, kind, result):
        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        assert_equal(self.f(a, 40, kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", 50),
                                              ("mean", 45),
                                              ("strict", 30),
                                              ("weak", 60)])
    def test_large_numbers_multiple3(self, kind, result):
        a = [10, 20, 30, 40, 40, 40, 50, 60, 70, 80]
        assert_equal(self.f(a, 40, kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", 30),
                                              ("mean", 30),
                                              ("strict", 30),
                                              ("weak", 30)])
    def test_large_numbers_missing(self, kind, result):
        a = [10, 20, 30, 50, 60, 70, 80, 90, 100, 110]
        assert_equal(self.f(a, 40, kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", [0, 10, 100, 100]),
                                              ("mean", [0, 5, 95, 100]),
                                              ("strict", [0, 0, 90, 100]),
                                              ("weak", [0, 10, 100, 100])])
    def test_boundaries(self, kind, result):
        # Scores below, at, and above the data range.
        a = [10, 20, 30, 50, 60, 70, 80, 90, 100, 110]
        assert_equal(self.f(a, [0, 10, 110, 200], kind=kind), result)

    @pytest.mark.parametrize("kind, result", [("rank", [0, 10, 100]),
                                              ("mean", [0, 5, 95]),
                                              ("strict", [0, 0, 90]),
                                              ("weak", [0, 10, 100])])
    def test_inf(self, kind, result):
        # Infinite data and scores are handled like extreme values.
        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, +np.inf]
        assert_equal(self.f(a, [-np.inf, 1, +np.inf], kind=kind), result)

    # (policy, a, score, expected) tuples for the NaN-tolerant policies.
    cases = [("propagate", [], 1, np.nan),
             ("propagate", [np.nan], 1, np.nan),
             ("propagate", [np.nan], [0, 1, 2], [np.nan, np.nan, np.nan]),
             ("propagate", [1, 2], [1, 2, np.nan], [50, 100, np.nan]),
             ("omit", [1, 2, np.nan], [0, 1, 2], [0, 50, 100]),
             ("omit", [1, 2], [0, 1, np.nan], [0, 50, np.nan]),
             ("omit", [np.nan, np.nan], [0, 1, 2], [np.nan, np.nan, np.nan])]

    @pytest.mark.parametrize("policy, a, score, result", cases)
    def test_nans_ok(self, policy, a, score, result):
        assert_equal(self.f(a, score, nan_policy=policy), result)

    # nan_policy='raise' must reject NaN in either argument.
    cases = [
        ("raise", [1, 2, 3, np.nan], [1, 2, 3],
         "The input contains nan values"),
        ("raise", [1, 2, 3], [1, 2, 3, np.nan],
         "The input contains nan values"),
    ]

    @pytest.mark.parametrize("policy, a, score, message", cases)
    def test_nans_fail(self, policy, a, score, message):
        with assert_raises(ValueError, match=message):
            self.f(a, score, nan_policy=policy)

    @pytest.mark.parametrize("shape", [
        (6, ),
        (2, 3),
        (2, 1, 3),
        (2, 1, 1, 3),
    ])
    def test_nd(self, shape):
        # The output shape follows the shape of `score`.
        a = np.array([0, 1, 2, 3, 4, 5])
        scores = a.reshape(shape)
        results = scores*10
        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        assert_equal(self.f(a, scores, kind="rank"), results)
# Fixture record for power_divergence tests: inputs plus the expected
# statistic under each lambda_ family.
PowerDivCase = namedtuple('Case',  # type: ignore[name-match]
                          ['f_obs', 'f_exp', 'ddof', 'axis',
                           'chi2',     # Pearson's
                           'log',      # G-test (log-likelihood)
                           'mod_log',  # Modified log-likelihood
                           'cr',       # Cressie-Read (lambda=2/3)
                           ])
# The details of the first two elements in power_div_1d_cases are used
# in a test in TestPowerDivergence.  Check that code before making
# any changes here.
power_div_1d_cases = [
    # Use the default f_exp.
    PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None,
                 chi2=4,
                 log=2*(4*np.log(4/8) + 12*np.log(12/8)),
                 mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
                 cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
    # Give a non-uniform f_exp.
    PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None,
                 chi2=24,
                 log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)),
                 mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)),
                 cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) +
                     8*((8/2)**(2/3) - 1))/(5/9)),
    # f_exp is a scalar.
    PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None,
                 chi2=4,
                 log=2*(4*np.log(4/8) + 12*np.log(12/8)),
                 mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
                 cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
    # f_exp equal to f_obs.
    PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0,
                 chi2=0, log=0, mod_log=0, cr=0),
    ]
# Degenerate (empty-data) cases: every statistic is 0 or an empty array.
power_div_empty_cases = [
    # Shape is (0,)--a data set with length 0.  The computed
    # test statistic should be 0.
    PowerDivCase(f_obs=[],
                 f_exp=None, ddof=0, axis=0,
                 chi2=0, log=0, mod_log=0, cr=0),
    # Shape is (0, 3).  This is 3 data sets, but each data set has
    # length 0, so the computed test statistic should be [0, 0, 0].
    PowerDivCase(f_obs=np.array([[],[],[]]).T,
                 f_exp=None, ddof=0, axis=0,
                 chi2=[0, 0, 0],
                 log=[0, 0, 0],
                 mod_log=[0, 0, 0],
                 cr=[0, 0, 0]),
    # Shape is (3, 0).  This represents an empty collection of
    # data sets in which each data set has length 3.  The test
    # statistic should be an empty array.
    PowerDivCase(f_obs=np.array([[],[],[]]),
                 f_exp=None, ddof=0, axis=0,
                 chi2=[],
                 log=[],
                 mod_log=[],
                 cr=[]),
    ]
class TestPowerDivergence:
    """Tests for `stats.power_divergence` (and `stats.chisquare` when
    lambda_ corresponds to Pearson)."""

    def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_,
                               expected_stat):
        # Shared driver: compute the statistic, compare against the
        # expected value, and cross-check the p-value against chi2.sf.
        f_obs = np.asarray(f_obs)
        if axis is None:
            num_obs = f_obs.size
        else:
            # Broadcast f_obs against f_exp to get the effective shape.
            b = np.broadcast(f_obs, f_exp)
            num_obs = b.shape[axis]
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "Mean of empty slice")
            stat, p = stats.power_divergence(
                                f_obs=f_obs, f_exp=f_exp, ddof=ddof,
                                axis=axis, lambda_=lambda_)
            assert_allclose(stat, expected_stat)
            if lambda_ == 1 or lambda_ == "pearson":
                # Also test stats.chisquare.
                stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
                                          axis=axis)
                assert_allclose(stat, expected_stat)
        ddof = np.asarray(ddof)
        expected_p = stats.distributions.chi2.sf(expected_stat,
                                                 num_obs - 1 - ddof)
        assert_allclose(p, expected_p)

    def test_basic(self):
        # Each lambda_ family, by name and by numeric value.
        for case in power_div_1d_cases:
            self.check_power_divergence(
                   case.f_obs, case.f_exp, case.ddof, case.axis,
                   None, case.chi2)
            self.check_power_divergence(
                   case.f_obs, case.f_exp, case.ddof, case.axis,
                   "pearson", case.chi2)
            self.check_power_divergence(
                   case.f_obs, case.f_exp, case.ddof, case.axis,
                   1, case.chi2)
            self.check_power_divergence(
                   case.f_obs, case.f_exp, case.ddof, case.axis,
                   "log-likelihood", case.log)
            self.check_power_divergence(
                   case.f_obs, case.f_exp, case.ddof, case.axis,
                   "mod-log-likelihood", case.mod_log)
            self.check_power_divergence(
                   case.f_obs, case.f_exp, case.ddof, case.axis,
                   "cressie-read", case.cr)
            self.check_power_divergence(
                   case.f_obs, case.f_exp, case.ddof, case.axis,
                   2/3, case.cr)

    def test_basic_masked(self):
        # Same cases as test_basic but with masked-array observed counts.
        for case in power_div_1d_cases:
            mobs = np.ma.array(case.f_obs)
            self.check_power_divergence(
                   mobs, case.f_exp, case.ddof, case.axis,
                   None, case.chi2)
            self.check_power_divergence(
                   mobs, case.f_exp, case.ddof, case.axis,
                   "pearson", case.chi2)
            self.check_power_divergence(
                   mobs, case.f_exp, case.ddof, case.axis,
                   1, case.chi2)
            self.check_power_divergence(
                   mobs, case.f_exp, case.ddof, case.axis,
                   "log-likelihood", case.log)
            self.check_power_divergence(
                   mobs, case.f_exp, case.ddof, case.axis,
                   "mod-log-likelihood", case.mod_log)
            self.check_power_divergence(
                   mobs, case.f_exp, case.ddof, case.axis,
                   "cressie-read", case.cr)
            self.check_power_divergence(
                   mobs, case.f_exp, case.ddof, case.axis,
                   2/3, case.cr)

    def test_axis(self):
        case0 = power_div_1d_cases[0]
        case1 = power_div_1d_cases[1]
        f_obs = np.vstack((case0.f_obs, case1.f_obs))
        f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
                           case1.f_exp))
        # Check the four computational code paths in power_divergence
        # using a 2D array with axis=1.
        self.check_power_divergence(
               f_obs, f_exp, 0, 1,
               "pearson", [case0.chi2, case1.chi2])
        self.check_power_divergence(
               f_obs, f_exp, 0, 1,
               "log-likelihood", [case0.log, case1.log])
        self.check_power_divergence(
               f_obs, f_exp, 0, 1,
               "mod-log-likelihood", [case0.mod_log, case1.mod_log])
        self.check_power_divergence(
               f_obs, f_exp, 0, 1,
               "cressie-read", [case0.cr, case1.cr])
        # Reshape case0.f_obs to shape (2,2), and use axis=None.
        # The result should be the same.
        self.check_power_divergence(
               np.array(case0.f_obs).reshape(2, 2), None, 0, None,
               "pearson", case0.chi2)

    def test_ddof_broadcasting(self):
        # Test that ddof broadcasts correctly.
        # ddof does not affect the test statistic.  It is broadcast
        # with the computed test statistic for the computation of
        # the p value.
        case0 = power_div_1d_cases[0]
        case1 = power_div_1d_cases[1]
        # Create 4x2 arrays of observed and expected frequencies.
        f_obs = np.vstack((case0.f_obs, case1.f_obs)).T
        f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
                           case1.f_exp)).T
        expected_chi2 = [case0.chi2, case1.chi2]
        # ddof has shape (2, 1).  This is broadcast with the computed
        # statistic, so p will have shape (2,2).
        ddof = np.array([[0], [1]])
        stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)
        assert_allclose(stat, expected_chi2)
        # Compute the p values separately, passing in scalars for ddof.
        stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0])
        stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0])
        assert_array_equal(p, np.vstack((p0, p1)))

    def test_empty_cases(self):
        # Degenerate empty inputs from power_div_empty_cases.
        with warnings.catch_warnings():
            for case in power_div_empty_cases:
                self.check_power_divergence(
                       case.f_obs, case.f_exp, case.ddof, case.axis,
                       "pearson", case.chi2)
                self.check_power_divergence(
                       case.f_obs, case.f_exp, case.ddof, case.axis,
                       "log-likelihood", case.log)
                self.check_power_divergence(
                       case.f_obs, case.f_exp, case.ddof, case.axis,
                       "mod-log-likelihood", case.mod_log)
                self.check_power_divergence(
                       case.f_obs, case.f_exp, case.ddof, case.axis,
                       "cressie-read", case.cr)

    def test_power_divergence_result_attributes(self):
        # The result is a named tuple with 'statistic' and 'pvalue'.
        f_obs = power_div_1d_cases[0].f_obs
        f_exp = power_div_1d_cases[0].f_exp
        ddof = power_div_1d_cases[0].ddof
        axis = power_div_1d_cases[0].axis
        res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
                                     axis=axis, lambda_="pearson")
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes)

    def test_power_divergence_gh_12282(self):
        # The sums of observed and expected frequencies must match
        f_obs = np.array([[10, 20], [30, 20]])
        f_exp = np.array([[5, 15], [35, 25]])
        with assert_raises(ValueError, match='For each axis slice...'):
            stats.power_divergence(f_obs=[10, 20], f_exp=[30, 60])
        with assert_raises(ValueError, match='For each axis slice...'):
            stats.power_divergence(f_obs=f_obs, f_exp=f_exp, axis=1)
        stat, pval = stats.power_divergence(f_obs=f_obs, f_exp=f_exp)
        assert_allclose(stat, [5.71428571, 2.66666667])
        assert_allclose(pval, [0.01682741, 0.10247043])
def test_gh_chisquare_12282():
    # `chisquare` is currently implemented via power_divergence; keep a
    # basic guard (mirroring test_power_divergence_gh_12282) in case that
    # implementation detail ever changes.
    message = 'For each axis slice...'
    with assert_raises(ValueError, match=message):
        stats.chisquare(f_obs=[10, 20], f_exp=[30, 60])
@pytest.mark.parametrize("n, dtype", [(200, np.uint8), (1000000, np.int32)])
def test_chiquare_data_types_attributes(n, dtype):
    # Regression test for gh-10159 and gh-18368.
    observed = np.array([n, 0], dtype=dtype)
    expected = np.array([n // 2, n // 2], dtype=dtype)
    res = stats.chisquare(observed, expected)
    statistic, pvalue = res
    # With all mass in one of two equally-expected bins, the statistic
    # equals n exactly.
    assert_allclose(statistic, n, rtol=1e-13)
    # Attributes must be identical to the unpacked outputs - see gh-18368.
    assert_equal(res.statistic, statistic)
    assert_equal(res.pvalue, pvalue)
def test_chisquare_masked_arrays():
    # Test that chisquare/power_divergence handle masked arrays: masked
    # entries are excluded from both the statistic and the degrees of
    # freedom (note the use of mobs.count(...) - 1 below).
    obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T
    mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T
    mobs = np.ma.masked_array(obs, mask)
    expected_chisq = np.array([24.0, 0.5])
    # Expected G-statistics, computed by hand from the unmasked entries.
    expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),
                           2*(3*np.log(0.75) + 5*np.log(1.25))])
    chi2 = stats.distributions.chi2
    # Default axis (0): one statistic per column.
    chisq, p = stats.chisquare(mobs)
    mat.assert_array_equal(chisq, expected_chisq)
    mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
                                             mobs.count(axis=0) - 1))
    # Same data through power_divergence with the log-likelihood (G-test)
    # statistic.
    g, p = stats.power_divergence(mobs, lambda_='log-likelihood')
    mat.assert_array_almost_equal(g, expected_g, decimal=15)
    mat.assert_array_almost_equal(p, chi2.sf(expected_g,
                                             mobs.count(axis=0) - 1))
    # Transposed input with axis=1 must give the same results.
    chisq, p = stats.chisquare(mobs.T, axis=1)
    mat.assert_array_equal(chisq, expected_chisq)
    mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
                                             mobs.T.count(axis=1) - 1))
    g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood")
    mat.assert_array_almost_equal(g, expected_g, decimal=15)
    mat.assert_array_almost_equal(p, chi2.sf(expected_g,
                                             mobs.count(axis=0) - 1))
    # Masks in *either* f_obs or f_exp drop the corresponding pair.
    obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])
    exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])
    chi2, p = stats.chisquare(obs1, f_exp=exp1)
    # Because of the mask at index 3 of obs1 and at index 4 of exp1,
    # only the first three elements are included in the calculation
    # of the statistic.
    mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8)
    # When axis=None, the two values should have type np.float64.
    chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None)
    assert_(isinstance(chisq, np.float64))
    assert_(isinstance(p, np.float64))
    assert_equal(chisq, 1.0)
    assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2))
    # Empty arrays:
    # A data set with length 0 returns a masked scalar.
    with np.errstate(invalid='ignore'):
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "Mean of empty slice")
            chisq, p = stats.chisquare(np.ma.array([]))
    assert_(isinstance(chisq, np.ma.MaskedArray))
    assert_equal(chisq.shape, ())
    assert_(chisq.mask)
    empty3 = np.ma.array([[],[],[]])
    # empty3 is a collection of 0 data sets (whose lengths would be 3, if
    # there were any), so the return value is an array with length 0.
    chisq, p = stats.chisquare(empty3)
    assert_(isinstance(chisq, np.ma.MaskedArray))
    mat.assert_array_equal(chisq, [])
    # empty3.T is an array containing 3 data sets, each with length 0,
    # so an array of size (3,) is returned, with all values masked.
    with np.errstate(invalid='ignore'):
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "Mean of empty slice")
            chisq, p = stats.chisquare(empty3.T)
    assert_(isinstance(chisq, np.ma.MaskedArray))
    assert_equal(chisq.shape, (3,))
    assert_(np.all(chisq.mask))
def test_power_divergence_against_cressie_read_data():
    # Compare stats.power_divergence with tables 4 and 5 from
    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
    # J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464,
    # which report the statistic for several values of lambda.
    # Table 4 data recalculated for greater precision according to:
    # Shelby J. Haberman, Analysis of Qualitative Data: Volume 1
    # Introductory Topics, Academic Press, New York, USA (1978).
    counts = np.array([15, 11, 14, 17, 5, 11, 10, 4, 8,
                       10, 7, 9, 11, 3, 6, 1, 1, 4])
    beta = -0.083769  # Haberman (1978), p. 15
    index = np.arange(1, len(counts) + 1)
    alpha = np.log(counts.sum() / np.exp(beta*index).sum())
    # Fitted (expected) cell counts from the log-linear model; these are
    # the recalculated third column of Table 4.
    fitted = np.exp(alpha + beta*index)
    # (lambda, statistic) pairs from Table 5.
    reference = [
        (-10.0, 72.2e3),
        (-5.0, 28.9e1),
        (-3.0, 65.6),
        (-2.0, 40.6),
        (-1.5, 34.0),
        (-1.0, 29.5),
        (-0.5, 26.5),
        (0.0, 24.6),
        (0.5, 23.4),
        (0.67, 23.1),
        (1.0, 22.7),
        (1.5, 22.6),
        (2.0, 22.9),
        (3.0, 24.8),
        (5.0, 35.5),
        (10.0, 21.4e1),
    ]
    for lambda_, expected_stat in reference:
        stat, _ = stats.power_divergence(counts, fitted, lambda_=lambda_)
        assert_allclose(stat, expected_stat, rtol=5e-3)
def test_friedmanchisquare():
    """Test friedmanchisquare against published reference values."""
    # see ticket:113
    # verified with matlab and R
    # From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
    # 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
    x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
                 0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
          array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
                 0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
          array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
                 0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
          array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
                 0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
    # From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
    x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
          array([2,2,1,2,3,1,2,3,2,1,1,3]),
          array([2,4,3,3,4,3,3,4,4,1,2,1]),
          array([3,5,4,3,4,4,3,3,3,4,4,4])]
    # From Jerrold H. Zar, "Biostatistical Analysis"(example 12.6), Xf=10.68, 0.005 < p < 0.01:
    # Probability from this example is inexact using Chisquare approximation of Friedman Chisquare.
    x3 = [array([7.0,9.9,8.5,5.1,10.3]),
          array([5.3,5.7,4.7,3.5,7.7]),
          array([4.9,7.6,5.5,2.8,8.4]),
          array([8.8,8.9,8.1,3.3,9.1])]
    assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
                              (10.2283464566929, 0.0167215803284414))
    assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
                              (18.9428571428571, 0.000280938375189499))
    assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
                              (10.68, 0.0135882729582176))
    # Fewer than three samples is a ValueError.
    assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
    # test for namedtuple attribute results
    attributes = ('statistic', 'pvalue')
    res = stats.friedmanchisquare(*x1)
    check_named_results(res, attributes)
    # test using mstats
    assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],
                                                       x1[2], x1[3]),
                              (10.2283464566929, 0.0167215803284414))
    # the following fails
    # assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
    #                           (18.9428571428571, 0.000280938375189499))
    assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],
                                                       x3[2], x3[3]),
                              (10.68, 0.0135882729582176))
    assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1])
class TestKSTest:
    """Tests kstest and ks_1samp agree with K-S various sizes, alternatives, modes."""
    def _testOne(self, x, alternative, expected_statistic, expected_prob, mode='auto', decimal=14):
        # Helper: run kstest against the standard normal and compare the
        # (statistic, pvalue) pair with the expected values.
        result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
        expected = np.array([expected_statistic, expected_prob])
        assert_array_almost_equal(np.array(result), expected, decimal=decimal)
    def _test_kstest_and_ks1samp(self, x, alternative, mode='auto', decimal=14):
        # Helper: kstest and ks_1samp must produce the same result for the
        # same data, alternative and mode.
        result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
        result_1samp = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
        assert_array_almost_equal(np.array(result), result_1samp, decimal=decimal)
    def test_namedtuple_attributes(self):
        x = np.linspace(-1, 1, 9)
        # test for namedtuple attribute results
        attributes = ('statistic', 'pvalue')
        res = stats.kstest(x, 'norm')
        check_named_results(res, attributes)
    def test_agree_with_ks_1samp(self):
        x = np.linspace(-1, 1, 9)
        self._test_kstest_and_ks1samp(x, 'two-sided')
        x = np.linspace(-15, 15, 9)
        self._test_kstest_and_ks1samp(x, 'two-sided')
        x = [-1.23, 0.06, -0.60, 0.17, 0.66, -0.17, -0.08, 0.27, -0.98, -0.99]
        self._test_kstest_and_ks1samp(x, 'two-sided')
        self._test_kstest_and_ks1samp(x, 'greater', mode='exact')
        self._test_kstest_and_ks1samp(x, 'less', mode='exact')
    # missing: no test that uses *args
class TestKSOneSample:
    """Tests kstest and ks_samp 1-samples with K-S various sizes, alternatives, modes."""
    def _testOne(self, x, alternative, expected_statistic, expected_prob, mode='auto', decimal=14):
        # Helper: run ks_1samp against the standard normal CDF and compare
        # the (statistic, pvalue) pair with the expected values.
        result = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
        expected = np.array([expected_statistic, expected_prob])
        assert_array_almost_equal(np.array(result), expected, decimal=decimal)
    def test_namedtuple_attributes(self):
        x = np.linspace(-1, 1, 9)
        # test for namedtuple attribute results
        attributes = ('statistic', 'pvalue')
        res = stats.ks_1samp(x, stats.norm.cdf)
        check_named_results(res, attributes)
    def test_agree_with_r(self):
        # comparing with some values from R
        x = np.linspace(-1, 1, 9)
        self._testOne(x, 'two-sided', 0.15865525393145705, 0.95164069201518386)
        x = np.linspace(-15, 15, 9)
        self._testOne(x, 'two-sided', 0.44435602715924361, 0.038850140086788665)
        x = [-1.23, 0.06, -0.60, 0.17, 0.66, -0.17, -0.08, 0.27, -0.98, -0.99]
        self._testOne(x, 'two-sided', 0.293580126801961, 0.293408463684361)
        self._testOne(x, 'greater', 0.293580126801961, 0.146988835042376, mode='exact')
        self._testOne(x, 'less', 0.109348552425692, 0.732768892470675, mode='exact')
    def test_known_examples(self):
        # the following tests rely on deterministically replicated rvs
        x = stats.norm.rvs(loc=0.2, size=100, random_state=987654321)
        self._testOne(x, 'two-sided', 0.12464329735846891, 0.089444888711820769, mode='asymp')
        self._testOne(x, 'less', 0.12464329735846891, 0.040989164077641749)
        self._testOne(x, 'greater', 0.0072115233216310994, 0.98531158590396228)
    def test_ks1samp_allpaths(self):
        # Exercise the various code paths of the low-level kolmogn routine.
        # Check NaN input, output.
        assert_(np.isnan(kolmogn(np.nan, 1, True)))
        with assert_raises(ValueError, match='n is not integral: 1.5'):
            kolmogn(1.5, 1, True)
        assert_(np.isnan(kolmogn(-1, 1, True)))
        # Rows are (n, x, cdf, expected); the comments name the algorithm
        # branch each row is intended to hit.
        dataset = np.asarray([
            # Check x out of range
            (101, 1, True, 1.0),
            (101, 1.1, True, 1.0),
            (101, 0, True, 0.0),
            (101, -0.1, True, 0.0),
            (32, 1.0 / 64, True, 0.0),  # Ruben-Gambino
            (32, 1.0 / 64, False, 1.0),  # Ruben-Gambino
            (32, 0.5, True, 0.9999999363163307),  # Miller
            (32, 0.5, False, 6.368366937916623e-08),  # Miller 2 * special.smirnov(32, 0.5)
            # Check some other paths
            (32, 1.0 / 8, True, 0.34624229979775223),
            (32, 1.0 / 4, True, 0.9699508336558085),
            (1600, 0.49, False, 0.0),
            (1600, 1 / 16.0, False, 7.0837876229702195e-06),  # 2 * special.smirnov(1600, 1/16.0)
            (1600, 14 / 1600, False, 0.99962357317602),  # _kolmogn_DMTW
            (1600, 1 / 32, False, 0.08603386296651416),  # _kolmogn_PelzGood
        ])
        FuncData(kolmogn, dataset, (0, 1, 2), 3).check(dtypes=[int, float, bool])
    @pytest.mark.parametrize("ksfunc", [stats.kstest, stats.ks_1samp])
    @pytest.mark.parametrize("alternative, x6val, ref_location, ref_sign",
                             [('greater', 6, 6, +1),
                              ('less', 7, 7, -1),
                              ('two-sided', 6, 6, +1),
                              ('two-sided', 7, 7, -1)])
    def test_location_sign(self, ksfunc, alternative,
                           x6val, ref_location, ref_sign):
        # Test that location and sign corresponding with statistic are as
        # expected. (Test is designed to be easy to predict.)
        x = np.arange(10) + 0.5
        x[6] = x6val
        cdf = stats.uniform(scale=10).cdf
        res = ksfunc(x, cdf, alternative=alternative)
        assert_allclose(res.statistic, 0.1, rtol=1e-15)
        assert res.statistic_location == ref_location
        assert res.statistic_sign == ref_sign
    # missing: no test that uses *args
class TestKSTwoSamples:
    """Tests 2-samples with K-S various sizes, alternatives, modes."""
    def _testOne(self, x1, x2, alternative, expected_statistic, expected_prob, mode='auto'):
        # Helper: run ks_2samp and compare the (statistic, pvalue) pair
        # with the expected values.
        result = stats.ks_2samp(x1, x2, alternative, mode=mode)
        expected = np.array([expected_statistic, expected_prob])
        assert_array_almost_equal(np.array(result), expected)
    def testSmall(self):
        # Single-element samples in both orders.
        self._testOne([0], [1], 'two-sided', 1.0/1, 1.0)
        self._testOne([0], [1], 'greater', 1.0/1, 0.5)
        self._testOne([0], [1], 'less', 0.0/1, 1.0)
        self._testOne([1], [0], 'two-sided', 1.0/1, 1.0)
        self._testOne([1], [0], 'greater', 0.0/1, 1.0)
        self._testOne([1], [0], 'less', 1.0/1, 0.5)
    def testTwoVsThree(self):
        data1 = np.array([1.0, 2.0])
        data1p = data1 + 0.01
        data1m = data1 - 0.01
        data2 = np.array([1.0, 2.0, 3.0])
        self._testOne(data1p, data2, 'two-sided', 1.0 / 3, 1.0)
        self._testOne(data1p, data2, 'greater', 1.0 / 3, 0.7)
        self._testOne(data1p, data2, 'less', 1.0 / 3, 0.7)
        self._testOne(data1m, data2, 'two-sided', 2.0 / 3, 0.6)
        self._testOne(data1m, data2, 'greater', 2.0 / 3, 0.3)
        self._testOne(data1m, data2, 'less', 0, 1.0)
    def testTwoVsFour(self):
        data1 = np.array([1.0, 2.0])
        data1p = data1 + 0.01
        data1m = data1 - 0.01
        data2 = np.array([1.0, 2.0, 3.0, 4.0])
        self._testOne(data1p, data2, 'two-sided', 2.0 / 4, 14.0/15)
        self._testOne(data1p, data2, 'greater', 2.0 / 4, 8.0/15)
        self._testOne(data1p, data2, 'less', 1.0 / 4, 12.0/15)
        self._testOne(data1m, data2, 'two-sided', 3.0 / 4, 6.0/15)
        self._testOne(data1m, data2, 'greater', 3.0 / 4, 3.0/15)
        self._testOne(data1m, data2, 'less', 0, 1.0)
    def test100_100(self):
        x100 = np.linspace(1, 100, 100)
        x100_2_p1 = x100 + 2 + 0.1
        x100_2_m1 = x100 + 2 - 0.1
        self._testOne(x100, x100_2_p1, 'two-sided', 3.0 / 100, 0.9999999999962055)
        self._testOne(x100, x100_2_p1, 'greater', 3.0 / 100, 0.9143290114276248)
        self._testOne(x100, x100_2_p1, 'less', 0, 1.0)
        self._testOne(x100, x100_2_m1, 'two-sided', 2.0 / 100, 1.0)
        self._testOne(x100, x100_2_m1, 'greater', 2.0 / 100, 0.960978450786184)
        self._testOne(x100, x100_2_m1, 'less', 0, 1.0)
    def test100_110(self):
        x100 = np.linspace(1, 100, 100)
        x110 = np.linspace(1, 100, 110)
        x110_20_p1 = x110 + 20 + 0.1
        x110_20_m1 = x110 + 20 - 0.1
        # 100, 110
        self._testOne(x100, x110_20_p1, 'two-sided', 232.0 / 1100, 0.015739183865607353)
        self._testOne(x100, x110_20_p1, 'greater', 232.0 / 1100, 0.007869594319053203)
        self._testOne(x100, x110_20_p1, 'less', 0, 1)
        self._testOne(x100, x110_20_m1, 'two-sided', 229.0 / 1100, 0.017803803861026313)
        self._testOne(x100, x110_20_m1, 'greater', 229.0 / 1100, 0.008901905958245056)
        self._testOne(x100, x110_20_m1, 'less', 0.0, 1.0)
    def testRepeatedValues(self):
        # Ties within and across the two samples.
        x2233 = np.array([2] * 3 + [3] * 4 + [5] * 5 + [6] * 4, dtype=int)
        x3344 = x2233 + 1
        x2356 = np.array([2] * 3 + [3] * 4 + [5] * 10 + [6] * 4, dtype=int)
        x3467 = np.array([3] * 10 + [4] * 2 + [6] * 10 + [7] * 4, dtype=int)
        self._testOne(x2233, x3344, 'two-sided', 5.0/16, 0.4262934613454952)
        self._testOne(x2233, x3344, 'greater', 5.0/16, 0.21465428276573786)
        self._testOne(x2233, x3344, 'less', 0.0/16, 1.0)
        self._testOne(x2356, x3467, 'two-sided', 190.0/21/26, 0.0919245790168125)
        self._testOne(x2356, x3467, 'greater', 190.0/21/26, 0.0459633806858544)
        self._testOne(x2356, x3467, 'less', 70.0/21/26, 0.6121593130022775)
    def testEqualSizes(self):
        data2 = np.array([1.0, 2.0, 3.0])
        self._testOne(data2, data2+1, 'two-sided', 1.0/3, 1.0)
        self._testOne(data2, data2+1, 'greater', 1.0/3, 0.75)
        self._testOne(data2, data2+1, 'less', 0.0/3, 1.)
        self._testOne(data2, data2+0.5, 'two-sided', 1.0/3, 1.0)
        self._testOne(data2, data2+0.5, 'greater', 1.0/3, 0.75)
        self._testOne(data2, data2+0.5, 'less', 0.0/3, 1.)
        self._testOne(data2, data2-0.5, 'two-sided', 1.0/3, 1.0)
        self._testOne(data2, data2-0.5, 'greater', 0.0/3, 1.0)
        self._testOne(data2, data2-0.5, 'less', 1.0/3, 0.75)
    @pytest.mark.slow
    def testMiddlingBoth(self):
        # 500, 600
        n1, n2 = 500, 600
        delta = 1.0/n1/n2/2/2
        x = np.linspace(1, 200, n1) - delta
        y = np.linspace(2, 200, n2)
        self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='auto')
        self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='asymp')
        self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='asymp')
        self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='asymp')
        with suppress_warnings() as sup:
            message = "ks_2samp: Exact calculation unsuccessful."
            sup.filter(RuntimeWarning, message)
            self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='exact')
            self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
        # The exact mode falls back to asymptotic and must warn once.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
            _check_warnings(w, RuntimeWarning, 1)
    @pytest.mark.slow
    def testMediumBoth(self):
        # 1000, 1100
        n1, n2 = 1000, 1100
        delta = 1.0/n1/n2/2/2
        x = np.linspace(1, 200, n1) - delta
        y = np.linspace(2, 200, n2)
        self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='asymp')
        self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='auto')
        self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='asymp')
        self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='asymp')
        with suppress_warnings() as sup:
            message = "ks_2samp: Exact calculation unsuccessful."
            sup.filter(RuntimeWarning, message)
            self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='exact')
            self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
        # The exact mode falls back to asymptotic and must warn once.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
            _check_warnings(w, RuntimeWarning, 1)
    def testLarge(self):
        # 10000, 110
        n1, n2 = 10000, 110
        lcm = n1*11.0
        delta = 1.0/n1/n2/2/2
        x = np.linspace(1, 200, n1) - delta
        y = np.linspace(2, 100, n2)
        self._testOne(x, y, 'two-sided', 55275.0 / lcm, 4.2188474935755949e-15)
        self._testOne(x, y, 'greater', 561.0 / lcm, 0.99115454582047591)
        self._testOne(x, y, 'less', 55275.0 / lcm, 3.1317328311518713e-26)
    def test_gh11184(self):
        # 3000, 3001, exact two-sided
        np.random.seed(123456)
        x = np.random.normal(size=3000)
        y = np.random.normal(size=3001) * 1.5
        self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, mode='asymp')
        self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, mode='exact')
    @pytest.mark.xslow
    def test_gh11184_bigger(self):
        # 10000, 10001, exact two-sided
        np.random.seed(123456)
        x = np.random.normal(size=10000)
        y = np.random.normal(size=10001) * 1.5
        self._testOne(x, y, 'two-sided', 0.10597913208679133, 3.3149311398483503e-49, mode='asymp')
        self._testOne(x, y, 'two-sided', 0.10597913208679133, 2.7755575615628914e-15, mode='exact')
        self._testOne(x, y, 'greater', 0.10597913208679133, 2.7947433906389253e-41, mode='asymp')
        self._testOne(x, y, 'less', 0.09658002199780022, 2.7947433906389253e-41, mode='asymp')
    @pytest.mark.xslow
    def test_gh12999(self):
        # Exact and asymptotic p-values should agree to within a factor
        # of 3 over a range of sample sizes.
        np.random.seed(123456)
        for x in range(1000, 12000, 1000):
            vals1 = np.random.normal(size=(x))
            vals2 = np.random.normal(size=(x + 10), loc=0.5)
            exact = stats.ks_2samp(vals1, vals2, mode='exact').pvalue
            asymp = stats.ks_2samp(vals1, vals2, mode='asymp').pvalue
            # these two p-values should be in line with each other
            assert_array_less(exact, 3 * asymp)
            assert_array_less(asymp, 3 * exact)
    @pytest.mark.slow
    def testLargeBoth(self):
        # 10000, 11000
        n1, n2 = 10000, 11000
        lcm = n1*11.0
        delta = 1.0/n1/n2/2/2
        x = np.linspace(1, 200, n1) - delta
        y = np.linspace(2, 200, n2)
        self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576, mode='asymp')
        self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990456491488628, mode='exact')
        self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576, mode='auto')
        self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673)
        self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724)
        with suppress_warnings() as sup:
            message = "ks_2samp: Exact calculation unsuccessful."
            sup.filter(RuntimeWarning, message)
            self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673, mode='exact')
            self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724, mode='exact')
    def testNamedAttributes(self):
        # test for namedtuple attribute results
        attributes = ('statistic', 'pvalue')
        res = stats.ks_2samp([1, 2], [3])
        check_named_results(res, attributes)
    @pytest.mark.slow
    def test_some_code_paths(self):
        # Check that some code paths are executed
        from scipy.stats._stats_py import (
            _count_paths_outside_method,
            _compute_outer_prob_inside_method
        )
        _compute_outer_prob_inside_method(1, 1, 1, 1)
        _count_paths_outside_method(1000, 1, 1, 1001)
        # Overflow inside _count_paths_outside_method must surface as a
        # FloatingPointError when invalid operations raise.
        with np.errstate(invalid='raise'):
            assert_raises(FloatingPointError, _count_paths_outside_method,
                          1100, 1099, 1, 1)
            assert_raises(FloatingPointError, _count_paths_outside_method,
                          2000, 1000, 1, 1)
    def test_argument_checking(self):
        # Check that an empty array causes a ValueError
        assert_raises(ValueError, stats.ks_2samp, [], [1])
        assert_raises(ValueError, stats.ks_2samp, [1], [])
        assert_raises(ValueError, stats.ks_2samp, [], [])
    @pytest.mark.slow
    def test_gh12218(self):
        """Ensure gh-12218 is fixed."""
        # gh-12218 triggered a TypeError calculating sqrt(n1*n2*(n1+n2)).
        # n1, n2 both large integers, the product exceeded 2^64
        np.random.seed(12345678)
        n1 = 2097152  # 2**21
        rvs1 = stats.uniform.rvs(size=n1, loc=0., scale=1)
        rvs2 = rvs1 + 1  # Exact value of rvs2 doesn't matter.
        stats.ks_2samp(rvs1, rvs2, alternative='greater', mode='asymp')
        stats.ks_2samp(rvs1, rvs2, alternative='less', mode='asymp')
        stats.ks_2samp(rvs1, rvs2, alternative='two-sided', mode='asymp')
    def test_warnings_gh_14019(self):
        # Check that RuntimeWarning is raised when method='auto' and exact
        # p-value calculation fails. See gh-14019.
        rng = np.random.default_rng(abs(hash('test_warnings_gh_14019')))
        # random samples of the same size as in the issue
        data1 = rng.random(size=881) + 0.5
        data2 = rng.random(size=369)
        message = "ks_2samp: Exact calculation unsuccessful"
        with pytest.warns(RuntimeWarning, match=message):
            res = stats.ks_2samp(data1, data2, alternative='less')
            assert_allclose(res.pvalue, 0, atol=1e-14)
    @pytest.mark.parametrize("ksfunc", [stats.kstest, stats.ks_2samp])
    @pytest.mark.parametrize("alternative, x6val, ref_location, ref_sign",
                             [('greater', 5.9, 5.9, +1),
                              ('less', 6.1, 6.0, -1),
                              ('two-sided', 5.9, 5.9, +1),
                              ('two-sided', 6.1, 6.0, -1)])
    def test_location_sign(self, ksfunc, alternative,
                           x6val, ref_location, ref_sign):
        # Test that location and sign corresponding with statistic are as
        # expected. (Test is designed to be easy to predict.)
        x = np.arange(10, dtype=np.float64)
        y = x.copy()
        x[6] = x6val
        res = stats.ks_2samp(x, y, alternative=alternative)
        assert res.statistic == 0.1
        assert res.statistic_location == ref_location
        assert res.statistic_sign == ref_sign
def test_ttest_rel():
    """Exercise ttest_rel: values, shapes, alternatives, NaN handling."""
    # regression test
    tr,pr = 0.81248591389165692, 0.41846234511362157
    tpr = ([tr,-tr],[pr,pr])
    rvs1 = np.linspace(1,100,100)
    rvs2 = np.linspace(1.01,99.989,100)
    rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
    rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
    t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
    assert_array_almost_equal([t,p],(tr,pr))
    t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
    assert_array_almost_equal([t,p],tpr)
    t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
    assert_array_almost_equal([t,p],tpr)
    # test scalars
    with suppress_warnings() as sup, \
            np.errstate(invalid="ignore", divide="ignore"):
        sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
        t, p = stats.ttest_rel(4., 3.)
    assert_(np.isnan(t))
    assert_(np.isnan(p))
    # test for namedtuple attribute results
    attributes = ('statistic', 'pvalue')
    res = stats.ttest_rel(rvs1, rvs2, axis=0)
    check_named_results(res, attributes)
    # test on 3 dimensions
    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
    t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
    assert_array_almost_equal(np.abs(t), tr)
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (2, 3))
    t, p = stats.ttest_rel(np.moveaxis(rvs1_3D, 2, 0),
                           np.moveaxis(rvs2_3D, 2, 0),
                           axis=2)
    assert_array_almost_equal(np.abs(t), tr)
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (3, 2))
    # test alternative parameter
    assert_raises(ValueError, stats.ttest_rel, rvs1, rvs2, alternative="error")
    t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="less")
    assert_allclose(p, 1 - pr/2)
    assert_allclose(t, tr)
    t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="greater")
    assert_allclose(p, pr/2)
    assert_allclose(t, tr)
    # check nan policy
    rng = np.random.RandomState(12345678)
    x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng)
    x[500] = np.nan
    y = (stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng) +
         stats.norm.rvs(scale=0.2, size=501, random_state=rng))
    y[500] = np.nan
    with np.errstate(invalid="ignore"):
        assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))
    assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),
                              (0.25299925303978066, 0.8003729814201519))
    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')
    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')
    # test zero division problem
    with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
        t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])
    assert_equal((np.abs(t), p), (np.inf, 0))
    with np.errstate(invalid="ignore"):
        assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
    # check that nan in input array result in nan output
    anan = np.array([[1, np.nan], [-1, 1]])
    assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),
                 ([0, np.nan], [1, np.nan]))
    # test incorrect input shape raise an error
    x = np.arange(24)
    assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
                  x.reshape((2, 3, 4)))
    # Convert from two-sided p-values to one sided using T result data.
    def convert(t, p, alt):
        if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
            return p / 2
        return 1 - (p / 2)
    converter = np.vectorize(convert)
    # NOTE: rvs1_2D/rvs2_2D are mutated here; everything below relies on
    # these NaN blocks being present.
    rvs1_2D[:, 20:30] = np.nan
    rvs2_2D[:, 15:25] = np.nan
    tr, pr = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit')
    t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit',
                           alternative='less')
    assert_allclose(t, tr, rtol=1e-14)
    with np.errstate(invalid='ignore'):
        assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14)
    t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit',
                           alternative='greater')
    assert_allclose(t, tr, rtol=1e-14)
    with np.errstate(invalid='ignore'):
        assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14)
def test_ttest_rel_nan_2nd_arg():
    # Regression test for gh-6134: NaNs in the second argument were not
    # handled by nan_policy='omit'.
    with_nan = [np.nan, 2.0, 3.0, 4.0]
    clean = [1.0, 2.0, 1.0, 2.0]
    fwd = stats.ttest_rel(with_nan, clean, nan_policy='omit')
    rev = stats.ttest_rel(clean, with_nan, nan_policy='omit')
    # Swapping the samples flips the sign of t but not the p-value.
    assert_allclose(rev.statistic, -fwd.statistic, atol=1e-15)
    assert_allclose(rev.pvalue, fwd.pvalue, atol=1e-15)
    # NB: arguments are paired when NaNs are dropped, so omitting the
    # NaN pair up front gives the same answer.
    paired = stats.ttest_rel(clean[1:], with_nan[1:])
    assert_allclose(rev, paired, atol=1e-15)
    # .. and this is consistent with R. R code:
    # x = c(NA, 2.0, 3.0, 4.0)
    # y = c(1.0, 2.0, 1.0, 2.0)
    # t.test(x, y, paired=TRUE)
    assert_allclose(rev, (-2, 0.1835), atol=1e-4)
def test_ttest_rel_empty_1d_returns_nan():
    # Two empty inputs should produce a TtestResult whose statistic and
    # p-value are both NaN.
    res = stats.ttest_rel([], [])
    assert isinstance(res, stats._stats_py.TtestResult)
    assert_equal(res, (np.nan, np.nan))
@pytest.mark.parametrize('b, expected_shape',
                         [(np.empty((1, 5, 0)), (3, 5)),
                          (np.empty((1, 0, 0)), (3, 0))])
def test_ttest_rel_axis_size_zero(b, expected_shape):
    # The axis dimension has length zero, so the statistic and p-value
    # should be NaN arrays whose shape is the broadcast of the nonaxis
    # dimensions.
    a = np.empty((3, 1, 0))
    res = stats.ttest_rel(a, b, axis=-1)
    assert isinstance(res, stats._stats_py.TtestResult)
    nan_array = np.full(expected_shape, fill_value=np.nan)
    assert_equal(res.statistic, nan_array)
    assert_equal(res.pvalue, nan_array)
def test_ttest_rel_nonaxis_size_zero():
    # The axis dimension has nonzero length, but one of the nonaxis
    # dimensions has length 0.  The broadcast result shape should still
    # be produced correctly, which is (5, 0) in this case.
    a = np.empty((1, 8, 0))
    b = np.empty((5, 8, 1))
    res = stats.ttest_rel(a, b, axis=1)
    assert isinstance(res, stats._stats_py.TtestResult)
    assert_equal(res.statistic.shape, (5, 0))
    assert_equal(res.pvalue.shape, (5, 0))
@pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
def test_ttest_rel_ci_1d(alternative):
    # Check the confidence_interval method against reference values
    # generated with R t.test:
    # options(digits=16)
    # x = c(1.22825792, 1.63950485, 4.39025641, 0.68609437, 2.03813481,
    #       -1.20040109, 1.81997937, 1.86854636, 2.94694282, 3.94291373)
    # y = c(3.49961496, 1.53192536, 5.53620083, 2.91687718, 0.04858043,
    #       3.78505943, 3.3077496 , 2.30468892, 3.42168074, 0.56797592)
    # t.test(x, y, paired=TRUE, conf.level=0.85, alternative='l')
    rng = np.random.default_rng(3749065329432213059)
    n = 10
    x = rng.normal(size=n, loc=1.5, scale=2)
    y = rng.normal(size=n, loc=2, scale=2)
    ref = {'two-sided': [-1.912194489914035, 0.400169725914035],
           'greater': [-1.563944820311475, np.inf],
           'less': [-np.inf, 0.05192005631147523]}
    res = stats.ttest_rel(x, y, alternative=alternative)
    low, high = res.confidence_interval(confidence_level=0.85)
    assert_allclose((low, high), ref[alternative])
    assert_equal(res.df, n-1)
@pytest.mark.parametrize("test_fun, args",
                         [(stats.ttest_1samp, (np.arange(10), 0)),
                          (stats.ttest_rel, (np.arange(10), np.arange(10)))])
def test_ttest_ci_iv(test_fun, args):
    # `confidence_interval` must reject confidence levels outside (0, 1).
    result = test_fun(*args)
    message = '`confidence_level` must be a number between 0 and 1.'
    with pytest.raises(ValueError, match=message):
        result.confidence_interval(confidence_level=10)
def _desc_stats(x1, x2, axis=0):
def _stats(x, axis=0):
x = np.asarray(x)
mu = np.mean(x, axis=axis)
std = np.std(x, axis=axis, ddof=1)
nobs = x.shape[axis]
return mu, std, nobs
return _stats(x1, axis) + _stats(x2, axis)
def test_ttest_ind():
    """Exercise ttest_ind and ttest_ind_from_stats: values, shapes,
    alternatives, NaN handling."""
    # regression test
    tr = 1.0912746897927283
    pr = 0.27647818616351882
    tpr = ([tr,-tr],[pr,pr])
    rvs2 = np.linspace(1,100,100)
    rvs1 = np.linspace(5,105,100)
    rvs1_2D = np.array([rvs1, rvs2])
    rvs2_2D = np.array([rvs2, rvs1])
    t,p = stats.ttest_ind(rvs1, rvs2, axis=0)
    assert_array_almost_equal([t,p],(tr,pr))
    # test from_stats API
    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
                                                                      rvs2)),
                              [t, p])
    t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
    assert_array_almost_equal([t,p],tpr)
    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
    assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
                              [t, p])
    t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
    assert_array_almost_equal([t,p],tpr)
    args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
    assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
                              [t, p])
    # test scalars
    with suppress_warnings() as sup, np.errstate(invalid="ignore"):
        sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
        t, p = stats.ttest_ind(4., 3.)
    assert_(np.isnan(t))
    assert_(np.isnan(p))
    # test on 3 dimensions
    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
    t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)
    assert_almost_equal(np.abs(t), np.abs(tr))
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (2, 3))
    t, p = stats.ttest_ind(np.moveaxis(rvs1_3D, 2, 0),
                           np.moveaxis(rvs2_3D, 2, 0),
                           axis=2)
    assert_array_almost_equal(np.abs(t), np.abs(tr))
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (3, 2))
    # test alternative parameter
    assert_raises(ValueError, stats.ttest_ind, rvs1, rvs2, alternative="error")
    assert_raises(ValueError, stats.ttest_ind_from_stats,
                  *_desc_stats(rvs1_2D.T, rvs2_2D.T), alternative="error")
    t, p = stats.ttest_ind(rvs1, rvs2, alternative="less")
    assert_allclose(p, 1 - (pr/2))
    assert_allclose(t, tr)
    t, p = stats.ttest_ind(rvs1, rvs2, alternative="greater")
    assert_allclose(p, pr/2)
    assert_allclose(t, tr)
    # Below makes sure ttest_ind_from_stats p-val functions identically to
    # ttest_ind
    t, p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="less")
    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
    assert_allclose(
        stats.ttest_ind_from_stats(*args, alternative="less"), [t, p])
    t, p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="greater")
    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
    assert_allclose(
        stats.ttest_ind_from_stats(*args, alternative="greater"), [t, p])
    # check nan policy
    rng = np.random.RandomState(12345678)
    x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng)
    x[500] = np.nan
    y = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
    with np.errstate(invalid="ignore"):
        assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan))
    assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'),
                              (0.24779670949091914, 0.80434267337517906))
    assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise')
    assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar')
    # test zero division problem
    with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
        t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1])
    assert_equal((np.abs(t), p), (np.inf, 0))
    with np.errstate(invalid="ignore"):
        assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
    # check that nan in input array result in nan output
    anan = np.array([[1, np.nan], [-1, 1]])
    assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))),
                 ([0, np.nan], [1, np.nan]))
    # NOTE: rvs1_3D/rvs2_3D are mutated here; the checks below rely on
    # these NaN blocks being present.
    rvs1_3D[:, :, 10:15] = np.nan
    rvs2_3D[:, :, 6:12] = np.nan
    # Convert from two-sided p-values to one sided using T result data.
    def convert(t, p, alt):
        if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
            return p / 2
        return 1 - (p / 2)
    converter = np.vectorize(convert)
    tr, pr = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit')
    t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit',
                           alternative='less')
    assert_allclose(t, tr, rtol=1e-14)
    assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14)
    t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit',
                           alternative='greater')
    assert_allclose(t, tr, rtol=1e-14)
    assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14)
class Test_ttest_ind_permutations():
    # Tests of `stats.ttest_ind` with the `permutations` argument, which
    # replaces the t-distribution p-value with one computed from a
    # (randomized or exhaustive) permutation distribution of the statistic.
    N = 20

    # data for most tests
    np.random.seed(0)
    a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4)))
    b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4)))

    # data for equal variance tests
    a2 = np.arange(10)
    b2 = np.arange(10) + 100

    # data for exact test
    a3 = [1, 2]
    b3 = [3, 4]

    # data for bigger test
    np.random.seed(0)
    rvs1 = stats.norm.rvs(loc=5, scale=10,  # type: ignore
                          size=500).reshape(100, 5).T
    rvs2 = stats.norm.rvs(loc=8, scale=20, size=100)  # type: ignore

    p_d = [1/1001, (676+1)/1001]  # desired pvalues
    p_d_gen = [1/1001, (672 + 1)/1001]  # desired pvalues for Generator seed
    p_d_big = [(993+1)/1001, (685+1)/1001, (840+1)/1001,
               (955+1)/1001, (255+1)/1001]

    params = [
        (a, b, {"axis": 1}, p_d),                     # basic test
        (a.T, b.T, {'axis': 0}, p_d),                 # along axis 0
        (a[0, :], b[0, :], {'axis': None}, p_d[0]),   # 1d data
        (a[0, :].tolist(), b[0, :].tolist(), {'axis': None}, p_d[0]),
        # different seeds
        (a, b, {'random_state': 0, "axis": 1}, p_d),
        (a, b, {'random_state': np.random.RandomState(0), "axis": 1}, p_d),
        (a2, b2, {'equal_var': True}, 1/1001),  # equal variances
        (rvs1, rvs2, {'axis': -1, 'random_state': 0}, p_d_big),  # bigger test
        (a3, b3, {}, 1/3),  # exact test
        (a, b, {'random_state': np.random.default_rng(0), "axis": 1}, p_d_gen),
        ]

    @pytest.mark.parametrize("a,b,update,p_d", params)
    def test_ttest_ind_permutations(self, a, b, update, p_d):
        options_a = {'axis': None, 'equal_var': False}
        options_p = {'axis': None, 'equal_var': False,
                     'permutations': 1000, 'random_state': 0}
        options_a.update(update)
        options_p.update(update)

        stat_a, _ = stats.ttest_ind(a, b, **options_a)
        stat_p, pvalue = stats.ttest_ind(a, b, **options_p)
        # The permutation test uses the same statistic as the asymptotic
        # test; only the p-value is computed differently.
        assert_array_almost_equal(stat_a, stat_p, 5)
        assert_array_almost_equal(pvalue, p_d)

    def test_ttest_ind_exact_alternative(self):
        np.random.seed(0)
        N = 3
        a = np.random.rand(2, N, 2)
        b = np.random.rand(2, N, 2)

        options_p = {'axis': 1, 'permutations': 1000}

        options_p.update(alternative="greater")
        res_g_ab = stats.ttest_ind(a, b, **options_p)
        res_g_ba = stats.ttest_ind(b, a, **options_p)

        options_p.update(alternative="less")
        res_l_ab = stats.ttest_ind(a, b, **options_p)
        res_l_ba = stats.ttest_ind(b, a, **options_p)

        options_p.update(alternative="two-sided")
        res_2_ab = stats.ttest_ind(a, b, **options_p)
        res_2_ba = stats.ttest_ind(b, a, **options_p)

        # Alternative doesn't affect the statistic
        assert_equal(res_g_ab.statistic, res_l_ab.statistic)
        assert_equal(res_g_ab.statistic, res_2_ab.statistic)

        # Reversing order of inputs negates statistic
        assert_equal(res_g_ab.statistic, -res_g_ba.statistic)
        assert_equal(res_l_ab.statistic, -res_l_ba.statistic)
        assert_equal(res_2_ab.statistic, -res_2_ba.statistic)

        # Reversing order of inputs does not affect p-value of 2-sided test
        assert_equal(res_2_ab.pvalue, res_2_ba.pvalue)

        # In exact test, distribution is perfectly symmetric, so these
        # identities are exactly satisfied.
        assert_equal(res_g_ab.pvalue, res_l_ba.pvalue)
        assert_equal(res_l_ab.pvalue, res_g_ba.pvalue)

        mask = res_g_ab.pvalue <= 0.5
        assert_equal(res_g_ab.pvalue[mask] + res_l_ba.pvalue[mask],
                     res_2_ab.pvalue[mask])
        assert_equal(res_l_ab.pvalue[~mask] + res_g_ba.pvalue[~mask],
                     res_2_ab.pvalue[~mask])

    def test_ttest_ind_exact_selection(self):
        # test the various ways of activating the exact test
        np.random.seed(0)
        N = 3
        a = np.random.rand(N)
        b = np.random.rand(N)
        res0 = stats.ttest_ind(a, b)
        res1 = stats.ttest_ind(a, b, permutations=1000)
        res2 = stats.ttest_ind(a, b, permutations=0)
        res3 = stats.ttest_ind(a, b, permutations=np.inf)
        assert res1.pvalue != res0.pvalue
        assert res2.pvalue == res0.pvalue
        assert res3.pvalue == res1.pvalue

    def test_ttest_ind_exact_distribution(self):
        # the exact distribution of the test statistic should have
        # binom(na + nb, na) elements, all unique. This was not always true
        # in gh-4824; fixed by gh-13661.
        np.random.seed(0)
        a = np.random.rand(3)
        b = np.random.rand(4)

        data = np.concatenate((a, b))
        na, nb = len(a), len(b)

        permutations = 100000
        t_stat, _, _ = _permutation_distribution_t(data, permutations, na,
                                                   True)

        n_unique = len(set(t_stat))
        assert n_unique == binom(na + nb, na)
        assert len(t_stat) == n_unique

    def test_ttest_ind_randperm_alternative(self):
        np.random.seed(0)
        N = 50
        a = np.random.rand(2, 3, N)
        b = np.random.rand(3, N)
        options_p = {'axis': -1, 'permutations': 1000, "random_state": 0}

        options_p.update(alternative="greater")
        res_g_ab = stats.ttest_ind(a, b, **options_p)
        res_g_ba = stats.ttest_ind(b, a, **options_p)

        options_p.update(alternative="less")
        res_l_ab = stats.ttest_ind(a, b, **options_p)
        res_l_ba = stats.ttest_ind(b, a, **options_p)

        # Alternative doesn't affect the statistic
        assert_equal(res_g_ab.statistic, res_l_ab.statistic)

        # Reversing order of inputs negates statistic
        assert_equal(res_g_ab.statistic, -res_g_ba.statistic)
        assert_equal(res_l_ab.statistic, -res_l_ba.statistic)

        # For random permutations, the chance of ties between the observed
        # test statistic and the population is small, so:
        assert_equal(res_g_ab.pvalue + res_l_ab.pvalue,
                     1 + 1/(options_p['permutations'] + 1))
        assert_equal(res_g_ba.pvalue + res_l_ba.pvalue,
                     1 + 1/(options_p['permutations'] + 1))

    @pytest.mark.slow()
    def test_ttest_ind_randperm_alternative2(self):
        np.random.seed(0)
        N = 50
        a = np.random.rand(N, 4)
        b = np.random.rand(N, 4)
        options_p = {'permutations': 20000, "random_state": 0}

        options_p.update(alternative="greater")
        res_g_ab = stats.ttest_ind(a, b, **options_p)

        options_p.update(alternative="less")
        res_l_ab = stats.ttest_ind(a, b, **options_p)

        options_p.update(alternative="two-sided")
        res_2_ab = stats.ttest_ind(a, b, **options_p)

        # For random permutations, the chance of ties between the observed
        # test statistic and the population is small, so:
        assert_equal(res_g_ab.pvalue + res_l_ab.pvalue,
                     1 + 1/(options_p['permutations'] + 1))

        # For for large sample sizes, the distribution should be approximately
        # symmetric, so these identities should be approximately satisfied
        mask = res_g_ab.pvalue <= 0.5
        assert_allclose(2 * res_g_ab.pvalue[mask],
                        res_2_ab.pvalue[mask], atol=2e-2)
        assert_allclose(2 * (1-res_g_ab.pvalue[~mask]),
                        res_2_ab.pvalue[~mask], atol=2e-2)
        assert_allclose(2 * res_l_ab.pvalue[~mask],
                        res_2_ab.pvalue[~mask], atol=2e-2)
        assert_allclose(2 * (1-res_l_ab.pvalue[mask]),
                        res_2_ab.pvalue[mask], atol=2e-2)

    def test_ttest_ind_permutation_nanpolicy(self):
        np.random.seed(0)
        N = 50
        a = np.random.rand(N, 5)
        b = np.random.rand(N, 5)
        a[5, 1] = np.nan
        b[8, 2] = np.nan
        a[9, 3] = np.nan
        b[9, 3] = np.nan
        options_p = {'permutations': 1000, "random_state": 0}

        # Raise
        options_p.update(nan_policy="raise")
        with assert_raises(ValueError, match="The input contains nan values"):
            res = stats.ttest_ind(a, b, **options_p)

        # Propagate
        with suppress_warnings() as sup:
            sup.record(RuntimeWarning, "invalid value*")
            options_p.update(nan_policy="propagate")
            res = stats.ttest_ind(a, b, **options_p)

            mask = np.isnan(a).any(axis=0) | np.isnan(b).any(axis=0)
            res2 = stats.ttest_ind(a[:, ~mask], b[:, ~mask], **options_p)

            assert_equal(res.pvalue[mask], np.nan)
            assert_equal(res.statistic[mask], np.nan)

            assert_allclose(res.pvalue[~mask], res2.pvalue)
            assert_allclose(res.statistic[~mask], res2.statistic)

            # Propagate 1d
            res = stats.ttest_ind(a.ravel(), b.ravel(), **options_p)
            assert np.isnan(res.pvalue)  # assert makes sure it's a scalar
            assert np.isnan(res.statistic)

    def test_ttest_ind_permutation_check_inputs(self):
        with assert_raises(ValueError, match="Permutations must be"):
            stats.ttest_ind(self.a2, self.b2, permutations=-3)
        with assert_raises(ValueError, match="Permutations must be"):
            stats.ttest_ind(self.a2, self.b2, permutations=1.5)
        with assert_raises(ValueError, match="'hello' cannot be used"):
            stats.ttest_ind(self.a, self.b, permutations=1,
                            random_state='hello', axis=1)

    def test_ttest_ind_permutation_check_p_values(self):
        # p-values should never be exactly zero
        N = 10
        a = np.random.rand(N, 20)
        b = np.random.rand(N, 20)
        p_values = stats.ttest_ind(a, b, permutations=1).pvalue
        # (removed leftover debug `print`; the assertion below is the check)
        assert 0.0 not in p_values
class Test_ttest_ind_common:
    # for tests that are performed on variations of the t-test such as
    # permutations and trimming
    @pytest.mark.slow()
    @pytest.mark.parametrize("kwds", [{'permutations': 200, 'random_state': 0},
                                      {'trim': .2}, {}],
                             ids=["permutations", "trim", "basic"])
    @pytest.mark.parametrize('equal_var', [True, False],
                             ids=['equal_var', 'unequal_var'])
    def test_ttest_many_dims(self, kwds, equal_var):
        # Test that test works on many-dimensional arrays
        np.random.seed(0)
        a = np.random.rand(5, 4, 4, 7, 1, 6)
        b = np.random.rand(4, 1, 8, 2, 6)
        res = stats.ttest_ind(a, b, axis=-3, **kwds)

        # compare fully-vectorized t-test against t-test on smaller slice
        i, j, k = 2, 3, 1
        a2 = a[i, :, j, :, 0, :]
        b2 = b[:, 0, :, k, :]
        res2 = stats.ttest_ind(a2, b2, axis=-2, **kwds)
        assert_equal(res.statistic[i, :, j, k, :],
                     res2.statistic)
        assert_equal(res.pvalue[i, :, j, k, :],
                     res2.pvalue)

        # compare against t-test on one axis-slice at a time

        # manually broadcast with tile; move axis to end to simplify
        x = np.moveaxis(np.tile(a, (1, 1, 1, 1, 2, 1)), -3, -1)
        y = np.moveaxis(np.tile(b, (5, 1, 4, 1, 1, 1)), -3, -1)
        shape = x.shape[:-1]
        statistics = np.zeros(shape)
        pvalues = np.zeros(shape)
        for indices in product(*(range(i) for i in shape)):
            xi = x[indices]  # use tuple to index single axis slice
            yi = y[indices]
            res3 = stats.ttest_ind(xi, yi, axis=-1, **kwds)
            statistics[indices] = res3.statistic
            pvalues[indices] = res3.pvalue

        assert_allclose(statistics, res.statistic)
        assert_allclose(pvalues, res.pvalue)

    # BUG FIX: the `ids` list previously read ["trim", "permutations",
    # "basic"], mislabeling the first two parameter sets (the first dict is
    # the `permutations` case, the second the `trim` case).  The order now
    # matches the params, consistent with `test_ttest_many_dims` above.
    @pytest.mark.parametrize("kwds", [{'permutations': 200, 'random_state': 0},
                                      {'trim': .2}, {}],
                             ids=["permutations", "trim", "basic"])
    @pytest.mark.parametrize("axis", [-1, 0])
    def test_nans_on_axis(self, kwds, axis):
        # confirm that with `nan_policy='propagate'`, NaN results are returned
        # on the correct location
        a = np.random.randint(10, size=(5, 3, 10)).astype('float')
        b = np.random.randint(10, size=(5, 3, 10)).astype('float')
        # set some indices in `a` and `b` to be `np.nan`.
        a[0][2][3] = np.nan
        b[2][0][6] = np.nan

        # arbitrarily use `np.sum` as a baseline for which indices should be
        # NaNs
        expected = np.isnan(np.sum(a + b, axis=axis))
        # multidimensional inputs to `t.sf(np.abs(t), df)` with NaNs on some
        # indices throws an warning. See issue gh-13844
        with suppress_warnings() as sup, np.errstate(invalid="ignore"):
            sup.filter(RuntimeWarning,
                       "invalid value encountered in less_equal")
            sup.filter(RuntimeWarning, "Precision loss occurred")
            res = stats.ttest_ind(a, b, axis=axis, **kwds)
        p_nans = np.isnan(res.pvalue)
        assert_array_equal(p_nans, expected)
        statistic_nans = np.isnan(res.statistic)
        assert_array_equal(statistic_nans, expected)
class Test_ttest_trim:
    # Tests of `stats.ttest_ind` with the `trim` argument (Yuen's trimmed
    # t-test).  Each row of `params` is (a, b, expected pvalue, expected
    # statistic, trim fraction); reference values were computed with R's
    # PairedData::yuen.t.test (see docstrings below).
    params = [
        [[1, 2, 3], [1.1, 2.9, 4.2], 0.53619490753126731, -0.6864951273557258,
         .2],
        [[56, 128.6, 12, 123.8, 64.34, 78, 763.3], [1.1, 2.9, 4.2],
         0.00998909252078421, 4.591598691181999, .2],
        [[56, 128.6, 12, 123.8, 64.34, 78, 763.3], [1.1, 2.9, 4.2],
         0.10512380092302633, 2.832256715395378, .32],
        [[2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9],
         [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1],
         0.002878909511344, -4.2461168970325, .2],
        [[-0.84504783, 0.13366078, 3.53601757, -0.62908581, 0.54119466,
          -1.16511574, -0.08836614, 1.18495416, 2.48028757, -1.58925028,
          -1.6706357, 0.3090472, -2.12258305, 0.3697304, -1.0415207,
          -0.57783497, -0.90997008, 1.09850192, 0.41270579, -1.4927376],
         [1.2725522, 1.1657899, 2.7509041, 1.2389013, -0.9490494, -1.0752459,
          1.1038576, 2.9912821, 3.5349111, 0.4171922, 1.0168959, -0.7625041,
          -0.4300008, 3.0431921, 1.6035947, 0.5285634, -0.7649405, 1.5575896,
          1.3670797, 1.1726023], 0.005293305834235, -3.0983317739483, .2]]

    @pytest.mark.parametrize("a,b,pr,tr,trim", params)
    def test_ttest_compare_r(self, a, b, pr, tr, trim):
        '''
        Using PairedData's yuen.t.test method. Something to note is that there
        are at least 3 R packages that come with a trimmed t-test method, and
        comparisons were made between them. It was found that PairedData's
        method's results match this method, SAS, and one of the other R
        methods. A notable discrepancy was the DescTools implementation of the
        function, which only sometimes agreed with SAS, WRS2, PairedData and
        this implementation. For this reason, most comparisons in R are made
        against PairedData's method.

        Rather than providing the input and output for all evaluations, here is
        a representative example:
        > library(PairedData)
        > a <- c(1, 2, 3)
        > b <- c(1.1, 2.9, 4.2)
        > options(digits=16)
        > yuen.t.test(a, b, tr=.2)

            Two-sample Yuen test, trim=0.2

        data:  x and y
        t = -0.68649512735573, df = 3.4104431643464, p-value = 0.5361949075313
        alternative hypothesis: true difference in trimmed means is not equal
        to 0
        95 percent confidence interval:
        -3.912777195645217  2.446110528978550
        sample estimates:
        trimmed mean of x trimmed mean of y
        2.000000000000000 2.733333333333333
        '''
        statistic, pvalue = stats.ttest_ind(a, b, trim=trim, equal_var=False)
        assert_allclose(statistic, tr, atol=1e-15)
        assert_allclose(pvalue, pr, atol=1e-15)

    def test_compare_SAS(self):
        # Source of the data used in this test:
        # https://support.sas.com/resources/papers/proceedings14/1660-2014.pdf
        a = [12, 14, 18, 25, 32, 44, 12, 14, 18, 25, 32, 44]
        b = [17, 22, 14, 12, 30, 29, 19, 17, 22, 14, 12, 30, 29, 19]
        # In this paper, a trimming percentage of 5% is used. However,
        # in their implementation, the number of values trimmed is rounded to
        # the nearest whole number. However, consistent with
        # `scipy.stats.trimmed_mean`, this test truncates to the lower
        # whole number. In this example, the paper notes that 1 value is
        # trimmed off of each side. 9% replicates this amount of trimming.
        statistic, pvalue = stats.ttest_ind(a, b, trim=.09, equal_var=False)
        assert_allclose(pvalue, 0.514522, atol=1e-6)
        assert_allclose(statistic, 0.669169, atol=1e-6)

    def test_equal_var(self):
        '''
        The PairedData library only supports unequal variances. To compare
        samples with equal variances, the multicon library is used.
        > library(multicon)
        > a <- c(2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9)
        > b <- c(6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1)
        > dv = c(a,b)
        > iv = c(rep('a', length(a)), rep('b', length(b)))
        > yuenContrast(dv~ iv, EQVAR = TRUE)
        $Ms
           N                 M wgt
        a 11 2.442857142857143   1
        b 11 5.385714285714286  -1

        $test
                              stat df              crit                   p
        results -4.246116897032513 12 2.178812829667228 0.00113508833897713
        '''
        a = [2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9]
        b = [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1]
        # `equal_var=True` is default
        statistic, pvalue = stats.ttest_ind(a, b, trim=.2)
        assert_allclose(pvalue, 0.00113508833897713, atol=1e-10)
        assert_allclose(statistic, -4.246116897032513, atol=1e-10)

    @pytest.mark.parametrize('alt,pr,tr',
                             (('greater', 0.9985605452443, -4.2461168970325),
                              ('less', 0.001439454755672, -4.2461168970325),),
                             )
    def test_alternatives(self, alt, pr, tr):
        '''
        > library(PairedData)
        > a <- c(2.7,2.7,1.1,3.0,1.9,3.0,3.8,3.8,0.3,1.9,1.9)
        > b <- c(6.5,5.4,8.1,3.5,0.5,3.8,6.8,4.9,9.5,6.2,4.1)
        > options(digits=16)
        > yuen.t.test(a, b, alternative = 'greater')
        '''
        a = [2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9]
        b = [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1]

        statistic, pvalue = stats.ttest_ind(a, b, trim=.2, equal_var=False,
                                            alternative=alt)
        assert_allclose(pvalue, pr, atol=1e-10)
        assert_allclose(statistic, tr, atol=1e-10)

    def test_errors_unsupported(self):
        # confirm that attempting to trim with NaNs or permutations raises an
        # error
        match = "Permutations are currently not supported with trimming."
        with assert_raises(ValueError, match=match):
            stats.ttest_ind([1, 2], [2, 3], trim=.2, permutations=2)

    @pytest.mark.parametrize("trim", [-.2, .5, 1])
    def test_trim_bounds_error(self, trim):
        # `trim` must lie in [0, 0.5); values outside should raise.
        match = "Trimming percentage should be 0 <= `trim` < .5."
        with assert_raises(ValueError, match=match):
            stats.ttest_ind([1, 2], [2, 1], trim=trim)
class Test_ttest_CI:
    # Tests of `stats.ttest_ind` result's `confidence_interval` method,
    # against reference values from R (PairedData and multicon packages).

    # indices in order [alternative={two-sided, less, greater},
    #                   equal_var={False, True}, trim={0, 0.2}]
    # reference values in order `statistic, df, pvalue, low, high`
    # equal_var=False reference values computed with R PairedData yuen.t.test:
    #
    # library(PairedData)
    # options(digits=16)
    # a < - c(0.88236329, 0.97318744, 0.4549262, 0.97893335, 0.0606677,
    #         0.44013366, 0.55806018, 0.40151434, 0.14453315, 0.25860601,
    #         0.20202162)
    # b < - c(0.93455277, 0.42680603, 0.49751939, 0.14152846, 0.711435,
    #         0.77669667, 0.20507578, 0.78702772, 0.94691855, 0.32464958,
    #         0.3873582, 0.35187468, 0.21731811)
    # yuen.t.test(a, b, tr=0, conf.level = 0.9, alternative = 'l')
    #
    # equal_var=True reference values computed with R multicon yuenContrast:
    #
    # library(multicon)
    # options(digits=16)
    # a < - c(0.88236329, 0.97318744, 0.4549262, 0.97893335, 0.0606677,
    #         0.44013366, 0.55806018, 0.40151434, 0.14453315, 0.25860601,
    #         0.20202162)
    # b < - c(0.93455277, 0.42680603, 0.49751939, 0.14152846, 0.711435,
    #         0.77669667, 0.20507578, 0.78702772, 0.94691855, 0.32464958,
    #         0.3873582, 0.35187468, 0.21731811)
    # dv = c(a, b)
    # iv = c(rep('a', length(a)), rep('b', length(b)))
    # yuenContrast(dv~iv, EQVAR = FALSE, alternative = 'unequal', tr = 0.2)
    r = np.empty(shape=(3, 2, 2, 5))
    r[0, 0, 0] = [-0.2314607, 19.894435, 0.8193209, -0.247220294, 0.188729943]
    r[1, 0, 0] = [-0.2314607, 19.894435, 0.40966045, -np.inf, 0.1382426469]
    r[2, 0, 0] = [-0.2314607, 19.894435, 0.5903395, -0.1967329982, np.inf]
    r[0, 0, 1] = [-0.2452886, 11.427896, 0.8105823, -0.34057446, 0.25847383]
    r[1, 0, 1] = [-0.2452886, 11.427896, 0.40529115, -np.inf, 0.1865829074]
    r[2, 0, 1] = [-0.2452886, 11.427896, 0.5947089, -0.268683541, np.inf]
    # confidence interval not available for equal_var=True
    r[0, 1, 0] = [-0.2345625322555006, 22, 0.8167175905643815, None, None]
    r[1, 1, 0] = [-0.2345625322555006, 22, 0.4083587952821908, None, None]
    r[2, 1, 0] = [-0.2345625322555006, 22, 0.5916412047178092, None, None]
    r[0, 1, 1] = [-0.2505369406507428, 14, 0.8058115135702835, None, None]
    r[1, 1, 1] = [-0.2505369406507428, 14, 0.4029057567851417, None, None]
    r[2, 1, 1] = [-0.2505369406507428, 14, 0.5970942432148583, None, None]

    @pytest.mark.parametrize('alternative', ['two-sided', 'less', 'greater'])
    @pytest.mark.parametrize('equal_var', [False, True])
    @pytest.mark.parametrize('trim', [0, 0.2])
    def test_confidence_interval(self, alternative, equal_var, trim):
        if equal_var and trim:
            pytest.xfail('Discrepancy in `main`; needs further investigation.')

        rng = np.random.default_rng(3810954496107292580)
        x = rng.random(11)
        y = rng.random(13)

        res = stats.ttest_ind(x, y, alternative=alternative,
                              equal_var=equal_var, trim=trim)

        alternatives = {'two-sided': 0, 'less': 1, 'greater': 2}
        # `int(np.ceil(trim))` maps trim=0 -> index 0 and trim=0.2 -> index 1
        ref = self.r[alternatives[alternative], int(equal_var), int(np.ceil(trim))]
        statistic, df, pvalue, low, high = ref

        assert_allclose(res.statistic, statistic)
        assert_allclose(res.df, df)
        assert_allclose(res.pvalue, pvalue)

        if not equal_var:  # CI not available when `equal_var is True`
            ci = res.confidence_interval(0.9)
            assert_allclose(ci.low, low)
            assert_allclose(ci.high, high)
def test__broadcast_concatenate():
    """`_broadcast_concatenate` should broadcast the inputs along every
    axis except `axis`, then concatenate them along `axis`."""
    np.random.seed(0)
    first = np.random.rand(5, 4, 4, 3, 1, 6)
    second = np.random.rand(4, 1, 8, 2, 6)
    combined = _broadcast_concatenate((first, second), axis=-3)

    # Broadcast the inputs by hand (via tile) as an independent check.
    first_b = np.tile(first, (1, 1, 1, 1, 2, 1))
    second_b = np.tile(second[None, ...], (5, 1, 4, 1, 1, 1))
    split = first_b.shape[-3]
    for idx in product(*map(range, combined.shape)):
        pos = idx[3]  # position along the concatenation axis
        if pos < split:
            assert first_b[idx] == combined[idx]
        else:
            src = idx[:3] + (pos - split,) + idx[4:]
            assert second_b[src] == combined[idx]
def test_ttest_ind_with_uneq_var():
    """Tests of `stats.ttest_ind` with `equal_var=False` (Welch's t-test),
    plus the matching `ttest_ind_from_stats` results.  Reference values
    were verified against R; multidimensional and degenerate inputs are
    also exercised."""
    # check vs. R
    a = (1, 2, 3)
    b = (1.1, 2.9, 4.2)
    pr = 0.53619490753126731
    tr = -0.68649512735572582
    t, p = stats.ttest_ind(a, b, equal_var=False)
    assert_array_almost_equal([t,p], [tr, pr])
    # test from desc stats API
    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
                                                         equal_var=False),
                              [t, p])

    a = (1, 2, 3, 4)
    pr = 0.84354139131608286
    tr = -0.2108663315950719
    t, p = stats.ttest_ind(a, b, equal_var=False)
    assert_array_almost_equal([t,p], [tr, pr])
    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
                                                         equal_var=False),
                              [t, p])

    # regression test
    tr = 1.0912746897927283
    tr_uneq_n = 0.66745638708050492
    pr = 0.27647831993021388
    pr_uneq_n = 0.50873585065616544
    tpr = ([tr,-tr],[pr,pr])

    rvs3 = np.linspace(1,100, 25)
    rvs2 = np.linspace(1,100,100)
    rvs1 = np.linspace(5,105,100)
    rvs1_2D = np.array([rvs1, rvs2])
    rvs2_2D = np.array([rvs2, rvs1])

    t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
    assert_array_almost_equal([t,p],(tr,pr))
    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
                                                                      rvs2),
                                                         equal_var=False),
                              (t, p))

    t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False)
    assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n))
    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
                                                                      rvs3),
                                                         equal_var=False),
                              (t, p))

    t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False)
    assert_array_almost_equal([t,p],tpr)
    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
    assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
                                                         equal_var=False),
                              (t, p))

    t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False)
    assert_array_almost_equal([t,p],tpr)
    args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
    assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
                                                         equal_var=False),
                              (t, p))

    # test for namedtuple attribute results
    attributes = ('statistic', 'pvalue')
    res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
    check_named_results(res, attributes)

    # test on 3 dimensions
    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
    t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False)
    assert_almost_equal(np.abs(t), np.abs(tr))
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (2, 3))
    args = _desc_stats(rvs1_3D, rvs2_3D, axis=1)
    t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
    assert_almost_equal(np.abs(t), np.abs(tr))
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (2, 3))

    t, p = stats.ttest_ind(np.moveaxis(rvs1_3D, 2, 0),
                           np.moveaxis(rvs2_3D, 2, 0),
                           axis=2, equal_var=False)
    assert_array_almost_equal(np.abs(t), np.abs(tr))
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (3, 2))
    args = _desc_stats(np.moveaxis(rvs1_3D, 2, 0),
                       np.moveaxis(rvs2_3D, 2, 0), axis=2)
    t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
    assert_array_almost_equal(np.abs(t), np.abs(tr))
    assert_array_almost_equal(np.abs(p), pr)
    assert_equal(t.shape, (3, 2))

    # test zero division problem
    with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
        t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
    assert_equal((np.abs(t), p), (np.inf, 0))
    with np.errstate(all='ignore'):
        assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False),
                     (np.nan, np.nan))

        # check that nan in input array result in nan output
        anan = np.array([[1, np.nan], [-1, 1]])
        assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False),
                     ([0, np.nan], [1, np.nan]))
def test_ttest_ind_nan_2nd_arg():
    """Regression test for gh-6134: NaNs in the *second* argument used to be
    mishandled under ``nan_policy='omit'``."""
    with_nan = [np.nan, 2.0, 3.0, 4.0]
    clean = [1.0, 2.0, 1.0, 2.0]

    forward = stats.ttest_ind(with_nan, clean, nan_policy='omit')
    reverse = stats.ttest_ind(clean, with_nan, nan_policy='omit')
    # Swapping the samples negates the statistic; the p-value is unchanged.
    assert_allclose(reverse.statistic, -forward.statistic, atol=1e-15)
    assert_allclose(reverse.pvalue, forward.pvalue, atol=1e-15)

    # NB: arguments are not paired when NaNs are dropped, so omitting the
    # NaN is equivalent to dropping that observation up front.
    dropped = stats.ttest_ind(clean, with_nan[1:])
    assert_allclose(reverse, dropped, atol=1e-15)

    # .. and this is consistent with R. R code:
    # x = c(NA, 2.0, 3.0, 4.0)
    # y = c(1.0, 2.0, 1.0, 2.0)
    # t.test(x, y, var.equal=TRUE)
    assert_allclose(reverse, (-2.5354627641855498, 0.052181400457057901),
                    atol=1e-15)
def test_ttest_ind_empty_1d_returns_nan():
# Two empty inputs should return a TtestResult containing nan
# for both values.
result = stats.ttest_ind([], [])
assert isinstance(result, stats._stats_py.TtestResult)
assert_equal(result, (np.nan, np.nan))
@pytest.mark.parametrize('b, expected_shape',
                         [(np.empty((1, 5, 0)), (3, 5)),
                          (np.empty((1, 0, 0)), (3, 0))])
def test_ttest_ind_axis_size_zero(b, expected_shape):
    """When the axis dimension has length zero, the statistic and p-value
    should be nan arrays shaped like the broadcast non-axis dimensions."""
    a = np.empty((3, 1, 0))
    res = stats.ttest_ind(a, b, axis=-1)
    assert isinstance(res, stats._stats_py.TtestResult)
    nan_array = np.full(expected_shape, fill_value=np.nan)
    assert_equal(res.statistic, nan_array)
    assert_equal(res.pvalue, nan_array)
def test_ttest_ind_nonaxis_size_zero():
# In this test, the length of the axis dimension is nonzero,
# but one of the nonaxis dimensions has length 0. Check that
# we still get the correctly broadcast shape, which is (5, 0)
# in this case.
a = np.empty((1, 8, 0))
b = np.empty((5, 8, 1))
result = stats.ttest_ind(a, b, axis=1)
assert isinstance(result, stats._stats_py.TtestResult)
assert_equal(result.statistic.shape, (5, 0))
assert_equal(result.pvalue.shape, (5, 0))
def test_ttest_ind_nonaxis_size_zero_different_lengths():
# In this test, the length of the axis dimension is nonzero,
# and that size is different in the two inputs,
# and one of the nonaxis dimensions has length 0. Check that
# we still get the correctly broadcast shape, which is (5, 0)
# in this case.
a = np.empty((1, 7, 0))
b = np.empty((5, 8, 1))
result = stats.ttest_ind(a, b, axis=1)
assert isinstance(result, stats._stats_py.TtestResult)
assert_equal(result.statistic.shape, (5, 0))
assert_equal(result.pvalue.shape, (5, 0))
def test_gh5686():
    """Regression test for gh-5686: array-valued summary statistics used to
    raise a TypeError in `ttest_ind_from_stats`.  Executing without an
    exception is the test."""
    group1 = (np.array([1, 2]), np.array([5, 3]), np.array([130, 140]))
    group2 = (np.array([3, 4]), np.array([4, 5]), np.array([100, 150]))
    stats.ttest_ind_from_stats(*group1, *group2)
def test_ttest_ind_from_stats_inputs_zero():
    """Regression test for gh-6409: all-zero summary statistics should
    yield nan results rather than raising."""
    res = stats.ttest_ind_from_stats(0, 0, 6, 0, 0, 6, equal_var=False)
    assert_equal(res, [np.nan, np.nan])
def test_ttest_single_observation():
# test that p-values are uniformly distributed under the null hypothesis
rng = np.random.default_rng(246834602926842)
x = rng.normal(size=(10000, 2))
y = rng.normal(size=(10000, 1))
q = rng.uniform(size=100)
res = stats.ttest_ind(x, y, equal_var=True, axis=-1)
assert stats.ks_1samp(res.pvalue, stats.uniform().cdf).pvalue > 0.1
assert_allclose(np.percentile(res.pvalue, q*100), q, atol=1e-2)
res = stats.ttest_ind(y, x, equal_var=True, axis=-1)
assert stats.ks_1samp(res.pvalue, stats.uniform().cdf).pvalue > 0.1
assert_allclose(np.percentile(res.pvalue, q*100), q, atol=1e-2)
# reference values from R:
# options(digits=16)
# t.test(c(2, 3, 5), c(1.5), var.equal=TRUE)
res = stats.ttest_ind([2, 3, 5], [1.5], equal_var=True)
assert_allclose(res, (1.0394023007754, 0.407779907736), rtol=1e-10)
def test_ttest_1samp_new():
    """Tests of `stats.ttest_1samp`: axis handling on 3-D input, the
    zero-variance case, the `alternative` parameter, and `nan_policy`."""
    n1, n2, n3 = (10,15,20)
    rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))

    # check multidimensional array and correct axis handling
    # deterministic rvn1 and rvn2 would be better as in test_ttest_rel
    t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)
    t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)
    t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)
    assert_array_almost_equal(t1,t2, decimal=14)
    assert_almost_equal(t1[0,0],t3, decimal=14)
    assert_equal(t1.shape, (n2,n3))

    t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1, 1, n3)),axis=1)  # noqa
    t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)
    t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)
    assert_array_almost_equal(t1,t2, decimal=14)
    assert_almost_equal(t1[0,0],t3, decimal=14)
    assert_equal(t1.shape, (n1,n3))

    t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2,1)),axis=2)  # noqa
    t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)
    t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)
    assert_array_almost_equal(t1,t2, decimal=14)
    assert_almost_equal(t1[0,0],t3, decimal=14)
    assert_equal(t1.shape, (n1,n2))

    # test zero division problem
    t, p = stats.ttest_1samp([0, 0, 0], 1)
    assert_equal((np.abs(t), p), (np.inf, 0))

    # test alternative parameter
    # Convert from two-sided p-values to one sided using T result data.
    def convert(t, p, alt):
        # One-sided p is p/2 when the statistic favors the alternative,
        # 1 - p/2 otherwise.
        if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
            return p / 2
        return 1 - (p / 2)
    converter = np.vectorize(convert)

    tr, pr = stats.ttest_1samp(rvn1[:, :, :], 1)

    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, alternative="greater")
    pc = converter(tr, pr, "greater")
    assert_allclose(p, pc)
    assert_allclose(t, tr)

    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, alternative="less")
    pc = converter(tr, pr, "less")
    assert_allclose(p, pc)
    assert_allclose(t, tr)

    with np.errstate(all='ignore'):
        assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan))

        # check that nan in input array result in nan output
        anan = np.array([[1, np.nan],[-1, 1]])
        assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan]))

    # `nan_policy='omit'` should agree with the `alternative` conversion too.
    rvn1[0:2, 1:3, 4:8] = np.nan

    tr, pr = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit')

    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit',
                             alternative="greater")
    pc = converter(tr, pr, "greater")
    assert_allclose(p, pc)
    assert_allclose(t, tr)

    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit',
                             alternative="less")
    pc = converter(tr, pr, "less")
    assert_allclose(p, pc)
    assert_allclose(t, tr)
def test_ttest_1samp_popmean_array():
    """`popmean.shape[axis]` must be 1; otherwise `ttest_1samp` raises.
    Testing several null hypotheses at once follows standard broadcasting
    rules for the remaining axes."""
    rng = np.random.default_rng(2913300596553337193)
    sample = rng.random(size=(1, 15, 20))

    err_pattern = r"`popmean.shape\[axis\]` must equal 1."
    bad_popmean = rng.random(size=(5, 2, 20))
    with pytest.raises(ValueError, match=err_pattern):
        stats.ttest_1samp(sample, popmean=bad_popmean, axis=-2)

    good_popmean = rng.random(size=(5, 1, 20))
    res = stats.ttest_1samp(sample, popmean=good_popmean, axis=-2)
    assert res.statistic.shape == (5, 20)

    # Using the CI endpoints as the null means should give p-value == 0.05.
    ci = np.expand_dims(res.confidence_interval(), axis=-2)
    res = stats.ttest_1samp(sample, popmean=ci, axis=-2)
    assert_allclose(res.pvalue, 0.05)
class TestDescribe:
    # Tests of `stats.describe`: scalar input, 2-D input with axis handling,
    # nan_policy, ddof, axis=None, and empty input.

    def test_describe_scalar(self):
        # A scalar has one observation; variance/skew/kurtosis are undefined
        # (nan) and computing them emits "Degrees of freedom <= 0" warnings.
        with suppress_warnings() as sup, \
              np.errstate(invalid="ignore", divide="ignore"):
            sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
            n, mm, m, v, sk, kurt = stats.describe(4.)
        assert_equal(n, 1)
        assert_equal(mm, (4.0, 4.0))
        assert_equal(m, 4.0)
        assert np.isnan(v)
        assert np.isnan(sk)
        assert np.isnan(kurt)

    def test_describe_numbers(self):
        x = np.vstack((np.ones((3,4)), np.full((2, 4), 2)))
        nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
        mc = np.array([1.4, 1.4, 1.4, 1.4])
        vc = np.array([0.3, 0.3, 0.3, 0.3])
        skc = [0.40824829046386357] * 4
        kurtc = [-1.833333333333333] * 4
        n, mm, m, v, sk, kurt = stats.describe(x)
        assert_equal(n, nc)
        assert_equal(mm, mmc)
        assert_equal(m, mc)
        assert_equal(v, vc)
        assert_array_almost_equal(sk, skc, decimal=13)
        assert_array_almost_equal(kurt, kurtc, decimal=13)
        # describing the transpose along axis=1 must give the same answers
        n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
        assert_equal(n, nc)
        assert_equal(mm, mmc)
        assert_equal(m, mc)
        assert_equal(v, vc)
        assert_array_almost_equal(sk, skc, decimal=13)
        assert_array_almost_equal(kurt, kurtc, decimal=13)

        # nan_policy='omit' should drop the nan observation
        x = np.arange(10.)
        x[9] = np.nan

        nc, mmc = (9, (0.0, 8.0))
        mc = 4.0
        vc = 7.5
        skc = 0.0
        kurtc = -1.2300000000000002
        n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit')
        assert_equal(n, nc)
        assert_equal(mm, mmc)
        assert_equal(m, mc)
        assert_equal(v, vc)
        assert_array_almost_equal(sk, skc)
        assert_array_almost_equal(kurt, kurtc, decimal=13)

        assert_raises(ValueError, stats.describe, x, nan_policy='raise')
        assert_raises(ValueError, stats.describe, x, nan_policy='foobar')

    def test_describe_result_attributes(self):
        actual = stats.describe(np.arange(5))
        attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
                      'kurtosis')
        check_named_results(actual, attributes)

    def test_describe_ddof(self):
        # ddof=0 gives the biased (population) variance: 0.24 vs 0.3 above
        x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))
        nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
        mc = np.array([1.4, 1.4, 1.4, 1.4])
        vc = np.array([0.24, 0.24, 0.24, 0.24])
        skc = [0.40824829046386357] * 4
        kurtc = [-1.833333333333333] * 4
        n, mm, m, v, sk, kurt = stats.describe(x, ddof=0)
        assert_equal(n, nc)
        assert_allclose(mm, mmc, rtol=1e-15)
        assert_allclose(m, mc, rtol=1e-15)
        assert_allclose(v, vc, rtol=1e-15)
        assert_array_almost_equal(sk, skc, decimal=13)
        assert_array_almost_equal(kurt, kurtc, decimal=13)

    def test_describe_axis_none(self):
        x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))

        # expected values
        e_nobs, e_minmax = (20, (1.0, 2.0))
        e_mean = 1.3999999999999999
        e_var = 0.25263157894736848
        e_skew = 0.4082482904638634
        e_kurt = -1.8333333333333333

        # actual values
        a = stats.describe(x, axis=None)

        assert_equal(a.nobs, e_nobs)
        assert_almost_equal(a.minmax, e_minmax)
        assert_almost_equal(a.mean, e_mean)
        assert_almost_equal(a.variance, e_var)
        assert_array_almost_equal(a.skewness, e_skew, decimal=13)
        assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13)

    def test_describe_empty(self):
        assert_raises(ValueError, stats.describe, [])
def test_normalitytests():
    # Tests for skewtest, kurtosistest and normaltest: reference values,
    # one-sided alternatives, axis handling, and nan_policy behavior.

    # All three tests reject a single scalar observation.
    assert_raises(ValueError, stats.skewtest, 4.)
    assert_raises(ValueError, stats.kurtosistest, 4.)
    assert_raises(ValueError, stats.normaltest, 4.)

    # numbers verified with R: dagoTest in package fBasics
    st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
    pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
    # One-sided p-values follow from the two-sided ones by halving.
    pv_skew_less, pv_kurt_less = 1 - pv_skew / 2, pv_kurt / 2
    pv_skew_greater, pv_kurt_greater = pv_skew / 2, 1 - pv_kurt / 2
    x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
    attributes = ('statistic', 'pvalue')

    assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal))
    check_named_results(stats.normaltest(x), attributes)
    assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew))
    assert_array_almost_equal(stats.skewtest(x, alternative='less'),
                              (st_skew, pv_skew_less))
    assert_array_almost_equal(stats.skewtest(x, alternative='greater'),
                              (st_skew, pv_skew_greater))
    check_named_results(stats.skewtest(x), attributes)
    assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt))
    assert_array_almost_equal(stats.kurtosistest(x, alternative='less'),
                              (st_kurt, pv_kurt_less))
    assert_array_almost_equal(stats.kurtosistest(x, alternative='greater'),
                              (st_kurt, pv_kurt_greater))
    check_named_results(stats.kurtosistest(x), attributes)

    # some more intuitive tests for kurtosistest and skewtest.
    # see gh-13549.
    # skew parameter is 1 > 0
    a1 = stats.skewnorm.rvs(a=1, size=10000, random_state=123)
    pval = stats.skewtest(a1, alternative='greater').pvalue
    assert_almost_equal(pval, 0.0, decimal=5)
    # excess kurtosis of laplace is 3 > 0
    a2 = stats.laplace.rvs(size=10000, random_state=123)
    pval = stats.kurtosistest(a2, alternative='greater').pvalue
    assert_almost_equal(pval, 0.0)

    # Test axis=None (equal to axis=0 for 1-D input)
    assert_array_almost_equal(stats.normaltest(x, axis=None),
                              (st_normal, pv_normal))
    assert_array_almost_equal(stats.skewtest(x, axis=None),
                              (st_skew, pv_skew))
    assert_array_almost_equal(stats.kurtosistest(x, axis=None),
                              (st_kurt, pv_kurt))

    # NaN handling: the default policy propagates NaN, 'omit' drops it,
    # 'raise' errors out, and unrecognized policies are invalid.
    x = np.arange(10.)
    x[9] = np.nan
    with np.errstate(invalid="ignore"):
        assert_array_equal(stats.skewtest(x), (np.nan, np.nan))

    expected = (1.0184643553962129, 0.30845733195153502)
    assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected)

    # test alternative with nan_policy='omit'
    a1[10:100] = np.nan
    z, p = stats.skewtest(a1, nan_policy='omit')
    zl, pl = stats.skewtest(a1, nan_policy='omit', alternative='less')
    zg, pg = stats.skewtest(a1, nan_policy='omit', alternative='greater')
    # One-sided statistics equal the two-sided one; p-values halve/reflect.
    assert_allclose(zl, z, atol=1e-15)
    assert_allclose(zg, z, atol=1e-15)
    assert_allclose(pl, 1 - p/2, atol=1e-15)
    assert_allclose(pg, p/2, atol=1e-15)

    with np.errstate(all='ignore'):
        assert_raises(ValueError, stats.skewtest, x, nan_policy='raise')
    assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar')
    assert_raises(ValueError, stats.skewtest, list(range(8)),
                  alternative='foobar')

    x = np.arange(30.)
    x[29] = np.nan
    with np.errstate(all='ignore'):
        assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan))

    expected = (-2.2683547379505273, 0.023307594135872967)
    assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'),
                              expected)

    # test alternative with nan_policy='omit'
    a2[10:20] = np.nan
    z, p = stats.kurtosistest(a2[:100], nan_policy='omit')
    zl, pl = stats.kurtosistest(a2[:100], nan_policy='omit',
                                alternative='less')
    zg, pg = stats.kurtosistest(a2[:100], nan_policy='omit',
                                alternative='greater')
    assert_allclose(zl, z, atol=1e-15)
    assert_allclose(zg, z, atol=1e-15)
    assert_allclose(pl, 1 - p/2, atol=1e-15)
    assert_allclose(pg, p/2, atol=1e-15)

    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise')
    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar')
    assert_raises(ValueError, stats.kurtosistest, list(range(20)),
                  alternative='foobar')

    with np.errstate(all='ignore'):
        assert_array_equal(stats.normaltest(x), (np.nan, np.nan))

    expected = (6.2260409514287449, 0.04446644248650191)
    assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected)

    assert_raises(ValueError, stats.normaltest, x, nan_policy='raise')
    assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar')

    # regression test for issue gh-9033: x clearly non-normal but power of
    # negative denom needs to be handled correctly to reject normality
    counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
    x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
    assert_equal(stats.kurtosistest(x)[1] < 0.01, True)
class TestRankSums:
    # Pre-generated paired samples shared by every test in this class.
    np.random.seed(0)
    x, y = np.random.rand(2, 10)

    @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
    def test_ranksums_result_attributes(self, alternative):
        # Without continuity or tie corrections, the Mann-Whitney U p-value
        # coincides with the rank-sums p-value for every alternative.
        p_ranksums = stats.ranksums(
            self.x, self.y, alternative=alternative).pvalue
        p_mwu = stats.mannwhitneyu(
            self.x, self.y, use_continuity=False,
            alternative=alternative).pvalue
        assert_allclose(p_ranksums, p_mwu)

    def test_ranksums_named_results(self):
        # The result namedtuple exposes 'statistic' and 'pvalue'.
        result = stats.ranksums(self.x, self.y)
        check_named_results(result, ('statistic', 'pvalue'))

    def test_input_validation(self):
        # An unrecognized `alternative` is rejected with a clear message.
        with assert_raises(ValueError, match="alternative must be 'less'"):
            stats.ranksums(self.x, self.y, alternative='foobar')
class TestJarqueBera:
    # Tests for stats.jarque_bera, a normality test.

    def test_jarque_bera_stats(self):
        np.random.seed(987654321)
        x = np.random.normal(0, 1, 100000)
        y = np.random.chisquare(10000, 100000)
        z = np.random.rayleigh(1, 100000)

        # Tuple elements and namedtuple attributes must agree.
        assert_equal(stats.jarque_bera(x)[0], stats.jarque_bera(x).statistic)
        assert_equal(stats.jarque_bera(x)[1], stats.jarque_bera(x).pvalue)

        assert_equal(stats.jarque_bera(y)[0], stats.jarque_bera(y).statistic)
        assert_equal(stats.jarque_bera(y)[1], stats.jarque_bera(y).pvalue)

        assert_equal(stats.jarque_bera(z)[0], stats.jarque_bera(z).statistic)
        assert_equal(stats.jarque_bera(z)[1], stats.jarque_bera(z).pvalue)

        # The normal sample should look "most normal": its p-value exceeds
        # the chi-square sample's, which in turn exceeds the Rayleigh's.
        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])
        assert_(stats.jarque_bera(x).pvalue > stats.jarque_bera(y).pvalue)

        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])
        assert_(stats.jarque_bera(x).pvalue > stats.jarque_bera(z).pvalue)

        assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])
        assert_(stats.jarque_bera(y).pvalue > stats.jarque_bera(z).pvalue)

    def test_jarque_bera_array_like(self):
        np.random.seed(987654321)
        x = np.random.normal(0, 1, 100000)

        # list, tuple, and reshaped-ndarray views of the same data must all
        # produce identical results, via tuple indexing and attributes alike.
        jb_test1 = JB1, p1 = stats.jarque_bera(list(x))
        jb_test2 = JB2, p2 = stats.jarque_bera(tuple(x))
        jb_test3 = JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))

        assert_(JB1 == JB2 == JB3 == jb_test1.statistic == jb_test2.statistic == jb_test3.statistic)
        assert_(p1 == p2 == p3 == jb_test1.pvalue == jb_test2.pvalue == jb_test3.pvalue)

    def test_jarque_bera_size(self):
        # An empty sample is rejected.
        assert_raises(ValueError, stats.jarque_bera, [])

    def test_axis(self):
        rng = np.random.default_rng(abs(hash('JarqueBera')))
        x = rng.random(size=(2, 45))

        # axis=None behaves like testing the flattened array.
        assert_equal(stats.jarque_bera(x, axis=None),
                     stats.jarque_bera(x.ravel()))

        # axis=1 applies the test row by row ...
        res = stats.jarque_bera(x, axis=1)
        s0, p0 = stats.jarque_bera(x[0, :])
        s1, p1 = stats.jarque_bera(x[1, :])
        assert_allclose(res.statistic, [s0, s1])
        assert_allclose(res.pvalue, [p0, p1])

        # ... and transposing the input with axis=0 gives the same result.
        resT = stats.jarque_bera(x.T, axis=0)
        assert_allclose(res, resT)
def test_skewtest_too_few_samples():
    """Regression test for ticket #1492.

    `skewtest` requires at least 8 observations, so a 7-element sample
    must raise a ValueError.
    """
    sample = np.arange(7.0)
    assert_raises(ValueError, stats.skewtest, sample)
def test_kurtosistest_too_few_samples():
    """Regression test for ticket #1425.

    `kurtosistest` requires at least 5 observations, so a 4-element sample
    must raise a ValueError.
    """
    sample = np.arange(4.0)
    assert_raises(ValueError, stats.kurtosistest, sample)
class TestMannWhitneyU:
    # Fixed samples for stats.mannwhitneyu; the U statistics and p-values
    # asserted below were precomputed for exactly these data.
    X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589,
         20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105,
         19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953,
         20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274,
         20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021,
         19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892,
         17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179,
         20.4970638083542, 19.5567594734914]

    Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575,
         19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655,
         19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841,
         18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636,
         19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356]

    # Number of significant digits required when comparing p-values.
    significant = 14

    def test_mannwhitneyu_one_sided(self):
        # Swapping the samples while flipping the alternative must yield
        # the same p-value; opposite alternatives differ.
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater')
        u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater')
        u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less')

        assert_equal(p1, p2)
        assert_equal(p3, p4)
        assert_(p1 != p3)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_equal(u3, 498)
        assert_equal(u4, 102)
        assert_approx_equal(p1, 0.999957683256589, significant=self.significant)
        assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant)

    def test_mannwhitneyu_two_sided(self):
        # The two-sided p-value is symmetric in the sample order.
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided')

        assert_equal(p1, p2)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_approx_equal(p1, 9.188326533255e-05,
                            significant=self.significant)

    def test_mannwhitneyu_no_correct_one_sided(self):
        # Same as test_mannwhitneyu_one_sided, but with the continuity
        # correction disabled (third positional argument False).
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
                                    alternative='less')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
                                    alternative='greater')
        u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
                                    alternative='greater')
        u4, p4 = stats.mannwhitneyu(self.Y, self.X, False,
                                    alternative='less')

        assert_equal(p1, p2)
        assert_equal(p3, p4)
        assert_(p1 != p3)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_equal(u3, 498)
        assert_equal(u4, 102)
        assert_approx_equal(p1, 0.999955905990004, significant=self.significant)
        assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant)

    def test_mannwhitneyu_no_correct_two_sided(self):
        # Two-sided variant without the continuity correction.
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
                                    alternative='two-sided')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
                                    alternative='two-sided')

        assert_equal(p1, p2)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_approx_equal(p1, 8.81880199916178e-05,
                            significant=self.significant)

    def test_mannwhitneyu_ones(self):
        # test for gh-1428
        # Heavily tied data (almost all ones) exercises the tie correction.
        x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1.])

        y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                      2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                      1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                      2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                      2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                      1., 1., 1., 1.])

        # checked against R wilcox.test
        assert_allclose(stats.mannwhitneyu(x, y, alternative='less'),
                        (16980.5, 2.8214327656317373e-005))
        # p-value from R, e.g. wilcox.test(x, y, alternative="g")
        assert_allclose(stats.mannwhitneyu(x, y, alternative='greater'),
                        (16980.5, 0.9999719954296))
        assert_allclose(stats.mannwhitneyu(x, y, alternative='two-sided'),
                        (16980.5, 5.642865531266e-05))

    def test_mannwhitneyu_result_attributes(self):
        # test for namedtuple attribute results
        attributes = ('statistic', 'pvalue')
        res = stats.mannwhitneyu(self.X, self.Y, alternative="less")
        check_named_results(res, attributes)
def test_pointbiserial():
    """Check pointbiserialr against a published SAS example.

    Same data as the mstats version of this test, minus the nan.
    Test data: https://web.archive.org/web/20060504220742/https://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
    """
    dichotomous = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
                   0,0,0,0,1]
    measurements = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
                    2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
                    0.8,0.7,0.6,0.5,0.2,0.2,0.1]
    assert_almost_equal(stats.pointbiserialr(dichotomous, measurements)[0],
                        0.36149, 5)

    # The result is a namedtuple with these attributes, and `correlation`
    # mirrors `statistic`.
    res = stats.pointbiserialr(dichotomous, measurements)
    check_named_results(res, ('correlation', 'pvalue'))
    assert_equal(res.correlation, res.statistic)
def test_obrientransform():
    """Exercise stats.obrientransform against hand-computed values."""
    # Hand-computed single-sample cases.
    sample1 = np.array([0, 2, 4])
    trans1 = stats.obrientransform(sample1)
    assert_allclose(trans1[0], [7, -2, 7])

    sample2 = np.array([0, 3, 6, 9])
    trans2 = stats.obrientransform(sample2)
    assert_allclose(trans2[0], np.array([30, 0, 0, 30]))

    # Two arguments: each sample is transformed independently.
    a, b = stats.obrientransform(sample1, sample2)
    assert_equal(a, trans1[0])
    assert_equal(b, trans2[0])

    # Three arguments, with a repeat of the first sample.
    a, b, c = stats.obrientransform(sample1, sample2, sample1)
    assert_equal(a, trans1[0])
    assert_equal(b, trans2[0])
    assert_equal(c, trans1[0])

    # Regression test for the np.var replacement.  These expected numbers
    # were not independently verified by the original test author.
    sample1 = np.arange(5)
    expected = np.array(
        [[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],
         [21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]])
    assert_array_almost_equal(stats.obrientransform(sample1, 2*sample1),
                              expected, decimal=8)

    # Example from "O'Brien Test for Homogeneity of Variance"
    # by Herve Abdi.
    values = range(5, 11)
    reps = np.array([5, 11, 9, 3, 2, 2])
    data = np.repeat(values, reps)
    transformed_values = np.array([3.1828, 0.5591, 0.0344,
                                   1.6086, 5.2817, 11.0538])
    assert_array_almost_equal(stats.obrientransform(data)[0],
                              np.repeat(transformed_values, reps),
                              decimal=4)
def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7,
                      weights=None):
    """Assert that stats.gmean(...) matches `desired` in value and dtype.

    Note this doesn't test when axis is not specified.
    """
    actual = stats.gmean(array_like, axis=axis, dtype=dtype, weights=weights)
    assert_allclose(actual, desired, rtol=rtol)
    assert_equal(actual.dtype, dtype)
def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7,
                      weights=None):
    """Assert that stats.hmean(...) matches `desired` in value and dtype."""
    actual = stats.hmean(array_like, axis=axis, dtype=dtype, weights=weights)
    assert_allclose(actual, desired, rtol=rtol)
    assert_equal(actual.dtype, dtype)
def check_equal_pmean(array_like, exp, desired, axis=None, dtype=None,
                      rtol=1e-7, weights=None):
    """Assert that stats.pmean(..., exp) matches `desired` in value and dtype."""
    actual = stats.pmean(array_like, exp, axis=axis, dtype=dtype,
                         weights=weights)
    assert_allclose(actual, desired, rtol=rtol)
    assert_equal(actual.dtype, dtype)
class TestHarMean:
    # Tests for stats.hmean (harmonic mean).

    def test_0(self):
        # A zero in the data makes the harmonic mean zero.
        a = [1, 0, 2]
        desired = 0
        check_equal_hmean(a, desired)

    def test_1d_list(self):
        # Test a 1d list
        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        desired = 34.1417152147
        check_equal_hmean(a, desired)

        # Closed form for [1, 2, 3, 4]: n / sum(1/x_i).
        a = [1, 2, 3, 4]
        desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. / 4)
        check_equal_hmean(a, desired)

    def test_1d_array(self):
        # Test a 1d array
        a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 34.1417152147
        check_equal_hmean(a, desired)

    def test_1d_array_with_zero(self):
        a = np.array([1, 0])
        desired = 0.0
        assert_equal(stats.hmean(a), desired)

    def test_1d_array_with_negative_value(self):
        # Negative values are outside the domain of the harmonic mean.
        a = np.array([1, 0, -1])
        assert_raises(ValueError, stats.hmean, a)

    # Note the next tests use axis=None as default, not axis=0
    def test_2d_list(self):
        # Test a 2d list
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 38.6696271841
        check_equal_hmean(a, desired)

    def test_2d_array(self):
        # Test a 2d array
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 38.6696271841
        check_equal_hmean(np.array(a), desired)

    def test_2d_axis0(self):
        # Test a 2d list with axis=0
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545])
        check_equal_hmean(a, desired, axis=0)

    def test_2d_axis0_with_zero(self):
        # A zero anywhere in a column zeroes that column's harmonic mean.
        a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = np.array([22.88135593, 0.0, 52.90076336, 65.45454545])
        assert_allclose(stats.hmean(a, axis=0), desired)

    def test_2d_axis1(self):
        # Test a 2d list with axis=1
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = np.array([19.2, 63.03939962, 103.80078637])
        check_equal_hmean(a, desired, axis=1)

    def test_2d_axis1_with_zero(self):
        a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = np.array([0.0, 63.03939962, 103.80078637])
        assert_allclose(stats.hmean(a, axis=1), desired)

    def test_weights_1d_list(self):
        # Desired result from:
        # https://www.hackmath.net/en/math-problem/35871
        a = [2, 10, 6]
        weights = [10, 5, 3]
        desired = 3
        check_equal_hmean(a, desired, weights=weights, rtol=1e-5)

    def test_weights_2d_array_axis0(self):
        # Desired result from:
        # https://www.hackmath.net/en/math-problem/35871
        a = np.array([[2, 5], [10, 5], [6, 5]])
        weights = np.array([[10, 1], [5, 1], [3, 1]])
        desired = np.array([3, 5])
        check_equal_hmean(a, desired, axis=0, weights=weights, rtol=1e-5)

    def test_weights_2d_array_axis1(self):
        # Desired result from:
        # https://www.hackmath.net/en/math-problem/35871
        a = np.array([[2, 10, 6], [7, 7, 7]])
        weights = np.array([[10, 5, 3], [1, 1, 1]])
        desired = np.array([3, 7])
        check_equal_hmean(a, desired, axis=1, weights=weights, rtol=1e-5)

    def test_weights_masked_1d_array(self):
        # Masked-out weights (and their data) are excluded from the mean.
        # Desired result from:
        # https://www.hackmath.net/en/math-problem/35871
        a = np.array([2, 10, 6, 42])
        weights = np.ma.array([10, 5, 3, 42], mask=[0, 0, 0, 1])
        desired = 3
        check_equal_hmean(a, desired, weights=weights, rtol=1e-5)
class TestGeoMean:
    # Tests for stats.gmean (geometric mean).

    def test_0(self):
        # A zero in the data makes the geometric mean zero.
        a = [1, 0, 2]
        desired = 0
        check_equal_gmean(a, desired)

    def test_1d_list(self):
        # Test a 1d list
        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        desired = 45.2872868812
        check_equal_gmean(a, desired)

        # Closed form: (1*2*3*4)**(1/4).
        a = [1, 2, 3, 4]
        desired = power(1 * 2 * 3 * 4, 1. / 4.)
        check_equal_gmean(a, desired, rtol=1e-14)

    def test_1d_array(self):
        # Test a 1d array
        a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 45.2872868812
        check_equal_gmean(a, desired)

        # The dtype argument must be honored in the result.
        a = array([1, 2, 3, 4], float32)
        desired = power(1 * 2 * 3 * 4, 1. / 4.)
        check_equal_gmean(a, desired, dtype=float32)

    # Note the next tests use axis=None as default, not axis=0
    def test_2d_list(self):
        # Test a 2d list
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 52.8885199
        check_equal_gmean(a, desired)

    def test_2d_array(self):
        # Test a 2d array
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 52.8885199
        check_equal_gmean(array(a), desired)

    def test_2d_axis0(self):
        # Test a 2d list with axis=0
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])
        check_equal_gmean(a, desired, axis=0)

        # Identical rows: each column-wise geometric mean is the row itself.
        a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
        desired = array([1, 2, 3, 4])
        check_equal_gmean(a, desired, axis=0, rtol=1e-14)

    def test_2d_axis1(self):
        # Test a 2d list with axis=1
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = np.array([22.13363839, 64.02171746, 104.40086817])
        check_equal_gmean(a, desired, axis=1)

        a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
        v = power(1 * 2 * 3 * 4, 1. / 4.)
        desired = array([v, v, v])
        check_equal_gmean(a, desired, axis=1, rtol=1e-14)

    def test_large_values(self):
        # Must not overflow for huge inputs (computed via logs; see the
        # exp/log comments in the zero/negative tests below).
        a = array([1e100, 1e200, 1e300])
        desired = 1e200
        check_equal_gmean(a, desired, rtol=1e-13)

    def test_1d_list0(self):
        # Test a 1d list with zero element
        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
        desired = 0.0  # due to exp(-inf)=0
        with np.errstate(all='ignore'):
            check_equal_gmean(a, desired)

    def test_1d_array0(self):
        # Test a 1d array with zero element
        a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
        desired = 0.0  # due to exp(-inf)=0
        with np.errstate(divide='ignore'):
            check_equal_gmean(a, desired)

    def test_1d_list_neg(self):
        # Test a 1d list with negative element
        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, -1]
        desired = np.nan  # due to log(-1) = nan
        with np.errstate(invalid='ignore'):
            check_equal_gmean(a, desired)

    def test_weights_1d_list(self):
        # Desired result from:
        # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
        a = [1, 2, 3, 4, 5]
        weights = [2, 5, 6, 4, 3]
        desired = 2.77748
        check_equal_gmean(a, desired, weights=weights, rtol=1e-5)

    def test_weights_1d_array(self):
        # Desired result from:
        # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
        a = np.array([1, 2, 3, 4, 5])
        weights = np.array([2, 5, 6, 4, 3])
        desired = 2.77748
        check_equal_gmean(a, desired, weights=weights, rtol=1e-5)

    def test_weights_masked_1d_array(self):
        # Masked-out entries are excluded from the weighted mean.
        # Desired result from:
        # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
        a = np.array([1, 2, 3, 4, 5, 6])
        weights = np.ma.array([2, 5, 6, 4, 3, 5], mask=[0, 0, 0, 0, 0, 1])
        desired = 2.77748
        check_equal_gmean(a, desired, weights=weights, rtol=1e-5)
class TestPowMean:
    """Tests for `stats.pmean` (power mean with exponent ``p``)."""

    # Reference implementations used to generate expected values.  They do
    # not touch instance state and are always invoked through the class
    # (``TestPowMean.pmean_reference(...)``), so mark them static.
    @staticmethod
    def pmean_reference(a, p):
        # Unweighted power mean: (mean(a**p))**(1/p).
        return (np.sum(a**p) / a.size)**(1/p)

    @staticmethod
    def wpmean_reference(a, p, weights):
        # Weighted power mean: (sum(w * a**p) / sum(w))**(1/p).
        return (np.sum(weights * a**p) / np.sum(weights))**(1/p)

    def test_bad_exponent(self):
        # Non-scalar exponents are rejected.
        with pytest.raises(ValueError, match='Power mean only defined for'):
            stats.pmean([1, 2, 3], [0])
        with pytest.raises(ValueError, match='Power mean only defined for'):
            stats.pmean([1, 2, 3], np.array([0]))

    def test_1d_list(self):
        a, p = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 3.5
        desired = TestPowMean.pmean_reference(np.array(a), p)
        check_equal_pmean(a, p, desired)

        # p=2 is the quadratic mean (root mean square).
        a, p = [1, 2, 3, 4], 2
        desired = np.sqrt((1**2 + 2**2 + 3**2 + 4**2) / 4)
        check_equal_pmean(a, p, desired)

    def test_1d_array(self):
        a, p = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]), -2.5
        desired = TestPowMean.pmean_reference(a, p)
        check_equal_pmean(a, p, desired)

    def test_1d_array_with_zero(self):
        # With a negative exponent, a zero entry drives the mean to zero.
        a, p = np.array([1, 0]), -1
        desired = 0.0
        assert_equal(stats.pmean(a, p), desired)

    def test_1d_array_with_negative_value(self):
        # Negative data are rejected for non-integer exponents.
        a, p = np.array([1, 0, -1]), 1.23
        with pytest.raises(ValueError, match='Power mean only defined if all'):
            stats.pmean(a, p)

    @pytest.mark.parametrize(
        ("a", "p"),
        [([[10, 20], [50, 60], [90, 100]], -0.5),
         (np.array([[10, 20], [50, 60], [90, 100]]), 0.5)]
    )
    def test_2d_axisnone(self, a, p):
        # axis=None (the default) pools all values.
        desired = TestPowMean.pmean_reference(np.array(a), p)
        check_equal_pmean(a, p, desired)

    @pytest.mark.parametrize(
        ("a", "p"),
        [([[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], -0.5),
         ([[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], 0.5)]
    )
    def test_2d_list_axis0(self, a, p):
        # Column-wise expected values from the reference implementation.
        desired = [
            TestPowMean.pmean_reference(
                np.array([a[i][j] for i in range(len(a))]), p
            )
            for j in range(len(a[0]))
        ]
        check_equal_pmean(a, p, desired, axis=0)

    @pytest.mark.parametrize(
        ("a", "p"),
        [([[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], -0.5),
         ([[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], 0.5)]
    )
    def test_2d_list_axis1(self, a, p):
        # Row-wise expected values from the reference implementation.
        desired = [TestPowMean.pmean_reference(np.array(a_), p) for a_ in a]
        check_equal_pmean(a, p, desired, axis=1)

    def test_weights_1d_list(self):
        a, p = [2, 10, 6], -1.23456789
        weights = [10, 5, 3]
        desired = TestPowMean.wpmean_reference(np.array(a), p, weights)
        check_equal_pmean(a, p, desired, weights=weights, rtol=1e-5)

    def test_weights_masked_1d_array(self):
        # p=1 with weights is a plain weighted average; masked entries are
        # excluded.
        a, p = np.array([2, 10, 6, 42]), 1
        weights = np.ma.array([10, 5, 3, 42], mask=[0, 0, 0, 1])
        desired = np.average(a, weights=weights)
        check_equal_pmean(a, p, desired, weights=weights, rtol=1e-5)

    @pytest.mark.parametrize(
        ("axis", "fun_name", "p"),
        [(None, "wpmean_reference", 9.87654321),
         (0, "gmean", 0),
         (1, "hmean", -1)]
    )
    def test_weights_2d_array(self, axis, fun_name, p):
        # p=0 and p=-1 are checked against gmean/hmean respectively; the
        # general exponent is checked against the weighted reference.
        if fun_name == 'wpmean_reference':
            def fun(a, axis, weights):
                # `axis` is unused here: this branch only runs with axis=None.
                return TestPowMean.wpmean_reference(a, p, weights)
        else:
            fun = getattr(stats, fun_name)
        a = np.array([[2, 5], [10, 5], [6, 5]])
        weights = np.array([[10, 1], [5, 1], [3, 1]])
        desired = fun(a, axis=axis, weights=weights)
        check_equal_pmean(a, p, desired, axis=axis, weights=weights, rtol=1e-5)
class TestGeometricStandardDeviation:
    # must add 1 as `gstd` is only defined for positive values
    array_1d = np.arange(2 * 3 * 4) + 1
    # Reference gstd of array_1d, used by several tests below.
    gstd_array_1d = 2.294407613602
    array_3d = array_1d.reshape(2, 3, 4)

    def test_1d_array(self):
        gstd_actual = stats.gstd(self.array_1d)
        assert_allclose(gstd_actual, self.gstd_array_1d)

    def test_1d_numeric_array_like_input(self):
        # A tuple input is accepted just like an ndarray.
        gstd_actual = stats.gstd(tuple(self.array_1d))
        assert_allclose(gstd_actual, self.gstd_array_1d)

    def test_raises_value_error_non_array_like_input(self):
        with pytest.raises(ValueError, match='Invalid array input'):
            stats.gstd('This should fail as it can not be cast to an array.')

    def test_raises_value_error_zero_entry(self):
        with pytest.raises(ValueError, match='Non positive value'):
            stats.gstd(np.append(self.array_1d, [0]))

    def test_raises_value_error_negative_entry(self):
        with pytest.raises(ValueError, match='Non positive value'):
            stats.gstd(np.append(self.array_1d, [-1]))

    def test_raises_value_error_inf_entry(self):
        with pytest.raises(ValueError, match='Infinite value'):
            stats.gstd(np.append(self.array_1d, [np.inf]))

    def test_propagates_nan_values(self):
        # A NaN in one row yields NaN for that row only.
        a = array([[1, 1, 1, 16], [np.nan, 1, 2, 3]])
        gstd_actual = stats.gstd(a, axis=1)
        assert_allclose(gstd_actual, np.array([4, np.nan]))

    def test_ddof_equal_to_number_of_observations(self):
        with pytest.raises(ValueError, match='Degrees of freedom <= 0'):
            stats.gstd(self.array_1d, ddof=self.array_1d.size)

    def test_3d_array(self):
        # axis=None flattens, so this matches the 1-D reference value.
        gstd_actual = stats.gstd(self.array_3d, axis=None)
        assert_allclose(gstd_actual, self.gstd_array_1d)

    def test_3d_array_axis_type_tuple(self):
        # A tuple of axes reduces over both at once.
        gstd_actual = stats.gstd(self.array_3d, axis=(1,2))
        assert_allclose(gstd_actual, [2.12939215, 1.22120169])

    def test_3d_array_axis_0(self):
        gstd_actual = stats.gstd(self.array_3d, axis=0)
        gstd_desired = np.array([
            [6.1330555493918, 3.958900210120, 3.1206598248344, 2.6651441426902],
            [2.3758135028411, 2.174581428192, 2.0260062829505, 1.9115518327308],
            [1.8205343606803, 1.746342404566, 1.6846557065742, 1.6325269194382]
        ])
        assert_allclose(gstd_actual, gstd_desired)

    def test_3d_array_axis_1(self):
        gstd_actual = stats.gstd(self.array_3d, axis=1)
        gstd_desired = np.array([
            [3.118993630946, 2.275985934063, 1.933995977619, 1.742896469724],
            [1.271693593916, 1.254158641801, 1.238774141609, 1.225164057869]
        ])
        assert_allclose(gstd_actual, gstd_desired)

    def test_3d_array_axis_2(self):
        gstd_actual = stats.gstd(self.array_3d, axis=2)
        gstd_desired = np.array([
            [1.8242475707664, 1.2243686572447, 1.1318311657788],
            [1.0934830582351, 1.0724479791887, 1.0591498540749]
        ])
        assert_allclose(gstd_actual, gstd_desired)

    def test_masked_3d_array(self):
        # Masked input: unmasked slices agree with the unmasked computation
        # and the result carries the expected mask.
        ma = np.ma.masked_where(self.array_3d > 16, self.array_3d)
        gstd_actual = stats.gstd(ma, axis=2)
        gstd_desired = stats.gstd(self.array_3d, axis=2)
        mask = [[0, 0, 0], [0, 1, 1]]
        assert_allclose(gstd_actual, gstd_desired)
        assert_equal(gstd_actual.mask, mask)
def test_binomtest():
    """Precision tests of `stats.binomtest` p-values against R (ticket #986).

    Fixes the malformed error message: 'fail forp=%f' -> 'fail for p=%f'.
    """
    # Hypothesized probabilities spanning the lower tail, the neighborhood
    # of x/n, and the upper tail.
    pp = np.concatenate((np.linspace(0.1, 0.2, 5),
                         np.linspace(0.45, 0.65, 5),
                         np.linspace(0.85, 0.95, 5)))
    n = 501
    x = 450

    # Expected p-values for each hypothesized p (see ticket #986).
    results = [0.0, 0.0, 1.0159969301994141e-304,
               2.9752418572150531e-275, 7.7668382922535275e-250,
               2.3381250925167094e-099, 7.8284591587323951e-081,
               9.9155947819961383e-065, 2.8729390725176308e-050,
               1.7175066298388421e-037, 0.0021070691951093692,
               0.12044570587262322, 0.88154763174802508, 0.027120993063129286,
               2.6102587134694721e-006]

    for p, res in zip(pp, results):
        assert_approx_equal(stats.binomtest(x, n, p).pvalue, res,
                            significant=12, err_msg='fail for p=%f' % p)

    assert_approx_equal(stats.binomtest(50, 100, 0.1).pvalue,
                        5.8320387857343647e-024,
                        significant=12)
def test_binomtest2():
    """Regression test for issue #2384: binomtest p-values at p=0.5.

    expected[n-1][k] is the two-sided p-value of k successes in n trials.
    """
    expected = [
        [1.0, 1.0],
        [0.5, 1.0, 0.5],
        [0.25, 1.00, 1.00, 0.25],
        [0.125, 0.625, 1.000, 0.625, 0.125],
        [0.0625, 0.3750, 1.0000, 1.0000, 0.3750, 0.0625],
        [0.03125, 0.21875, 0.68750, 1.00000, 0.68750, 0.21875, 0.03125],
        [0.015625, 0.125000, 0.453125, 1.000000, 1.000000, 0.453125, 0.125000,
         0.015625],
        [0.0078125, 0.0703125, 0.2890625, 0.7265625, 1.0000000, 0.7265625,
         0.2890625, 0.0703125, 0.0078125],
        [0.00390625, 0.03906250, 0.17968750, 0.50781250, 1.00000000,
         1.00000000, 0.50781250, 0.17968750, 0.03906250, 0.00390625],
        [0.001953125, 0.021484375, 0.109375000, 0.343750000, 0.753906250,
         1.000000000, 0.753906250, 0.343750000, 0.109375000, 0.021484375,
         0.001953125]
    ]
    for n_trials, row in enumerate(expected, start=1):
        actual = [stats.binomtest(k, n_trials, 0.5).pvalue
                  for k in range(n_trials + 1)]
        assert_almost_equal(actual, row, decimal=10)
def test_binomtest3():
    """Check binomtest around the point x == n*p (issue #2384).

    When the observed count equals the expected count (x == n*p), the
    two-sided p-value is exactly 1.  The neighboring counts
    x = n*p - 1 and x = n*p + 1 are compared against reference values
    from R's ``binom.test`` (the generating R sessions are transcribed
    in the comments below).
    """
    # test added for issue #2384
    # test when x == n*p and neighbors
    res3 = [stats.binomtest(v, v*k, 1./k).pvalue
            for v in range(1, 11) for k in range(2, 11)]
    assert_equal(res3, np.ones(len(res3), int))

    # Reference values for x = n*p - 1, generated in R with:
    # > bt=c()
    # > for(i in as.single(1:10)) {
    # +     for(k in as.single(2:10)) {
    # +         bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value);
    # +         print(c(i+1, k*i,(1/k)))
    # +     }
    # + }
    binom_testm1 = np.array([
        0.5, 0.5555555555555556, 0.578125, 0.5904000000000003,
        0.5981224279835393, 0.603430543396034, 0.607304096221924,
        0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115,
        0.68853759765625, 0.6980101120000006, 0.703906431368616,
        0.70793209416498, 0.7108561134173507, 0.713076544331419,
        0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174,
        0.74986110468096, 0.7548015520398076, 0.7581671424768577,
        0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625,
        0.761553963657302, 0.774800934828818, 0.7818005980538996,
        0.78613491480358, 0.789084353140195, 0.7912217659828884,
        0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176,
        0.7976688481430754, 0.8039848974727624, 0.807891868948366,
        0.8105487660137676, 0.812473307174702, 0.8139318233591120,
        0.815075399104785, 0.7744140625, 0.8037322594985427,
        0.814742863657656, 0.8205425178645808, 0.8241275984172285,
        0.8265645374416, 0.8283292196088257, 0.829666291102775,
        0.8307144686362666, 0.7905273437499996, 0.8178712053954738,
        0.828116983756619, 0.833508948940494, 0.8368403871552892,
        0.839104213210105, 0.840743186196171, 0.84198481438049,
        0.8429580531563676, 0.803619384765625, 0.829338573944648,
        0.8389591907548646, 0.84401876783902, 0.84714369697889,
        0.8492667010581667, 0.850803474598719, 0.851967542858308,
        0.8528799045949524, 0.8145294189453126, 0.838881732845347,
        0.847979024541911, 0.852760894015685, 0.8557134656773457,
        0.8577190131799202, 0.85917058278431, 0.860270010472127,
        0.861131648404582, 0.823802947998047, 0.846984756807511,
        0.855635653643743, 0.860180994825685, 0.86298688573253,
        0.864892525675245, 0.866271647085603, 0.867316125625004,
        0.8681346531755114
        ])
    # Reference values for x = n*p + 1, generated in R with:
    # > bt=c()
    # > for(i in as.single(1:10)) {
    # +     for(k in as.single(2:10)) {
    # +         bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value);
    # +         print(c(i+1, k*i,(1/k)))
    # +     }
    # + }
    binom_testp1 = np.array([
        0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551,
        0.2635138663069203, 0.2636951804161073, 0.2638162407564354,
        0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875,
        0.4295746560000003, 0.43473045988554, 0.4383309503172684,
        0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875,
        0.4927602499618962, 0.5096031427383425, 0.5189636628480,
        0.5249280070771274, 0.5290623300865124, 0.5320974248125793,
        0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808,
        0.5669248746708034, 0.576436455045805, 0.5824538812831795,
        0.5866053321547824, 0.589642781414643, 0.5919618019300193,
        0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209,
        0.617303847446822, 0.623172512167948, 0.627208862156123,
        0.6301556891501057, 0.632401894928977, 0.6341708982290303,
        0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579,
        0.65392850011132, 0.657816519817211, 0.660650782947676,
        0.662808780346311, 0.6645068560246006, 0.7905273437499996,
        0.6478843304312477, 0.6640468318879372, 0.6727589686071775,
        0.6782129857784873, 0.681950188903695, 0.684671508668418,
        0.686741824999918, 0.688369886732168, 0.803619384765625,
        0.668716055304315, 0.684360013879534, 0.6927642396829181,
        0.6980155964704895, 0.701609591890657, 0.7042244320992127,
        0.7062125081341817, 0.707775152962577, 0.8145294189453126,
        0.686243374488305, 0.7013873696358975, 0.709501223328243,
        0.714563595144314, 0.718024953392931, 0.7205416252126137,
        0.722454130389843, 0.723956813292035, 0.823802947998047,
        0.701255953767043, 0.715928221686075, 0.723772209289768,
        0.7286603031173616, 0.7319999279787631, 0.7344267920995765,
        0.736270323773157, 0.737718376096348
        ])

    # Compare scipy against both R reference arrays to 13 decimals.
    res4_p1 = [stats.binomtest(v+1, v*k, 1./k).pvalue
               for v in range(1, 11) for k in range(2, 11)]
    res4_m1 = [stats.binomtest(v-1, v*k, 1./k).pvalue
               for v in range(1, 11) for k in range(2, 11)]

    assert_almost_equal(res4_p1, binom_testp1, decimal=13)
    assert_almost_equal(res4_m1, binom_testm1, decimal=13)
class TestTrim:
    """Tests for the trimming functions trim1, trimboth and trim_mean."""

    def test_trim1(self):
        a = np.arange(11)
        # The default tail is 'right': the largest values are removed.
        assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10))
        assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9))
        assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')),
                     np.arange(2, 11))
        assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')),
                     np.arange(3, 11))
        # Trimming a full proportion of 1.0 removes everything.
        assert_equal(stats.trim1(a, 1.0), [])
        assert_equal(stats.trim1(a, 1.0, tail='left'), [])

        # empty input
        assert_equal(stats.trim1([], 0.1), [])
        assert_equal(stats.trim1([], 3/11., tail='left'), [])
        assert_equal(stats.trim1([], 4/6.), [])

        # test axis
        a = np.arange(24).reshape(6, 4)
        ref = np.arange(4, 24).reshape(5, 4)  # first row trimmed

        axis = 0
        trimmed = stats.trim1(a, 0.2, tail='left', axis=axis)
        assert_equal(np.sort(trimmed, axis=axis), ref)

        axis = 1
        trimmed = stats.trim1(a.T, 0.2, tail='left', axis=axis)
        assert_equal(np.sort(trimmed, axis=axis), ref.T)

    def test_trimboth(self):
        a = np.arange(11)
        assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8))
        assert_equal(np.sort(stats.trimboth(a, 0.2)),
                     np.array([2, 3, 4, 5, 6, 7, 8]))
        assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)),
                     np.arange(4, 20).reshape(4, 4))
        assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T,
                                            2/6.)),
                     np.array([[2, 8, 14, 20], [3, 9, 15, 21]]))
        # Trimming half (or more) from each tail leaves nothing -> error.
        assert_raises(ValueError, stats.trimboth,
                      np.arange(24).reshape(4, 6).T, 4/6.)

        # empty input
        assert_equal(stats.trimboth([], 0.1), [])
        assert_equal(stats.trimboth([], 3/11.), [])
        assert_equal(stats.trimboth([], 4/6.), [])

    def test_trim_mean(self):
        # don't use pre-sorted arrays
        # (BUG FIX: removed a stray `a = np.array([...])` that was never
        # used -- it was unconditionally rebound below before any use.)
        idx = np.array([3, 5, 0, 1, 2, 4])
        a2 = np.arange(24).reshape(6, 4)[idx, :]
        a3 = np.arange(24).reshape(6, 4, order='F')[idx, :]
        assert_equal(stats.trim_mean(a3, 2/6.),
                     np.array([2.5, 8.5, 14.5, 20.5]))
        assert_equal(stats.trim_mean(a2, 2/6.),
                     np.array([10., 11., 12., 13.]))
        idx4 = np.array([1, 0, 3, 2])
        a4 = np.arange(24).reshape(4, 6)[idx4, :]
        assert_equal(stats.trim_mean(a4, 2/6.),
                     np.array([9., 10., 11., 12., 13., 14.]))
        # shuffled arange(24) as array_like
        a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23,
             20, 2, 14, 4, 13, 8, 3]
        assert_equal(stats.trim_mean(a, 2/6.), 11.5)
        assert_equal(stats.trim_mean([5, 4, 3, 1, 2, 0], 2/6.), 2.5)

        # check axis argument
        np.random.seed(1234)
        a = np.random.randint(20, size=(5, 6, 4, 7))
        for axis in [0, 1, 2, 3, -1]:
            res1 = stats.trim_mean(a, 2/6., axis=axis)
            res2 = stats.trim_mean(np.moveaxis(a, axis, 0), 2/6.)
            assert_equal(res1, res2)

        res1 = stats.trim_mean(a, 2/6., axis=None)
        res2 = stats.trim_mean(a.ravel(), 2/6.)
        assert_equal(res1, res2)

        assert_raises(ValueError, stats.trim_mean, a, 0.6)

        # empty input
        assert_equal(stats.trim_mean([], 0.0), np.nan)
        assert_equal(stats.trim_mean([], 0.6), np.nan)
class TestSigmaClip:
def test_sigmaclip1(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 4 # default
c, low, upp = stats.sigmaclip(a)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, a.size)
def test_sigmaclip2(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 1.5
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, 4)
assert_equal(a.size, 36) # check original array unchanged
def test_sigmaclip3(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c, np.linspace(9.5, 10.5, 11))
def test_sigmaclip_result_attributes(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
res = stats.sigmaclip(a, fact, fact)
attributes = ('clipped', 'lower', 'upper')
check_named_results(res, attributes)
def test_std_zero(self):
# regression test #8632
x = np.ones(10)
assert_equal(stats.sigmaclip(x)[0], x)
class TestAlexanderGovern:
    """Tests for stats.alexandergovern.

    Reference values come from R's ``onewaytests::ag.test`` and from the
    published papers cited in the individual tests; the generating R
    sessions are transcribed verbatim in the docstrings/comments.
    """

    def test_compare_dtypes(self):
        # The result must be identical regardless of the input dtype.
        args = [[13, 13, 13, 13, 13, 13, 13, 12, 12],
                [14, 13, 12, 12, 12, 12, 12, 11, 11],
                [14, 14, 13, 13, 13, 13, 13, 12, 12],
                [15, 14, 13, 13, 13, 12, 12, 12, 11]]
        args_int16 = np.array(args, dtype=np.int16)
        args_int32 = np.array(args, dtype=np.int32)
        args_uint8 = np.array(args, dtype=np.uint8)
        args_float64 = np.array(args, dtype=np.float64)

        res_int16 = stats.alexandergovern(*args_int16)
        res_int32 = stats.alexandergovern(*args_int32)
        res_unit8 = stats.alexandergovern(*args_uint8)
        res_float64 = stats.alexandergovern(*args_float64)

        assert (res_int16.pvalue == res_int32.pvalue ==
                res_unit8.pvalue == res_float64.pvalue)
        assert (res_int16.statistic == res_int32.statistic ==
                res_unit8.statistic == res_float64.statistic)

    def test_bad_inputs(self):
        # input array is of size zero
        with assert_raises(ValueError, match="Input sample size must be"
                                             " greater than one."):
            stats.alexandergovern([1, 2], [])
        # input is a singular non list element
        with assert_raises(ValueError, match="Input sample size must be"
                                             " greater than one."):
            stats.alexandergovern([1, 2], 2)
        # input list is of size 1
        with assert_raises(ValueError, match="Input sample size must be"
                                             " greater than one."):
            stats.alexandergovern([1, 2], [2])
        # inputs are not finite (infinity)
        with assert_raises(ValueError, match="Input samples must be finite."):
            stats.alexandergovern([1, 2], [np.inf, np.inf])
        # inputs are multidimensional
        with assert_raises(ValueError, match="Input samples must be one"
                                             "-dimensional"):
            stats.alexandergovern([1, 2], [[1, 2], [3, 4]])

    def test_compare_r(self):
        '''
        Data generated in R with
        > set.seed(1)
        > library("onewaytests")
        > library("tibble")
        > y <- c(rnorm(40, sd=10),
        +        rnorm(30, sd=15),
        +        rnorm(20, sd=20))
        > x <- c(rep("one", times=40),
        +        rep("two", times=30),
        +        rep("eight", times=20))
        > x <- factor(x)
        > ag.test(y ~ x, tibble(y,x))

        Alexander-Govern Test (alpha = 0.05)
        -------------------------------------------------------------
        data : y and x

        statistic : 1.359941
        parameter : 2
        p.value : 0.5066321

        Result : Difference is not statistically significant.
        -------------------------------------------------------------
        Example adapted from:
        https://eval-serv2.metpsy.uni-jena.de/wiki-metheval-hp/index.php/R_FUN_Alexander-Govern

        '''
        # Three samples drawn (in R) from normals with sd 10, 15, 20.
        one = [-6.264538107423324, 1.8364332422208225, -8.356286124100471,
               15.952808021377916, 3.295077718153605, -8.204683841180152,
               4.874290524284853, 7.383247051292173, 5.757813516534923,
               -3.0538838715635603, 15.11781168450848, 3.898432364114311,
               -6.2124058054180376, -22.146998871774997, 11.249309181431082,
               -0.4493360901523085, -0.16190263098946087, 9.438362106852992,
               8.212211950980885, 5.939013212175088, 9.189773716082183,
               7.821363007310671, 0.745649833651906, -19.89351695863373,
               6.198257478947102, -0.5612873952900078, -1.557955067053293,
               -14.707523838992744, -4.781500551086204, 4.179415601997024,
               13.58679551529044, -1.0278772734299553, 3.876716115593691,
               -0.5380504058290512, -13.770595568286065, -4.149945632996798,
               -3.942899537103493, -0.5931339671118566, 11.000253719838831,
               7.631757484575442]

        two = [-2.4678539438038034, -3.8004252020476135, 10.454450631071062,
               8.34994798010486, -10.331335418242798, -10.612427354431794,
               5.468729432052455, 11.527993867731237, -1.6851931822534207,
               13.216615896813222, 5.971588205506021, -9.180395898761569,
               5.116795371366372, -16.94044644121189, 21.495355525515556,
               29.7059984775879, -5.508322146997636, -15.662019394747961,
               8.545794411636193, -2.0258190582123654, 36.024266407571645,
               -0.5886000409975387, 10.346090436761651, 0.4200323817099909,
               -11.14909813323608, 2.8318844927151434, -27.074379433365568,
               21.98332292344329, 2.2988000731784655, 32.58917505543229]

        eight = [9.510190577993251, -14.198928618436291, 12.214527069781099,
                 -18.68195263288503, -25.07266800478204, 5.828924710349257,
                 -8.86583746436866, 0.02210703263248262, 1.4868264830332811,
                 -11.79041892376144, -11.37337465637004, -2.7035723024766414,
                 23.56173993146409, -30.47133600859524, 11.878923752568431,
                 6.659007424270365, 21.261996745527256, -6.083678472686013,
                 7.400376198325763, 5.341975815444621]
        soln = stats.alexandergovern(one, two, eight)
        assert_allclose(soln.statistic, 1.3599405447999450836)
        assert_allclose(soln.pvalue, 0.50663205309676440091)

    def test_compare_scholar(self):
        '''
        Data taken from 'The Modification and Evaluation of the
        Alexander-Govern Test in Terms of Power' by Kingsley Ochuko, T.,
        Abdullah, S., Binti Zain, Z., & Soaad Syed Yahaya, S. (2015).
        '''
        young = [482.43, 484.36, 488.84, 495.15, 495.24, 502.69, 504.62,
                 518.29, 519.1, 524.1, 524.12, 531.18, 548.42, 572.1, 584.68,
                 609.09, 609.53, 666.63, 676.4]
        middle = [335.59, 338.43, 353.54, 404.27, 437.5, 469.01, 485.85,
                  487.3, 493.08, 494.31, 499.1, 886.41]
        old = [519.01, 528.5, 530.23, 536.03, 538.56, 538.83, 557.24, 558.61,
               558.95, 565.43, 586.39, 594.69, 629.22, 645.69, 691.84]
        soln = stats.alexandergovern(young, middle, old)
        # First compare (loosely) against the published paper's values...
        assert_allclose(soln.statistic, 5.3237, atol=1e-3)
        assert_allclose(soln.pvalue, 0.06982, atol=1e-4)

        # verify with ag.test in r
        '''
        > library("onewaytests")
        > library("tibble")
        > young <- c(482.43, 484.36, 488.84, 495.15, 495.24, 502.69, 504.62,
        +            518.29, 519.1, 524.1, 524.12, 531.18, 548.42, 572.1,
        +            584.68, 609.09, 609.53, 666.63, 676.4)
        > middle <- c(335.59, 338.43, 353.54, 404.27, 437.5, 469.01, 485.85,
        +             487.3, 493.08, 494.31, 499.1, 886.41)
        > old <- c(519.01, 528.5, 530.23, 536.03, 538.56, 538.83, 557.24,
        +          558.61, 558.95, 565.43, 586.39, 594.69, 629.22,
        +          645.69, 691.84)
        > young_fct <- c(rep("young", times=19))
        > middle_fct <-c(rep("middle", times=12))
        > old_fct <- c(rep("old", times=15))
        > ag.test(a ~ b, tibble(a=c(young, middle, old), b=factor(c(young_fct,
        +                                              middle_fct, old_fct))))

        Alexander-Govern Test (alpha = 0.05)
        -------------------------------------------------------------
        data : a and b

        statistic : 5.324629
        parameter : 2
        p.value : 0.06978651

        Result : Difference is not statistically significant.
        -------------------------------------------------------------

        '''
        # ... then tightly against R's more precise values.
        assert_allclose(soln.statistic, 5.324629)
        assert_allclose(soln.pvalue, 0.06978651)

    def test_compare_scholar3(self):
        '''
        Data taken from 'Robustness And Comparative Power Of WelchAspin,
        Alexander-Govern And Yuen Tests Under Non-Normality And Variance
        Heteroscedasticity', by Ayed A. Almoied. 2017. Page 34-37.
        https://digitalcommons.wayne.edu/cgi/viewcontent.cgi?article=2775&context=oa_dissertations
        '''
        x1 = [-1.77559, -1.4113, -0.69457, -0.54148, -0.18808, -0.07152,
              0.04696, 0.051183, 0.148695, 0.168052, 0.422561, 0.458555,
              0.616123, 0.709968, 0.839956, 0.857226, 0.929159, 0.981442,
              0.999554, 1.642958]
        x2 = [-1.47973, -1.2722, -0.91914, -0.80916, -0.75977, -0.72253,
              -0.3601, -0.33273, -0.28859, -0.09637, -0.08969, -0.01824,
              0.260131, 0.289278, 0.518254, 0.683003, 0.877618, 1.172475,
              1.33964, 1.576766]
        soln = stats.alexandergovern(x1, x2)
        # Loose comparison against the dissertation's rounded values...
        assert_allclose(soln.statistic, 0.713526, atol=1e-5)
        assert_allclose(soln.pvalue, 0.398276, atol=1e-5)

        '''
        tested in ag.test in R:
        > library("onewaytests")
        > library("tibble")
        > x1 <- c(-1.77559, -1.4113, -0.69457, -0.54148, -0.18808, -0.07152,
        +         0.04696, 0.051183, 0.148695, 0.168052, 0.422561, 0.458555,
        +         0.616123, 0.709968, 0.839956, 0.857226, 0.929159, 0.981442,
        +         0.999554, 1.642958)
        > x2 <- c(-1.47973, -1.2722, -0.91914, -0.80916, -0.75977, -0.72253,
        +         -0.3601, -0.33273, -0.28859, -0.09637, -0.08969, -0.01824,
        +         0.260131, 0.289278, 0.518254, 0.683003, 0.877618, 1.172475,
        +         1.33964, 1.576766)
        > x1_fact <- c(rep("x1", times=20))
        > x2_fact <- c(rep("x2", times=20))
        > a <- c(x1, x2)
        > b <- factor(c(x1_fact, x2_fact))
        > ag.test(a ~ b, tibble(a, b))
        Alexander-Govern Test (alpha = 0.05)
        -------------------------------------------------------------
        data : a and b

        statistic : 0.7135182
        parameter : 1
        p.value : 0.3982783

        Result : Difference is not statistically significant.
        -------------------------------------------------------------
        '''
        # ... then tightly against R's more precise values.
        assert_allclose(soln.statistic, 0.7135182)
        assert_allclose(soln.pvalue, 0.3982783)

    def test_nan_policy_propogate(self):
        # NOTE(review): method name has a typo ("propogate"); kept as-is
        # since renaming would change the reported test id.
        args = [[1, 2, 3, 4], [1, np.nan]]
        # default nan_policy is 'propagate'
        res = stats.alexandergovern(*args)
        assert_equal(res.pvalue, np.nan)
        assert_equal(res.statistic, np.nan)

    def test_nan_policy_raise(self):
        # nan_policy='raise' must reject inputs containing NaN.
        args = [[1, 2, 3, 4], [1, np.nan]]
        with assert_raises(ValueError, match="The input contains nan values"):
            stats.alexandergovern(*args, nan_policy='raise')

    def test_nan_policy_omit(self):
        # nan_policy='omit' must give the same result as dropping the
        # NaN/None entries by hand.
        args_nan = [[1, 2, 3, None, 4], [1, np.nan, 19, 25]]
        args_no_nan = [[1, 2, 3, 4], [1, 19, 25]]
        res_nan = stats.alexandergovern(*args_nan, nan_policy='omit')
        res_no_nan = stats.alexandergovern(*args_no_nan)
        assert_equal(res_nan.pvalue, res_no_nan.pvalue)
        assert_equal(res_nan.statistic, res_no_nan.statistic)

    def test_constant_input(self):
        # Zero variance input, consistent with `stats.pearsonr`
        msg = "An input array is constant; the statistic is not defined."
        with assert_warns(stats.ConstantInputWarning, match=msg):
            res = stats.alexandergovern([0.667, 0.667, 0.667],
                                        [0.123, 0.456, 0.789])
            assert_equal(res.statistic, np.nan)
            assert_equal(res.pvalue, np.nan)
class TestFOneWay:
def test_trivial(self):
# A trivial test of stats.f_oneway, with F=0.
F, p = stats.f_oneway([0, 2], [0, 2])
assert_equal(F, 0.0)
assert_equal(p, 1.0)
def test_basic(self):
# Despite being a floating point calculation, this data should
# result in F being exactly 2.0.
F, p = stats.f_oneway([0, 2], [2, 4])
assert_equal(F, 2.0)
assert_allclose(p, 1 - np.sqrt(0.5), rtol=1e-14)
def test_known_exact(self):
# Another trivial dataset for which the exact F and p can be
# calculated.
F, p = stats.f_oneway([2], [2], [2, 3, 4])
# The use of assert_equal might be too optimistic, but the calculation
# in this case is trivial enough that it is likely to go through with
# no loss of precision.
assert_equal(F, 3/5)
assert_equal(p, 5/8)
def test_large_integer_array(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
F, p = stats.f_oneway(a, b)
# The expected value was verified by computing it with mpmath with
# 40 digits of precision.
assert_allclose(F, 0.77450216931805540, rtol=1e-14)
def test_result_attributes(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
res = stats.f_oneway(a, b)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nist(self):
# These are the nist ANOVA files. They can be found at:
# https://www.itl.nist.gov/div898/strd/anova/anova.html
filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat',
'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat',
'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat']
for test_case in filenames:
rtol = 1e-7
fname = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/nist_anova', test_case))
with open(fname) as f:
content = f.read().split('\n')
certified = [line.split() for line in content[40:48]
if line.strip()]
dataf = np.loadtxt(fname, skiprows=60)
y, x = dataf.T
y = y.astype(int)
caty = np.unique(y)
f = float(certified[0][-1])
xlist = [x[y == i] for i in caty]
res = stats.f_oneway(*xlist)
# With the hard test cases we relax the tolerance a bit.
hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat')
if test_case in hard_tc:
rtol = 1e-4
assert_allclose(res[0], f, rtol=rtol,
err_msg='Failing testcase: %s' % test_case)
@pytest.mark.parametrize("a, b, expected", [
(np.array([42, 42, 42]), np.array([7, 7, 7]), (np.inf, 0)),
(np.array([42, 42, 42]), np.array([42, 42, 42]), (np.nan, np.nan))
])
def test_constant_input(self, a, b, expected):
# For more details, look on https://github.com/scipy/scipy/issues/11669
msg = "Each of the input arrays is constant;"
with assert_warns(stats.ConstantInputWarning, match=msg):
f, p = stats.f_oneway(a, b)
assert f, p == expected
@pytest.mark.parametrize('axis', [-2, -1, 0, 1])
def test_2d_inputs(self, axis):
a = np.array([[1, 4, 3, 3],
[2, 5, 3, 3],
[3, 6, 3, 3],
[2, 3, 3, 3],
[1, 4, 3, 3]])
b = np.array([[3, 1, 5, 3],
[4, 6, 5, 3],
[4, 3, 5, 3],
[1, 5, 5, 3],
[5, 5, 5, 3],
[2, 3, 5, 3],
[8, 2, 5, 3],
[2, 2, 5, 3]])
c = np.array([[4, 3, 4, 3],
[4, 2, 4, 3],
[5, 4, 4, 3],
[5, 4, 4, 3]])
if axis in [-1, 1]:
a = a.T
b = b.T
c = c.T
take_axis = 0
else:
take_axis = 1
warn_msg = "Each of the input arrays is constant;"
with assert_warns(stats.ConstantInputWarning, match=warn_msg):
f, p = stats.f_oneway(a, b, c, axis=axis)
# Verify that the result computed with the 2d arrays matches
# the result of calling f_oneway individually on each slice.
for j in [0, 1]:
fj, pj = stats.f_oneway(np.take(a, j, take_axis),
np.take(b, j, take_axis),
np.take(c, j, take_axis))
assert_allclose(f[j], fj, rtol=1e-14)
assert_allclose(p[j], pj, rtol=1e-14)
for j in [2, 3]:
with assert_warns(stats.ConstantInputWarning, match=warn_msg):
fj, pj = stats.f_oneway(np.take(a, j, take_axis),
np.take(b, j, take_axis),
np.take(c, j, take_axis))
assert_equal(f[j], fj)
assert_equal(p[j], pj)
def test_3d_inputs(self):
# Some 3-d arrays. (There is nothing special about the values.)
a = 1/np.arange(1.0, 4*5*7 + 1).reshape(4, 5, 7)
b = 2/np.arange(1.0, 4*8*7 + 1).reshape(4, 8, 7)
c = np.cos(1/np.arange(1.0, 4*4*7 + 1).reshape(4, 4, 7))
f, p = stats.f_oneway(a, b, c, axis=1)
assert f.shape == (4, 7)
assert p.shape == (4, 7)
for i in range(a.shape[0]):
for j in range(a.shape[2]):
fij, pij = stats.f_oneway(a[i, :, j], b[i, :, j], c[i, :, j])
assert_allclose(fij, f[i, j])
assert_allclose(pij, p[i, j])
def test_length0_1d_error(self):
# Require at least one value in each group.
msg = 'all input arrays have length 1.'
with assert_warns(stats.DegenerateDataWarning, match=msg):
result = stats.f_oneway([1, 2, 3], [], [4, 5, 6, 7])
assert_equal(result, (np.nan, np.nan))
def test_length0_2d_error(self):
msg = 'all input arrays have length 1.'
with assert_warns(stats.DegenerateDataWarning, match=msg):
ncols = 3
a = np.ones((4, ncols))
b = np.ones((0, ncols))
c = np.ones((5, ncols))
f, p = stats.f_oneway(a, b, c)
nans = np.full((ncols,), fill_value=np.nan)
assert_equal(f, nans)
assert_equal(p, nans)
def test_all_length_one(self):
msg = 'all input arrays have length 1.'
with assert_warns(stats.DegenerateDataWarning, match=msg):
result = stats.f_oneway([10], [11], [12], [13])
assert_equal(result, (np.nan, np.nan))
@pytest.mark.parametrize('args', [(), ([1, 2, 3],)])
def test_too_few_inputs(self, args):
with assert_raises(TypeError):
stats.f_oneway(*args)
def test_axis_error(self):
a = np.ones((3, 4))
b = np.ones((5, 4))
with assert_raises(np.AxisError):
stats.f_oneway(a, b, axis=2)
def test_bad_shapes(self):
a = np.ones((3, 4))
b = np.ones((5, 4))
with assert_raises(ValueError):
stats.f_oneway(a, b, axis=1)
class TestKruskal:
def test_simple(self):
x = [1]
y = [2]
h, p = stats.kruskal(x, y)
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
def test_basic(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
h, p = stats.kruskal(x, y)
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
def test_simple_tie(self):
x = [1]
y = [1, 2]
h_uncorr = 1.5**2 + 2*2.25**2 - 12
corr = 0.75
expected = h_uncorr / corr # 0.5
h, p = stats.kruskal(x, y)
# Since the expression is simple and the exact answer is 0.5, it
# should be safe to use assert_equal().
assert_equal(h, expected)
def test_another_tie(self):
x = [1, 1, 1, 2]
y = [2, 2, 2, 2]
h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr
h, p = stats.kruskal(x, y)
assert_approx_equal(h, expected)
def test_three_groups(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = [2, 2]
h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr # 7.0
h, p = stats.kruskal(x, y, z)
assert_approx_equal(h, expected)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 2))
def test_empty(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = []
assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan))
def test_kruskal_result_attributes(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
res = stats.kruskal(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.kruskal(x, x), (np.nan, np.nan))
assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0))
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise')
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar')
def test_large_no_samples(self):
# Test to see if large samples are handled correctly.
n = 50000
x = np.random.randn(n)
y = np.random.randn(n) + 50
h, p = stats.kruskal(x, y)
expected = 0
assert_approx_equal(p, expected)
class TestCombinePvalues:
    """Tests for stats.combine_pvalues against reference values."""

    def test_fisher(self):
        # Reference value for Fisher's method, combined chi-square test.
        stat, pval = stats.combine_pvalues([.01, .2, .3], method='fisher')
        assert_approx_equal(pval, 0.02156, significant=4)

    def test_stouffer(self):
        stat, pval = stats.combine_pvalues([.01, .2, .3], method='stouffer')
        assert_approx_equal(pval, 0.01651, significant=4)

    def test_stouffer2(self):
        # Combining p-values that are all exactly 0.5 gives 0.5 again.
        stat, pval = stats.combine_pvalues([.5, .5, .5], method='stouffer')
        assert_approx_equal(pval, 0.5, significant=4)

    def test_weighted_stouffer(self):
        # Uniform weights reproduce the unweighted Stouffer result.
        stat, pval = stats.combine_pvalues([.01, .2, .3], method='stouffer',
                                           weights=np.ones(3))
        assert_approx_equal(pval, 0.01651, significant=4)

    def test_weighted_stouffer2(self):
        stat, pval = stats.combine_pvalues([.01, .2, .3], method='stouffer',
                                           weights=np.array((1, 4, 9)))
        assert_approx_equal(pval, 0.1464, significant=4)

    def test_pearson(self):
        stat, pval = stats.combine_pvalues([.01, .2, .3], method='pearson')
        assert_approx_equal(pval, 0.02213, significant=4)

    def test_tippett(self):
        stat, pval = stats.combine_pvalues([.01, .2, .3], method='tippett')
        assert_approx_equal(pval, 0.0297, significant=4)

    def test_mudholkar_george(self):
        stat, pval = stats.combine_pvalues([.1, .1, .1],
                                           method='mudholkar_george')
        assert_approx_equal(pval, 0.019462, significant=4)

    def test_mudholkar_george_equal_fisher_pearson_average(self):
        # The Mudholkar-George statistic equals the mean of the Fisher
        # and Pearson statistics.
        stat, _ = stats.combine_pvalues([.01, .2, .3],
                                        method='mudholkar_george')
        stat_f, _ = stats.combine_pvalues([.01, .2, .3], method='fisher')
        stat_p, _ = stats.combine_pvalues([.01, .2, .3], method='pearson')
        assert_approx_equal(0.5 * (stat_f + stat_p), stat, significant=4)

    methods = ["fisher", "pearson", "tippett", "stouffer", "mudholkar_george"]

    @pytest.mark.parametrize("variant", ["single", "all", "random"])
    @pytest.mark.parametrize("method", methods)
    def test_monotonicity(self, variant, method):
        # The combined p-value must increase monotonically when the
        # input p-values increase.
        m, n = 10, 7
        rng = np.random.default_rng(278448169958891062669391462690811630763)
        # Build an m x n array of p values; each row is one set to be
        # combined.  Values increase monotonically down a single column
        # (single), every column at once (all), or independently down
        # each column (random).
        if variant == "single":
            pvaluess = np.full((m, n), rng.random(n))
            pvaluess[:, 0] = np.linspace(0.1, 0.9, m)
        elif variant == "all":
            pvaluess = np.full((n, m), np.linspace(0.1, 0.9, m)).T
        elif variant == "random":
            pvaluess = np.sort(rng.uniform(0, 1, size=(m, n)), axis=0)

        combined = [stats.combine_pvalues(row, method=method)[1]
                    for row in pvaluess]
        assert np.all(np.diff(combined) >= 0)

    @pytest.mark.parametrize("method", methods)
    def test_result(self, method):
        # The result unpacks as the (statistic, pvalue) pair.
        res = stats.combine_pvalues([.01, .2, .3], method=method)
        assert_equal((res.statistic, res.pvalue), res)
class TestCdfDistanceValidation:
    """
    Test that _cdf_distance() (via wasserstein_distance()) raises
    ValueError for bad inputs.
    """

    def test_distinct_value_and_weight_lengths(self):
        # Weight arrays must match their value arrays in length.
        with assert_raises(ValueError):
            stats.wasserstein_distance([1], [2], [4], [3, 1])
        with assert_raises(ValueError):
            stats.wasserstein_distance([1], [2], [1, 0])

    def test_zero_weight(self):
        # A distribution whose weights sum to zero is invalid.
        with assert_raises(ValueError):
            stats.wasserstein_distance([0, 1], [2], [0, 0])
        with assert_raises(ValueError):
            stats.wasserstein_distance([0, 1], [2], [3, 1], [0])

    def test_negative_weights(self):
        # Negative weights are rejected.
        with assert_raises(ValueError):
            stats.wasserstein_distance([0, 1], [2, 2], [1, 1], [3, -1])

    def test_empty_distribution(self):
        # The distance between something and nothing is undefined.
        with assert_raises(ValueError):
            stats.wasserstein_distance([], [2, 2])
        with assert_raises(ValueError):
            stats.wasserstein_distance([1], [])

    def test_inf_weight(self):
        # Infinite weights are rejected.
        with assert_raises(ValueError):
            stats.wasserstein_distance([1, 2, 1], [1, 1],
                                       [1, np.inf, 1], [1, 1])
class TestWassersteinDistance:
""" Tests for wasserstein_distance() output values.
"""
def test_simple(self):
# For basic distributions, the value of the Wasserstein distance is
# straightforward.
assert_almost_equal(
stats.wasserstein_distance([0, 1], [0], [1, 1], [1]),
.5)
assert_almost_equal(stats.wasserstein_distance(
[0, 1], [0], [3, 1], [1]),
.25)
assert_almost_equal(stats.wasserstein_distance(
[0, 2], [0], [1, 1], [1]),
1)
assert_almost_equal(stats.wasserstein_distance(
[0, 1, 2], [1, 2, 3]),
1)
def test_published_values(self):
    # Compare against published values and manually computed results.
    # The values and computed result are posted at James D. McCaffrey's blog,
    # https://jamesmccaffrey.wordpress.com/2018/03/05/earth-mover-distance
    # -wasserstein-metric-example-calculation/
    # NOTE(review): the values are 2-D points (tuples), i.e. this
    # exercises the n-dimensional extension of wasserstein_distance --
    # confirm the scipy version under test accepts 2-D input here.
    u = [(1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1),
         (4,2), (6,1), (6,1)]
    v = [(2,1), (2,1), (3,2), (3,2), (3,2), (5,1), (5,1), (5,1), (5,1), (5,1),
         (5,1), (5,1), (7,1)]

    res = stats.wasserstein_distance(u, v)
    # In original post, the author kept two decimal places for ease of calculation.
    # This test uses the more precise value of distance to get the precise results.
    # For comparison, please see the table and figure in the original blog post.
    # Reference: total cost of the optimal flow divided by the total flow.
    flow = np.array([2., 3., 5., 1., 1., 1.])
    dist = np.array([1.00, 5**0.5, 4.00, 2**0.5, 1.00, 1.00])
    ref = np.sum(flow * dist)/np.sum(flow)
    assert_almost_equal(res, ref)
def test_same_distribution(self):
# Any distribution moved to itself should have a Wasserstein distance
# of zero.
assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0)
assert_equal(
stats.wasserstein_distance([1, 1, 1, 4], [4, 1],
[1, 1, 1, 1], [1, 3]),
0)
@pytest.mark.parametrize('n_value', (4, 15, 35))
@pytest.mark.parametrize('ndim', (3, 4, 7))
@pytest.mark.parametrize('max_repeats', (5, 10))
def test_same_distribution_nD(self, ndim, n_value, max_repeats):
    # Any distribution moved to itself should have a Wasserstein distance
    # of zero.
    # NOTE(review): n-D input -- confirm the scipy version under test
    # accepts 2-D value arrays in wasserstein_distance.
    rng = np.random.default_rng(363836384995579937222333)
    # Repeat each of the n_value points a random number of times in v;
    # the matching u weight is the sum of its copies' weights, so u and
    # v describe the same distribution.
    repeats = rng.integers(1, max_repeats, size=n_value, dtype=int)

    u_values = rng.random(size=(n_value, ndim))
    v_values = np.repeat(u_values, repeats, axis=0)
    v_weights = rng.random(np.sum(repeats))
    range_repeat = np.repeat(np.arange(len(repeats)), repeats)
    u_weights = np.bincount(range_repeat, weights=v_weights)
    # Shuffle v; the distance must be invariant to ordering.
    index = rng.permutation(len(v_weights))
    v_values, v_weights = v_values[index], v_weights[index]

    res = stats.wasserstein_distance(u_values, v_values, u_weights, v_weights)
    assert_allclose(res, 0, atol=1e-15)
def test_shift(self):
# If the whole distribution is shifted by x, then the Wasserstein
# distance should be the norm of x.
assert_almost_equal(stats.wasserstein_distance([0], [1]), 1)
assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10)
assert_almost_equal(
stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]),
10)
assert_almost_equal(
stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2],
[3, 1, 1], [1, 3, 1]),
2.5)
def test_combine_weights(self):
    # Assigning a weight w to a value is equivalent to including that value
    # w times in the value array with weight of 1.
    # Left: every value listed individually with unit weights.
    # Right: duplicates collapsed, multiplicity expressed as the weight.
    assert_almost_equal(
        stats.wasserstein_distance(
            [0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
            [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
        stats.wasserstein_distance([5, 0, 1], [0, 4, 3],
                                   [1, 2, 4], [1, 2, 4]))
def test_collapse(self):
    # Collapsing a distribution to a point distribution at zero is
    # equivalent to taking the average of the absolute values of the
    # values.
    u = np.arange(-10, 30, 0.3)
    v = np.zeros_like(u)
    assert_almost_equal(
        stats.wasserstein_distance(u, v),
        np.mean(np.abs(u)))

    # Same check with non-uniform weights on u.  The v weights cannot
    # matter: every value of v is zero, so v is a point mass regardless.
    u_weights = np.arange(len(u))
    v_weights = u_weights[::-1]
    assert_almost_equal(
        stats.wasserstein_distance(u, v, u_weights, v_weights),
        np.average(np.abs(u), weights=u_weights))
@pytest.mark.parametrize('nu', (8, 9, 38))
@pytest.mark.parametrize('nv', (8, 12, 17))
@pytest.mark.parametrize('ndim', (3, 5, 23))
def test_collapse_nD(self, nu, nv, ndim):
    # test collapse for n dimensional values
    # Collapsing a n-D distribution to a point distribution at zero
    # is equivalent to taking the average of the norm of data.
    rng = np.random.default_rng(38573488467338826109)
    u_values = rng.random(size=(nu, ndim))
    v_values = np.zeros((nv, ndim))
    u_weights = rng.random(size=nu)
    v_weights = rng.random(size=nv)
    # Expected: weighted mean Euclidean norm of the u sample.
    ref = np.average(np.linalg.norm(u_values, axis=1), weights=u_weights)
    res = stats.wasserstein_distance(u_values, v_values, u_weights, v_weights)
    assert_almost_equal(res, ref)
def test_zero_weight(self):
# Values with zero weight have no impact on the Wasserstein distance.
assert_almost_equal(
stats.wasserstein_distance([1, 2, 100000], [1, 1],
[1, 1, 0], [1, 1]),
stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1]))
@pytest.mark.parametrize('nu', (8, 16, 32))
@pytest.mark.parametrize('nv', (8, 16, 32))
@pytest.mark.parametrize('ndim', (1, 2, 6))
def test_zero_weight_nD(self, nu, nv, ndim):
    # Values with zero weight have no impact on the Wasserstein distance.
    rng = np.random.default_rng(38573488467338826109)
    u_values = rng.random(size=(nu, ndim))
    v_values = rng.random(size=(nv, ndim))
    u_weights = rng.random(size=nu)
    v_weights = rng.random(size=nv)
    ref = stats.wasserstein_distance(u_values, v_values, u_weights, v_weights)

    # Insert `nrows` extra rows at a random position, all with weight 0;
    # the distance must be unchanged.
    add_row, nrows = rng.integers(0, nu, size=2)
    add_value = rng.random(size=(nrows, ndim))
    u_values = np.insert(u_values, add_row, add_value, axis=0)
    u_weights = np.insert(u_weights, add_row, np.zeros(nrows), axis=0)
    res = stats.wasserstein_distance(u_values, v_values, u_weights, v_weights)
    assert_almost_equal(res, ref)
def test_inf_values(self):
    # Inf values can lead to an inf distance or trigger a RuntimeWarning
    # (and return NaN) if the distance is undefined.
    assert_equal(
        stats.wasserstein_distance([1, 2, np.inf], [1, 1]),
        np.inf)
    assert_equal(
        stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]),
        np.inf)
    assert_equal(
        stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]),
        np.inf)
    # +inf on both sides produces an inf - inf indeterminate form -> nan.
    with suppress_warnings() as sup:
        sup.record(RuntimeWarning, "invalid value*")
        assert_equal(
            stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]),
            np.nan)
    # n-D inputs containing +/-inf coordinates.
    uv, vv, uw = [[1, 1], [2, 1]], [[np.inf, -np.inf]], [1, 1]
    distance = stats.wasserstein_distance(uv, vv, uw)
    assert_equal(distance, np.inf)
    with np.errstate(invalid='ignore'):
        uv, vv = [[np.inf, np.inf]], [[np.inf, -np.inf]]
        distance = stats.wasserstein_distance(uv, vv)
        assert_equal(distance, np.nan)
@pytest.mark.parametrize('nu', (10, 15, 20))
@pytest.mark.parametrize('nv', (10, 15, 20))
@pytest.mark.parametrize('ndim', (1, 3, 5))
def test_multi_dim_nD(self, nu, nv, ndim):
    # Adding dimension on distributions do not affect the result
    rng = np.random.default_rng(2736495738494849509)
    u_values = rng.random(size=(nu, ndim))
    v_values = rng.random(size=(nv, ndim))
    u_weights = rng.random(size=nu)
    v_weights = rng.random(size=nv)
    ref = stats.wasserstein_distance(u_values, v_values, u_weights, v_weights)

    # Insert the same constant coordinate into both samples; since the new
    # coordinate is identical for every point, pairwise distances (and
    # therefore the Wasserstein distance) are unchanged.
    add_dim = rng.integers(0, ndim)
    add_value = rng.random()
    u_values = np.insert(u_values, add_dim, add_value, axis=1)
    v_values = np.insert(v_values, add_dim, add_value, axis=1)
    res = stats.wasserstein_distance(u_values, v_values, u_weights, v_weights)
    assert_almost_equal(res, ref)
@pytest.mark.parametrize('nu', (7, 13, 19))
@pytest.mark.parametrize('nv', (7, 13, 19))
@pytest.mark.parametrize('ndim', (2, 4, 7))
def test_orthogonal_nD(self, nu, nv, ndim):
    # orthogonal transformations do not affect the result of the
    # wasserstein_distance
    rng = np.random.default_rng(34746837464536)
    u_values = rng.random(size=(nu, ndim))
    v_values = rng.random(size=(nv, ndim))
    u_weights = rng.random(size=nu)
    v_weights = rng.random(size=nv)
    ref = stats.wasserstein_distance(u_values, v_values, u_weights, v_weights)

    # Apply a random orthogonal matrix plus a common shift to both
    # samples; both operations are isometries of Euclidean space.
    dist = stats.ortho_group(ndim)
    transform = dist.rvs(random_state=rng)
    shift = rng.random(size=ndim)
    res = stats.wasserstein_distance(u_values @ transform + shift,
                                     v_values @ transform + shift,
                                     u_weights, v_weights)
    assert_almost_equal(res, ref)
def test_error_code(self):
    # Input validation: each ValueError message prefix identifies the
    # particular rejected shape combination.
    rng = np.random.default_rng(52473644737485644836320101)
    # arrays of more than two dimensions are rejected
    with pytest.raises(ValueError, match='Invalid input values. The inputs'):
        u_values = rng.random(size=(4, 10, 15))
        v_values = rng.random(size=(6, 2, 7))
        _ = stats.wasserstein_distance(u_values, v_values)
    # mixing a 1-D sample with a 2-D sample is rejected
    with pytest.raises(ValueError, match='Invalid input values. Dimensions'):
        u_values = rng.random(size=(15,))
        v_values = rng.random(size=(3, 15))
        _ = stats.wasserstein_distance(u_values, v_values)
    # 2-D samples must agree in the number of columns
    with pytest.raises(ValueError, match='Invalid input values. If two-dimensional'):
        u_values = rng.random(size=(2, 10))
        v_values = rng.random(size=(2, 2))
        _ = stats.wasserstein_distance(u_values, v_values)
class TestEnergyDistance:
    """ Tests for energy_distance() output values.
    """

    def test_simple(self):
        # For basic distributions, the value of the energy distance is
        # straightforward.  Expected values are hand-derived from the
        # definition (note the sqrt(2) normalization factor).
        assert_almost_equal(
            stats.energy_distance([0, 1], [0], [1, 1], [1]),
            np.sqrt(2) * .5)
        assert_almost_equal(stats.energy_distance(
            [0, 1], [0], [3, 1], [1]),
            np.sqrt(2) * .25)
        assert_almost_equal(stats.energy_distance(
            [0, 2], [0], [1, 1], [1]),
            2 * .5)
        assert_almost_equal(
            stats.energy_distance([0, 1, 2], [1, 2, 3]),
            np.sqrt(2) * (3*(1./3**2))**.5)

    def test_same_distribution(self):
        # Any distribution moved to itself should have a energy distance of
        # zero.
        assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0)
        assert_equal(
            stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]),
            0)

    def test_shift(self):
        # If a single-point distribution is shifted by x, then the energy
        # distance should be sqrt(2) * sqrt(x).
        assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2))
        assert_almost_equal(
            stats.energy_distance([-5], [5]),
            np.sqrt(2) * 10**.5)

    def test_combine_weights(self):
        # Assigning a weight w to a value is equivalent to including that value
        # w times in the value array with weight of 1.
        assert_almost_equal(
            stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
                                  [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
            stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4]))

    def test_zero_weight(self):
        # Values with zero weight have no impact on the energy distance.
        assert_almost_equal(
            stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]),
            stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1]))

    def test_inf_values(self):
        # Inf values can lead to an inf distance or trigger a RuntimeWarning
        # (and return NaN) if the distance is undefined.
        assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf)
        assert_equal(
            stats.energy_distance([1, 2, np.inf], [-np.inf, 1]),
            np.inf)
        assert_equal(
            stats.energy_distance([1, -np.inf, np.inf], [1, 1]),
            np.inf)
        # +inf present on both sides -> inf - inf -> nan
        with suppress_warnings() as sup:
            sup.record(RuntimeWarning, "invalid value*")
            assert_equal(
                stats.energy_distance([1, 2, np.inf], [np.inf, 1]),
                np.nan)
class TestBrunnerMunzel:
# Data from (Lumley, 1996)
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
significant = 13
def test_brunnermunzel_one_sided(self):
# Results are compared with R's lawstat package.
u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less')
u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater')
u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater')
u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less')
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(p3, p4, significant=self.significant)
assert_(p1 != p3)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(u3, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u4, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0028931043330757342,
significant=self.significant)
assert_approx_equal(p3, 0.99710689566692423,
significant=self.significant)
def test_brunnermunzel_two_sided(self):
# Results are compared with R's lawstat package.
u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='two-sided')
u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='two-sided')
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
def test_brunnermunzel_default(self):
# The default value for alternative is two-sided
u1, p1 = stats.brunnermunzel(self.X, self.Y)
u2, p2 = stats.brunnermunzel(self.Y, self.X)
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
def test_brunnermunzel_alternative_error(self):
alternative = "error"
distribution = "t"
nan_policy = "propagate"
assert_(alternative not in ["two-sided", "greater", "less"])
assert_raises(ValueError,
stats.brunnermunzel,
self.X,
self.Y,
alternative,
distribution,
nan_policy)
def test_brunnermunzel_distribution_norm(self):
u1, p1 = stats.brunnermunzel(self.X, self.Y, distribution="normal")
u2, p2 = stats.brunnermunzel(self.Y, self.X, distribution="normal")
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0017041417600383024,
significant=self.significant)
def test_brunnermunzel_distribution_error(self):
alternative = "two-sided"
distribution = "error"
nan_policy = "propagate"
assert_(alternative not in ["t", "normal"])
assert_raises(ValueError,
stats.brunnermunzel,
self.X,
self.Y,
alternative,
distribution,
nan_policy)
def test_brunnermunzel_empty_imput(self):
u1, p1 = stats.brunnermunzel(self.X, [])
u2, p2 = stats.brunnermunzel([], self.Y)
u3, p3 = stats.brunnermunzel([], [])
assert_equal(u1, np.nan)
assert_equal(p1, np.nan)
assert_equal(u2, np.nan)
assert_equal(p2, np.nan)
assert_equal(u3, np.nan)
assert_equal(p3, np.nan)
def test_brunnermunzel_nan_input_propagate(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
u1, p1 = stats.brunnermunzel(X, Y, nan_policy="propagate")
u2, p2 = stats.brunnermunzel(Y, X, nan_policy="propagate")
assert_equal(u1, np.nan)
assert_equal(p1, np.nan)
assert_equal(u2, np.nan)
assert_equal(p2, np.nan)
def test_brunnermunzel_nan_input_raise(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
alternative = "two-sided"
distribution = "t"
nan_policy = "raise"
assert_raises(ValueError,
stats.brunnermunzel,
X,
Y,
alternative,
distribution,
nan_policy)
assert_raises(ValueError,
stats.brunnermunzel,
Y,
X,
alternative,
distribution,
nan_policy)
def test_brunnermunzel_nan_input_omit(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
u1, p1 = stats.brunnermunzel(X, Y, nan_policy="omit")
u2, p2 = stats.brunnermunzel(Y, X, nan_policy="omit")
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
def test_brunnermunzel_return_nan(self):
""" tests that a warning is emitted when p is nan
p-value with t-distributions can be nan (0/0) (see gh-15843)
"""
x = [1, 2, 3]
y = [5, 6, 7, 8, 9]
with pytest.warns(RuntimeWarning, match='p-value cannot be estimated'):
stats.brunnermunzel(x, y, distribution="t")
def test_brunnermunzel_normal_dist(self):
""" tests that a p is 0 for datasets that cause p->nan
when t-distribution is used (see gh-15843)
"""
x = [1, 2, 3]
y = [5, 6, 7, 8, 9]
with pytest.warns(RuntimeWarning, match='divide by zero'):
_, p = stats.brunnermunzel(x, y, distribution="normal")
assert_equal(p, 0)
class TestRatioUniforms:
    """ Tests for rvs_ratio_uniforms are in test_sampling.py,
    as rvs_ratio_uniforms is deprecated and moved to stats.sampling """

    def test_consistency(self):
        # The deprecated stats.rvs_ratio_uniforms must emit a
        # DeprecationWarning and return exactly the same variates as
        # stats.sampling.RatioUniforms for identical parameters and seed.
        f = stats.norm.pdf
        v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
        umax = np.sqrt(f(0))
        gen = stats.sampling.RatioUniforms(f, umax=umax, vmin=-v, vmax=v,
                                           random_state=12345)
        r1 = gen.rvs(10)
        deprecation_msg = ("Please use `RatioUniforms` from the "
                           "`scipy.stats.sampling` namespace.")
        with pytest.warns(DeprecationWarning, match=deprecation_msg):
            r2 = stats.rvs_ratio_uniforms(f, umax, -v, v, size=10,
                                          random_state=12345)
        assert_equal(r1, r2)
class TestMGCErrorWarnings:
    """ Tests errors and warnings derived from MGC.
    """

    def test_error_notndarray(self):
        # raises error if x or y is not a ndarray
        x = np.arange(20)
        y = [5] * 20
        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
        assert_raises(ValueError, stats.multiscale_graphcorr, y, x)

    def test_error_shape(self):
        # raises error if number of samples different (n): 25 rows vs 10
        x = np.arange(100).reshape(25, 4)
        y = x.reshape(10, 10)
        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)

    def test_error_lowsamples(self):
        # raises error if samples are low (< 3)
        x = np.arange(3)
        y = np.arange(3)
        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)

    def test_error_nans(self):
        # raises error if inputs contain NaNs
        x = np.arange(20, dtype=float)
        x[0] = np.nan
        assert_raises(ValueError, stats.multiscale_graphcorr, x, x)

        # also rejected when only one of the inputs contains a NaN
        y = np.arange(20)
        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)

    def test_error_wrongdisttype(self):
        # raises error if metric is not a function
        x = np.arange(20)
        compute_distance = 0
        assert_raises(ValueError, stats.multiscale_graphcorr, x, x,
                      compute_distance=compute_distance)

    @pytest.mark.parametrize("reps", [
        -1,    # reps is negative
        '1',   # reps is not integer
    ])
    def test_error_reps(self, reps):
        # raises error if reps is negative
        x = np.arange(20)
        assert_raises(ValueError, stats.multiscale_graphcorr, x, x, reps=reps)

    def test_warns_reps(self):
        # raises warning when reps is less than 1000
        x = np.arange(20)
        reps = 100
        assert_warns(RuntimeWarning, stats.multiscale_graphcorr, x, x, reps=reps)

    def test_error_infty(self):
        # raises error if input contains infinities
        x = np.arange(20)
        y = np.ones(20) * np.inf
        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
class TestMGCStat:
    """ Test validity of MGC test statistic
    """

    def _simulations(self, samps=100, dims=1, sim_type=""):
        """Generate an (x, y) sample pair with the requested dependence
        structure; raises ValueError for an unknown `sim_type`."""
        # linear simulation: y is x plus small uniform noise
        if sim_type == "linear":
            x = np.random.uniform(-1, 1, size=(samps, 1))
            y = x + 0.3 * np.random.random_sample(size=(x.size, 1))

        # spiral simulation
        elif sim_type == "nonlinear":
            unif = np.array(np.random.uniform(0, 5, size=(samps, 1)))
            x = unif * np.cos(np.pi * unif)
            y = (unif * np.sin(np.pi * unif) +
                 0.4*np.random.random_sample(size=(x.size, 1)))

        # independence (tests type I simulation)
        elif sim_type == "independence":
            u = np.random.normal(0, 1, size=(samps, 1))
            v = np.random.normal(0, 1, size=(samps, 1))
            u_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
            v_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
            x = u/3 + 2*u_2 - 1
            y = v/3 + 2*v_2 - 1

        # raises error if not approved sim_type
        else:
            raise ValueError("sim_type must be linear, nonlinear, or "
                             "independence")

        # add dimensions of noise for higher dimensions
        if dims > 1:
            dims_noise = np.random.normal(0, 1, size=(samps, dims-1))
            x = np.concatenate((x, dims_noise), axis=1)

        return x, y

    @pytest.mark.slow
    @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
        ("linear", 0.97, 1/1000),           # test linear simulation
        ("nonlinear", 0.163, 1/1000),       # test spiral simulation
        ("independence", -0.0094, 0.78)     # test independence simulation
    ])
    def test_oned(self, sim_type, obs_stat, obs_pvalue):
        np.random.seed(12345678)

        # generate x and y
        x, y = self._simulations(samps=100, dims=1, sim_type=sim_type)

        # test stat and pvalue
        stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
        assert_approx_equal(stat, obs_stat, significant=1)
        assert_approx_equal(pvalue, obs_pvalue, significant=1)

    @pytest.mark.slow
    @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
        ("linear", 0.184, 1/1000),          # test linear simulation
        ("nonlinear", 0.0190, 0.117),       # test spiral simulation
    ])
    def test_fived(self, sim_type, obs_stat, obs_pvalue):
        np.random.seed(12345678)

        # generate x and y (5-dimensional)
        x, y = self._simulations(samps=100, dims=5, sim_type=sim_type)

        # test stat and pvalue
        stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
        assert_approx_equal(stat, obs_stat, significant=1)
        assert_approx_equal(pvalue, obs_pvalue, significant=1)

    @pytest.mark.xslow
    def test_twosamp(self):
        np.random.seed(12345678)

        # generate x and y of unequal sample sizes
        x = np.random.binomial(100, 0.5, size=(100, 5))
        y = np.random.normal(0, 1, size=(80, 5))

        # test stat and pvalue
        stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
        assert_approx_equal(stat, 1.0, significant=1)
        assert_approx_equal(pvalue, 0.001, significant=1)

        # generate x and y of equal sample sizes, explicit two-sample mode
        y = np.random.normal(0, 1, size=(100, 5))

        # test stat and pvalue
        stat, pvalue, _ = stats.multiscale_graphcorr(x, y, is_twosamp=True)
        assert_approx_equal(stat, 1.0, significant=1)
        assert_approx_equal(pvalue, 0.001, significant=1)

    @pytest.mark.slow
    def test_workers(self):
        # parallel execution must reproduce the serial result
        np.random.seed(12345678)

        # generate x and y
        x, y = self._simulations(samps=100, dims=1, sim_type="linear")

        # test stat and pvalue
        stat, pvalue, _ = stats.multiscale_graphcorr(x, y, workers=2)
        assert_approx_equal(stat, 0.97, significant=1)
        assert_approx_equal(pvalue, 0.001, significant=1)

    @pytest.mark.slow
    def test_random_state(self):
        # generate x and y
        x, y = self._simulations(samps=100, dims=1, sim_type="linear")

        # test stat and pvalue
        stat, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1)
        assert_approx_equal(stat, 0.97, significant=1)
        assert_approx_equal(pvalue, 0.001, significant=1)

    @pytest.mark.slow
    def test_dist_perm(self):
        np.random.seed(12345678)
        # Precomputed distance matrices with compute_distance=None must
        # give the same statistic as the raw data path.
        x, y = self._simulations(samps=100, dims=1, sim_type="nonlinear")
        distx = cdist(x, x, metric="euclidean")
        disty = cdist(y, y, metric="euclidean")

        stat_dist, pvalue_dist, _ = stats.multiscale_graphcorr(distx, disty,
                                                               compute_distance=None,
                                                               random_state=1)
        assert_approx_equal(stat_dist, 0.163, significant=1)
        assert_approx_equal(pvalue_dist, 0.001, significant=1)

    @pytest.mark.slow
    def test_pvalue_literature(self):
        np.random.seed(12345678)

        # generate x and y
        x, y = self._simulations(samps=100, dims=1, sim_type="linear")

        # the permutation p-value is (k + 1) / (reps + 1); with the default
        # 1000 reps and no exceedances this is exactly 1/1001
        _, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1)
        assert_allclose(pvalue, 1/1001)

    @pytest.mark.slow
    def test_alias(self):
        np.random.seed(12345678)

        # generate x and y
        x, y = self._simulations(samps=100, dims=1, sim_type="linear")

        # `stat` is an alias of `statistic` on the result object
        res = stats.multiscale_graphcorr(x, y, random_state=1)
        assert_equal(res.stat, res.statistic)
class TestPageTrendTest:
    # expected statistic and p-values generated using R at
    # https://rdrr.io/cran/cultevo/, e.g.
    # library(cultevo)
    # data = rbind(c(72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81, 43,
    #                70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50),
    #              c(68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31, 67,
    #                69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43),
    #              c(81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68, 17,
    #                16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55))
    # result = page.test(data, verbose=FALSE)
    # Most test cases generated to achieve common critical p-values so that
    # results could be checked (to limited precision) against tables in
    # scipy.stats.page_trend_test reference [1]

    np.random.seed(0)
    data_3_25 = np.random.rand(3, 25)
    data_10_26 = np.random.rand(10, 26)

    # each entry: (statistic L, p-value, ranked?, method, data)
    ts = [
          (12805, 0.3886487053947608, False, 'asymptotic', data_3_25),
          (49140, 0.02888978556179862, False, 'asymptotic', data_10_26),
          (12332, 0.7722477197436702, False, 'asymptotic',
           [[72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81,
             43, 70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50],
            [68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31,
             67, 69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43],
            [81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68,
             17, 16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55]]),
          (266, 4.121656378600823e-05, False, 'exact',
           [[1.5, 4., 8.3, 5, 19, 11],
            [5, 4, 3.5, 10, 20, 21],
            [8.4, 3.2, 10, 12, 14, 15]]),
          (332, 0.9566400920502488, True, 'exact',
           [[4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1],
            [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1],
            [3, 4, 1, 2], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
            [1, 2, 3, 4], [1, 2, 3, 4]]),
          (241, 0.9622210164861476, True, 'exact',
           [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
            [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
            [3, 2, 1], [2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3],
            [1, 2, 3], [1, 2, 3], [1, 2, 3]]),
          (197, 0.9619432897162209, True, 'exact',
           [[6, 5, 4, 3, 2, 1], [6, 5, 4, 3, 2, 1], [1, 3, 4, 5, 2, 6]]),
          (423, 0.9590458306880073, True, 'exact',
           [[5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1],
            [5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1],
            [4, 1, 3, 2, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
            [1, 2, 3, 4, 5]]),
          (217, 0.9693058575034678, True, 'exact',
           [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
            [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
            [2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3],
            [1, 2, 3]]),
          (395, 0.991530289351305, True, 'exact',
           [[7, 6, 5, 4, 3, 2, 1], [7, 6, 5, 4, 3, 2, 1],
            [6, 5, 7, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7]]),
          (117, 0.9997817843373017, True, 'exact',
           [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
            [3, 2, 1], [3, 2, 1], [3, 2, 1], [2, 1, 3], [1, 2, 3]]),
          ]

    @pytest.mark.parametrize("L, p, ranked, method, data", ts)
    def test_accuracy(self, L, p, ranked, method, data):
        np.random.seed(42)
        res = stats.page_trend_test(data, ranked=ranked, method=method)
        assert_equal(L, res.statistic)
        assert_allclose(p, res.pvalue)
        assert_equal(method, res.method)

    # larger exact cases, kept separate because they are slow
    ts2 = [
           (542, 0.9481266260876332, True, 'exact',
            [[10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
             [1, 8, 4, 7, 6, 5, 9, 3, 2, 10]]),
           (1322, 0.9993113928199309, True, 'exact',
            [[10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
             [10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [9, 2, 8, 7, 6, 5, 4, 3, 10, 1],
             [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]),
           (2286, 0.9908688345484833, True, 'exact',
            [[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
             [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
             [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
             [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
             [8, 7, 6, 5, 4, 3, 2, 1], [1, 3, 5, 6, 4, 7, 2, 8],
             [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
             [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
             [1, 2, 3, 4, 5, 6, 7, 8]]),
           ]

    # only the first of these appears slow because intermediate data are
    # cached and used on the rest
    # BUG FIX: this previously parametrized over `ts`, silently re-running
    # the cases above and never exercising `ts2`.
    @pytest.mark.parametrize("L, p, ranked, method, data", ts2)
    @pytest.mark.slow()
    def test_accuracy2(self, L, p, ranked, method, data):
        np.random.seed(42)
        res = stats.page_trend_test(data, ranked=ranked, method=method)
        assert_equal(L, res.statistic)
        assert_allclose(p, res.pvalue)
        assert_equal(method, res.method)

    def test_options(self):
        # All equivalent ways of supplying the data (pre-ranked or not,
        # with or without explicit/permuted predicted ranks) must agree.
        np.random.seed(42)
        m, n = 10, 20
        predicted_ranks = np.arange(1, n+1)
        perm = np.random.permutation(np.arange(n))
        data = np.random.rand(m, n)
        ranks = stats.rankdata(data, axis=1)
        res1 = stats.page_trend_test(ranks)
        res2 = stats.page_trend_test(ranks, ranked=True)
        res3 = stats.page_trend_test(data, ranked=False)
        res4 = stats.page_trend_test(ranks, predicted_ranks=predicted_ranks)
        res5 = stats.page_trend_test(ranks[:, perm],
                                     predicted_ranks=predicted_ranks[perm])
        assert_equal(res1.statistic, res2.statistic)
        assert_equal(res1.statistic, res3.statistic)
        assert_equal(res1.statistic, res4.statistic)
        assert_equal(res1.statistic, res5.statistic)

    def test_Ames_assay(self):
        # test from _page_trend_test.py [2] page 151; data on page 144
        np.random.seed(42)

        data = [[101, 117, 111], [91, 90, 107], [103, 133, 121],
                [136, 140, 144], [190, 161, 201], [146, 120, 116]]
        data = np.array(data).T
        predicted_ranks = np.arange(1, 7)

        res = stats.page_trend_test(data, ranked=False,
                                    predicted_ranks=predicted_ranks,
                                    method="asymptotic")
        assert_equal(res.statistic, 257)
        assert_almost_equal(res.pvalue, 0.0035, decimal=4)

        res = stats.page_trend_test(data, ranked=False,
                                    predicted_ranks=predicted_ranks,
                                    method="exact")
        assert_equal(res.statistic, 257)
        assert_almost_equal(res.pvalue, 0.0023, decimal=4)

    def test_input_validation(self):
        # test data not a 2d array
        with assert_raises(ValueError, match="`data` must be a 2d array."):
            stats.page_trend_test(None)
        with assert_raises(ValueError, match="`data` must be a 2d array."):
            stats.page_trend_test([])
        with assert_raises(ValueError, match="`data` must be a 2d array."):
            stats.page_trend_test([1, 2])
        with assert_raises(ValueError, match="`data` must be a 2d array."):
            stats.page_trend_test([[[1]]])

        # test invalid dimensions
        with assert_raises(ValueError, match="Page's L is only appropriate"):
            stats.page_trend_test(np.random.rand(1, 3))
        with assert_raises(ValueError, match="Page's L is only appropriate"):
            stats.page_trend_test(np.random.rand(2, 2))

        # predicted ranks must include each integer [1, 2, 3] exactly once
        message = "`predicted_ranks` must include each integer"
        with assert_raises(ValueError, match=message):
            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
                                  predicted_ranks=[0, 1, 2])
        with assert_raises(ValueError, match=message):
            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
                                  predicted_ranks=[1.1, 2, 3])
        with assert_raises(ValueError, match=message):
            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
                                  predicted_ranks=[1, 2, 3, 3])
        with assert_raises(ValueError, match=message):
            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
                                  predicted_ranks="invalid")

        # test improperly ranked data
        with assert_raises(ValueError, match="`data` is not properly ranked"):
            stats.page_trend_test([[0, 2, 3], [1, 2, 3]], True)
        with assert_raises(ValueError, match="`data` is not properly ranked"):
            stats.page_trend_test([[1, 2, 3], [1, 2, 4]], True)

        # various
        with assert_raises(ValueError, match="`data` contains NaNs"):
            stats.page_trend_test([[1, 2, 3], [1, 2, np.nan]],
                                  ranked=False)
        with assert_raises(ValueError, match="`method` must be in"):
            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
                                  method="ekki")
        with assert_raises(TypeError, match="`ranked` must be boolean."):
            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
                                  ranked="ekki")
# Shared fixtures for test_rename_mode_method below.
rng = np.random.default_rng(902340982)
x = rng.random(10)
y = rng.random(10)


@pytest.mark.parametrize("fun, args",
                         [(stats.wilcoxon, (x,)),
                          (stats.ks_1samp, (x, stats.norm.cdf)),  # type: ignore[attr-defined] # noqa
                          (stats.ks_2samp, (x, y)),
                          (stats.kstest, (x, y)),
                          ])
def test_rename_mode_method(fun, args):
    # Each of these functions renamed its `mode` keyword to `method`; the
    # old spelling must keep working and agree with the new one.
    res = fun(*args, method='exact')
    res2 = fun(*args, mode='exact')
    assert_equal(res, res2)

    # Supplying both spellings at once must fail like a duplicate argument.
    err = rf"{fun.__name__}() got multiple values for argument"
    with pytest.raises(TypeError, match=re.escape(err)):
        fun(*args, method='exact', mode='exact')
class TestExpectile:
    """Tests for stats.expectile."""

    def test_same_as_mean(self):
        # The alpha=0.5 expectile is the mean.
        rng = np.random.default_rng(42)
        x = rng.random(size=20)
        assert_allclose(stats.expectile(x, alpha=0.5), np.mean(x))

    def test_minimum(self):
        # The alpha=0 expectile is the minimum.
        rng = np.random.default_rng(42)
        x = rng.random(size=20)
        assert_allclose(stats.expectile(x, alpha=0), np.amin(x))

    def test_maximum(self):
        # The alpha=1 expectile is the maximum.
        rng = np.random.default_rng(42)
        x = rng.random(size=20)
        assert_allclose(stats.expectile(x, alpha=1), np.amax(x))

    def test_weights(self):
        # expectile should minimize `fun` defined below; see
        # F. Sobotka and T. Kneib, "Geoadditive expectile regression",
        # Computational Statistics and Data Analysis 56 (2012) 755-767
        # :doi:`10.1016/j.csda.2010.11.015`
        rng = np.random.default_rng(1856392524598679138)

        def fun(u, a, alpha, weights):
            # asymmetric weighted least squares loss
            w = np.full_like(a, fill_value=alpha)
            w[a <= u] = 1 - alpha
            return np.sum(w * weights * (a - u)**2)

        def expectile2(a, alpha, weights):
            # reference implementation via direct loss minimization
            bracket = np.min(a), np.max(a)
            return optimize.minimize_scalar(fun, bracket=bracket,
                                            args=(a, alpha, weights)).x

        n = 10
        a = rng.random(n)
        alpha = rng.random()
        weights = rng.random(n)

        res = stats.expectile(a, alpha, weights=weights)
        ref = expectile2(a, alpha, weights)
        assert_allclose(res, ref)

    @pytest.mark.parametrize(
        "alpha", [0.2, 0.5 - 1e-12, 0.5, 0.5 + 1e-12, 0.8]
    )
    @pytest.mark.parametrize("n", [20, 2000])
    def test_expectile_properties(self, alpha, n):
        """
        See Section 6 of
        I. Steinwart, C. Pasin, R.C. Williamson & S. Zhang (2014).
        "Elicitation and Identification of Properties". COLT.
        http://proceedings.mlr.press/v35/steinwart14.html

        and

        Propositions 5, 6, 7 of
        F. Bellini, B. Klar, and A. Müller and E. Rosazza Gianin (2013).
        "Generalized Quantiles as Risk Measures"
        http://doi.org/10.2139/ssrn.2225751
        """
        rng = np.random.default_rng(42)
        x = rng.normal(size=n)

        # 0. definite / constancy
        # Let T(X) denote the expectile of rv X ~ F.
        # T(c) = c for constant c
        for c in [-5, 0, 0.5]:
            assert_allclose(
                stats.expectile(np.full(shape=n, fill_value=c), alpha=alpha),
                c
            )

        # 1. translation equivariance
        # T(X + c) = T(X) + c
        c = rng.exponential()
        assert_allclose(
            stats.expectile(x + c, alpha=alpha),
            stats.expectile(x, alpha=alpha) + c,
        )
        assert_allclose(
            stats.expectile(x - c, alpha=alpha),
            stats.expectile(x, alpha=alpha) - c,
        )

        # 2. positively homogeneity
        # T(cX) = c * T(X) for c > 0
        assert_allclose(
            stats.expectile(c * x, alpha=alpha),
            c * stats.expectile(x, alpha=alpha),
        )

        # 3. subadditivity
        # Note that subadditivity holds for alpha >= 0.5.
        # T(X + Y) <= T(X) + T(Y)
        # For alpha = 0.5, i.e. the mean, strict equality holds.
        # For alpha < 0.5, one can use property 6. to show
        # T(X + Y) >= T(X) + T(Y)
        y = rng.logistic(size=n, loc=10)  # different distibution than x
        if alpha == 0.5:
            def assert_op(a, b):
                assert_allclose(a, b)

        elif alpha > 0.5:
            def assert_op(a, b):
                assert a < b

        else:
            def assert_op(a, b):
                assert a > b

        assert_op(
            stats.expectile(np.r_[x + y], alpha=alpha),
            stats.expectile(x, alpha=alpha)
            + stats.expectile(y, alpha=alpha)
        )

        # 4. monotonicity
        # This holds for first order stochastic dominance X:
        # X >= Y whenever P(X <= x) < P(Y <= x)
        # T(X) <= T(Y) whenever X <= Y
        y = rng.normal(size=n, loc=5)
        assert (
            stats.expectile(x, alpha=alpha) <= stats.expectile(y, alpha=alpha)
        )

        # 5. convexity for alpha > 0.5, concavity for alpha < 0.5
        # convexity is
        # T((1 - c) X + c Y) <= (1 - c) T(X) + c T(Y) for 0 <= c <= 1
        y = rng.logistic(size=n, loc=10)
        for c in [0.1, 0.5, 0.8]:
            assert_op(
                stats.expectile((1-c)*x + c*y, alpha=alpha),
                (1-c) * stats.expectile(x, alpha=alpha) +
                c * stats.expectile(y, alpha=alpha)
            )

        # 6. negative argument
        # T_{alpha}(-X) = -T_{1-alpha}(X)
        assert_allclose(
            stats.expectile(-x, alpha=alpha),
            -stats.expectile(x, alpha=1-alpha),
        )

    @pytest.mark.parametrize("n", [20, 2000])
    def test_monotonicity_in_alpha(self, n):
        # The expectile must be strictly increasing in alpha.
        rng = np.random.default_rng(42)
        x = rng.pareto(a=2, size=n)
        e_list = []
        alpha_seq = np.logspace(-15, np.log10(0.5), 100)
        # sorted list of unique alpha values in interval (0, 1)
        # BUG FIX: `alpha_seq[:-1:-1]` is always an empty slice, so the
        # mirrored alphas above 0.5 were never exercised.
        # `alpha_seq[:-1][::-1]` reverses the sequence excluding its last
        # element (0.5, already present), giving the intended symmetric grid.
        for alpha in np.r_[0, alpha_seq, 1 - alpha_seq[:-1][::-1], 1]:
            e_list.append(stats.expectile(x, alpha=alpha))
        assert np.all(np.diff(e_list) > 0)
| 347,719
| 40.56844
| 124
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_mstats_basic.py
|
"""
Tests for the stats.mstats module (support for masked arrays)
"""
import warnings
import platform
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from .common_tests import check_named_results
import pytest
from pytest import raises as assert_raises
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_array_equal)
from numpy.testing import suppress_warnings
from scipy.stats import _mstats_basic
class TestMquantiles:
    """Tests for mstats.mquantiles."""

    def test_mquantiles_limit_keyword(self):
        """Regression test for Trac ticket #867 (the ``limit`` keyword)."""
        rows = [
            [6., 7., 1.],
            [47., 15., 2.],
            [49., 36., 3.],
            [15., 39., 4.],
            [42., 40., -999.],
            [41., 41., -999.],
            [7., -999., -999.],
            [39., -999., -999.],
            [43., -999., -999.],
            [40., -999., -999.],
            [36., -999., -999.],
        ]
        expected = [[19.2, 14.6, 1.45],
                    [40.0, 37.5, 2.5],
                    [42.8, 40.05, 3.55]]
        # The -999 sentinel entries fall outside limit=(0, 50) and must be
        # excluded from the per-column quantile computation.
        result = mstats.mquantiles(np.array(rows), axis=0, limit=(0, 50))
        assert_almost_equal(result, expected)
def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    """Assert that mstats.gmean(array_like) matches *desired* and *dtype*.

    Note: this helper never exercises the default-axis code path.
    """
    actual = mstats.gmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(actual, desired, rtol=rtol)
    # NOTE(review): with dtype=None this comparison appears to rely on
    # np.dtype(None) being float64, so it passes for double-precision
    # results — confirm before tightening.
    assert_equal(actual.dtype, dtype)
def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    """Assert that stats.hmean (note: not mstats) matches *desired* and *dtype*."""
    actual = stats.hmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(actual, desired, rtol=rtol)
    assert_equal(actual.dtype, dtype)
class TestGeoMean:
    """Geometric mean on plain and masked inputs (via check_equal_gmean):
    masked entries are excluded, zero and negative elements are handled,
    extended-precision dtypes, and 2-D/axis behavior."""
    def test_1d(self):
        a = [1, 2, 3, 4]
        desired = np.power(1*2*3*4, 1./4.)
        check_equal_gmean(a, desired, rtol=1e-14)
    def test_1d_ma(self):
        # Test a 1d masked array
        a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 45.2872868812
        check_equal_gmean(a, desired)
        # Masked entries must be excluded from the product.
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = np.power(1*2*3, 1./3.)
        check_equal_gmean(a, desired, rtol=1e-14)
    def test_1d_ma_value(self):
        # Test a 1d masked array with a masked value
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 41.4716627439
        check_equal_gmean(a, desired)
    def test_1d_ma0(self):
        # Test a 1d masked array with zero element
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
        desired = 0
        check_equal_gmean(a, desired)
    def test_1d_ma_inf(self):
        # Test a 1d masked array with negative element
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
        desired = np.nan
        # A negative element makes the result nan; silence the warning.
        with np.errstate(invalid='ignore'):
            check_equal_gmean(a, desired)
    @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.power(1*2*3, 1./3.).astype(np.float96)
        check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14)
    def test_2d_ma(self):
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = np.array([1, 2, 3, 4])
        check_equal_gmean(a, desired, axis=0, rtol=1e-14)
        desired = ma.array([np.power(1*2*3*4, 1./4.),
                            np.power(2*3, 1./2.),
                            np.power(1*4, 1./2.)])
        check_equal_gmean(a, desired, axis=-1, rtol=1e-14)
        # Test a 2d masked array
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 52.8885199
        check_equal_gmean(np.ma.array(a), desired)
class TestHarMean:
    """Harmonic mean on masked inputs (via check_equal_hmean), including
    extended-precision dtypes and 2-D/axis behavior."""
    def test_1d(self):
        # Masked entries must be excluded from the sum of reciprocals.
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = 3. / (1./1 + 1./2 + 1./3)
        check_equal_hmean(a, desired, rtol=1e-14)
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 34.1417152147
        check_equal_hmean(a, desired)
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 31.8137186141
        check_equal_hmean(a, desired)
    @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)
        check_equal_hmean(a, desired_dt, dtype=np.float96)
    def test_2d(self):
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = ma.array([1, 2, 3, 4])
        check_equal_hmean(a, desired, axis=0, rtol=1e-14)
        desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]
        check_equal_hmean(a, desired, axis=-1, rtol=1e-14)
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 38.6696271841
        check_equal_hmean(np.ma.array(a), desired)
class TestRanking:
    """Tests for mstats.rankdata on masked arrays."""

    def test_ranking(self):
        # Ties receive averaged ranks; masked entries get rank 0 by default.
        x = ma.array([0, 1, 1, 1, 2, 3, 4, 5, 5, 6])
        assert_almost_equal(mstats.rankdata(x),
                            [1, 3, 3, 3, 5, 6, 7, 8.5, 8.5, 10])
        # Mask two entries: by default they rank 0; with use_missing=True
        # they share the mean of the ranks left unused (here 4.5).
        x[[3, 4]] = masked
        assert_almost_equal(mstats.rankdata(x),
                            [1, 2.5, 2.5, 0, 0, 4, 5, 6.5, 6.5, 8])
        assert_almost_equal(mstats.rankdata(x, use_missing=True),
                            [1, 2.5, 2.5, 4.5, 4.5, 4, 5, 6.5, 6.5, 8])
        x = ma.array([0, 1, 5, 1, 2, 4, 3, 5, 1, 6])
        assert_almost_equal(mstats.rankdata(x),
                            [1, 3, 8.5, 3, 5, 7, 6, 8.5, 3, 10])
        # 2-D input: default ranks the flattened array; axis=1 ranks within
        # rows, axis=0 within columns.
        x = ma.array([[0, 1, 1, 1, 2], [3, 4, 5, 5, 6]])
        assert_almost_equal(mstats.rankdata(x),
                            [[1, 3, 3, 3, 5], [6, 7, 8.5, 8.5, 10]])
        assert_almost_equal(mstats.rankdata(x, axis=1),
                            [[1, 3, 3, 3, 5], [1, 2, 3.5, 3.5, 5]])
        assert_almost_equal(mstats.rankdata(x, axis=0),
                            [[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
class TestCorr:
    """Correlation routines on masked arrays: pearsonr, spearmanr,
    kendalltau (plus the seasonal variant and the exact p-value helper),
    and pointbiserialr.  Reference values are cross-checked against R."""
    def test_pearsonr(self):
        # Tests some computations of Pearson's r
        x = ma.arange(10)
        with warnings.catch_warnings():
            # The tests in this context are edge cases, with perfect
            # correlation or anticorrelation, or totally masked data.
            # None of these should trigger a RuntimeWarning.
            warnings.simplefilter("error", RuntimeWarning)
            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
            x = ma.array(x, mask=True)
            pr = mstats.pearsonr(x, x)
            assert_(pr[0] is masked)
            assert_(pr[1] is masked)
        x1 = ma.array([-1.0, 0.0, 1.0])
        y1 = ma.array([0, 0, 3])
        r, p = mstats.pearsonr(x1, y1)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)
        # (x2, y2) have the same unmasked data as (x1, y1).
        mask = [False, False, False, True]
        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
        y2 = ma.array([0, 0, 3, -1], mask=mask)
        r, p = mstats.pearsonr(x2, y2)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)
    def test_pearsonr_misaligned_mask(self):
        # Masks of x and y differ; the union of both masks must be applied.
        mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])
        my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])
        x = np.array([1, 4, 5, 6])
        y = np.array([9, 6, 5, 9])
        mr, mp = mstats.pearsonr(mx, my)
        r, p = stats.pearsonr(x, y)
        assert_equal(mr, r)
        assert_equal(mp, p)
    def test_spearmanr(self):
        # Tests some computations of Spearman's rho
        (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        # Next test is to make sure calculation uses sufficient precision.
        # The denominator's value is ~n^3 and used to be represented as an
        # int. 2000**3 > 2**32 so these arrays would cause overflow on
        # some machines.
        x = list(range(2000))
        y = list(range(2000))
        y[0], y[9] = y[9], y[0]
        y[10], y[434] = y[434], y[10]
        y[435], y[1509] = y[1509], y[435]
        # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
        #     = 1 - (1 / 500)
        #     = 0.998
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)
        # test for namedtuple attributes
        res = mstats.spearmanr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_spearmanr_alternative(self):
        # check against R
        # options(digits=16)
        # cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
        #            1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7),
        #          c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
        #            0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4),
        #          alternative='two.sided', method='spearman')
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        r_exp = 0.6887298747763864  # from cor.test
        r, p = mstats.spearmanr(x, y)
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.004519192910756)
        r, p = mstats.spearmanr(x, y, alternative='greater')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.002259596455378)
        r, p = mstats.spearmanr(x, y, alternative='less')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.9977404035446)
        # intuitive test (with obvious positive correlation)
        n = 100
        x = np.linspace(0, 5, n)
        y = 0.1*x + np.random.rand(n)  # y is positively correlated w/ x
        stat1, p1 = mstats.spearmanr(x, y)
        stat2, p2 = mstats.spearmanr(x, y, alternative="greater")
        assert_allclose(p2, p1 / 2)  # positive correlation -> small p
        stat3, p3 = mstats.spearmanr(x, y, alternative="less")
        assert_allclose(p3, 1 - p1 / 2)  # positive correlation -> large p
        assert stat1 == stat2 == stat3
        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            mstats.spearmanr(x, y, alternative="ekki-ekki")
    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    def test_kendalltau(self):
        # check case with maximum disorder and p=1
        x = ma.array(np.array([9, 2, 5, 6]))
        y = ma.array(np.array([4, 7, 9, 11]))
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.0, 1.0]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # simple case without ties
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10))
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # check exception in case of invalid method keyword
        assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')
        # swap a couple of values
        b = y[1]
        y[1] = y[2]
        y[2] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # swap a couple more
        b = y[5]
        y[5] = y[6]
        y[6] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # same in opposite direction
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10)[::-1])
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # swap a couple of values
        b = y[1]
        y[1] = y[2]
        y[2] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # swap a couple more
        b = y[5]
        y[5] = y[6]
        y[6] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # Tests some computations of Kendall's tau
        x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])
        y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
        z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),
                            [+0.3333333, 0.75])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')),
                            [+0.3333333, 0.4969059])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),
                            [-0.5477226, 0.2785987])
        #
        x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,
                            10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])
        y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,
                            25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])
        result = mstats.kendalltau(x, y)
        assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
        # test for namedtuple attributes
        attributes = ('correlation', 'pvalue')
        check_named_results(result, attributes, ma=True)
    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    @pytest.mark.slow
    def test_kendalltau_large(self):
        # make sure internal variable use correct precision with
        # larger arrays
        x = np.arange(2000, dtype=float)
        x = ma.masked_greater(x, 1995)
        y = np.arange(2000, dtype=float)
        y = np.concatenate((y[1000:], y[:1000]))
        assert_(np.isfinite(mstats.kendalltau(x, y)[1]))
    def test_kendalltau_seasonal(self):
        # Tests the seasonal Kendall tau.
        x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        output = mstats.kendalltau_seasonal(x)
        assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
        assert_almost_equal(output['seasonal p-value'].round(2),
                            [0.18,0.53,0.20,0.04])
    @pytest.mark.parametrize("method", ("exact", "asymptotic"))
    @pytest.mark.parametrize("alternative", ("two-sided", "greater", "less"))
    def test_kendalltau_mstats_vs_stats(self, method, alternative):
        # Test that mstats.kendalltau and stats.kendalltau with
        # nan_policy='omit' matches behavior of stats.kendalltau
        # Accuracy of the alternatives is tested in stats/tests/test_stats.py
        np.random.seed(0)
        n = 50
        x = np.random.rand(n)
        y = np.random.rand(n)
        mask = np.random.rand(n) > 0.5
        x_masked = ma.array(x, mask=mask)
        y_masked = ma.array(y, mask=mask)
        res_masked = mstats.kendalltau(
            x_masked, y_masked, method=method, alternative=alternative)
        x_compressed = x_masked.compressed()
        y_compressed = y_masked.compressed()
        res_compressed = stats.kendalltau(
            x_compressed, y_compressed, method=method, alternative=alternative)
        x[mask] = np.nan
        y[mask] = np.nan
        res_nan = stats.kendalltau(
            x, y, method=method, nan_policy='omit', alternative=alternative)
        assert_allclose(res_masked, res_compressed)
        assert_allclose(res_nan, res_compressed)
    def test_kendall_p_exact_medium(self):
        # Test for the exact method with medium samples (some n >= 171)
        # expected values generated using SymPy
        expectations = {(100, 2393): 0.62822615287956040664,
                        (101, 2436): 0.60439525773513602669,
                        (170, 0): 2.755801935583541e-307,
                        (171, 0): 0.0,
                        (171, 1): 2.755801935583541e-307,
                        (172, 1): 0.0,
                        (200, 9797): 0.74753983745929675209,
                        (201, 9656): 0.40959218958120363618}
        for nc, expected in expectations.items():
            res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)
    @pytest.mark.xslow
    def test_kendall_p_exact_large(self):
        # Test for the exact method with large samples (n >= 171)
        # expected values generated using SymPy
        expectations = {(400, 38965): 0.48444283672113314099,
                        (401, 39516): 0.66363159823474837662,
                        (800, 156772): 0.42265448483120932055,
                        (801, 157849): 0.53437553412194416236,
                        (1600, 637472): 0.84200727400323538419,
                        (1601, 630304): 0.34465255088058593946}
        for nc, expected in expectations.items():
            res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)
    def test_pointbiserial(self):
        # The trailing -1/np.nan pair is masked out by pointbiserialr.
        x = [1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, -1]
        y = [14.8, 13.8, 12.4, 10.1, 7.1, 6.1, 5.8, 4.6, 4.3, 3.5, 3.3, 3.2,
             3.0, 2.8, 2.8, 2.5, 2.4, 2.3, 2.1, 1.7, 1.7, 1.5, 1.3, 1.3, 1.2,
             1.2, 1.1, 0.8, 0.7, 0.6, 0.5, 0.2, 0.2, 0.1, np.nan]
        assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
        # test for namedtuple attributes
        res = mstats.pointbiserialr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestTrimming:
    """Tests for trim/trimboth/trimtail/trimr, trimmed statistics
    (mean/var/std/stde) and winsorization on masked arrays.  In the
    expected lists, None denotes a trimmed (masked) position."""
    def test_trim(self):
        a = ma.arange(10)
        assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
                     [None,None,None,3,4,5,6,7,None,None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
                     [None,1,2,3,4,5,6,7,None,None])
        a = ma.arange(12)
        a[[0,-1]] = a[5] = masked
        assert_equal(mstats.trim(a, (2,8)),
                     [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
        x = ma.arange(100).reshape(10, 10)
        expected = [1]*10 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx._mask.T.ravel(), expected)
        # same as above, but with an extra masked row inserted
        x = ma.arange(110).reshape(11, 10)
        x[1] = masked
        expected = [1]*20 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx.T._mask.ravel(), expected)
    def test_trim_old(self):
        x = ma.arange(100)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x,tail='r').count(), 80)
        x[50:70] = masked
        trimx = mstats.trimboth(x)
        assert_equal(trimx.count(), 48)
        assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
        x._mask = nomask
        x.shape = (10,10)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x).count(), 80)
    def test_trimr(self):
        x = ma.arange(10)
        result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))
        expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                            mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
        assert_equal(result, expected)
        assert_equal(result.mask, expected.mask)
    def test_trimmedmean(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
    def test_trimmedvar(self):
        # Basic test. Additional tests of all arguments, edge cases,
        # input validation, and proper treatment of masked arrays are needed.
        rng = np.random.default_rng(3262323289434724460)
        data_orig = rng.random(size=20)
        data = np.sort(data_orig)
        data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
        assert_allclose(mstats.trimmed_var(data_orig, 0.1), data.var())
    def test_trimmedstd(self):
        # Basic test. Additional tests of all arguments, edge cases,
        # input validation, and proper treatment of masked arrays are needed.
        rng = np.random.default_rng(7121029245207162780)
        data_orig = rng.random(size=20)
        data = np.sort(data_orig)
        data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
        assert_allclose(mstats.trimmed_std(data_orig, 0.1), data.std())
    def test_trimmed_stde(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
        assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
    def test_winsorization(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
                            21551.4, 1)
        assert_almost_equal(
            mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1),
            11887.3, 1)
        data[5] = masked
        # Winsorizing must preserve the input's mask.
        winsorized = mstats.winsorize(data)
        assert_equal(winsorized.mask, data.mask)
    def test_winsorization_nan(self):
        data = ma.array([np.nan, np.nan, 0, 1, 2])
        assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),
                      nan_policy='raise')
        # Testing propagate (default behavior)
        assert_equal(mstats.winsorize(data, (0.4, 0.4)),
                     ma.array([2, 2, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8)),
                     ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))
        assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
class TestMoments:
    """Tests for moment, variation, skew and kurtosis on masked arrays,
    including bias-corrected variants and 2-D masked inputs."""
    # Comparison numbers are found using R v.1.5.1
    # note that length(testcase) = 4
    # testmathworks comes from documentation for the
    # Statistics Toolbox for Matlab and can be found at both
    # https://www.mathworks.com/help/stats/kurtosis.html
    # https://www.mathworks.com/help/stats/skewness.html
    # Note that both test cases came from here.
    testcase = [1,2,3,4]
    testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
                                    np.nan])
    testcase_2d = ma.array(
    np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
              [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
              [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
              [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
              [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
    mask=np.array([[True, False, False, True, False],
                   [True, True, True, False, True],
                   [False, False, False, False, False],
                   [True, True, True, True, True],
                   [False, False, True, False, False]], dtype=bool))
    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
        # Helper: compare values (broadcasting expect to `shape` if given)
        # and check the result dtype (defaults to expect's dtype).
        expect = np.asarray(expect)
        if shape is not None:
            expect = np.broadcast_to(expect, shape)
        assert_array_equal(actual, expect)
        if dtype is None:
            dtype = expect.dtype
        assert actual.dtype == dtype
    def test_moment(self):
        y = mstats.moment(self.testcase,1)
        assert_almost_equal(y,0.0,10)
        y = mstats.moment(self.testcase,2)
        assert_almost_equal(y,1.25)
        y = mstats.moment(self.testcase,3)
        assert_almost_equal(y,0.0)
        y = mstats.moment(self.testcase,4)
        assert_almost_equal(y,2.5625)
        # check array_like input for moment
        y = mstats.moment(self.testcase, [1, 2, 3, 4])
        assert_allclose(y, [0, 1.25, 0, 2.5625])
        # check moment input consists only of integers
        y = mstats.moment(self.testcase, 0.0)
        assert_allclose(y, 1.0)
        assert_raises(ValueError, mstats.moment, self.testcase, 1.2)
        y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])
        assert_allclose(y, [0, 1.25, 0, 2.5625])
        # test empty input
        y = mstats.moment([])
        self._assert_equal(y, np.nan, dtype=np.float64)
        y = mstats.moment(np.array([], dtype=np.float32))
        self._assert_equal(y, np.nan, dtype=np.float32)
        y = mstats.moment(np.zeros((1, 0)), axis=0)
        self._assert_equal(y, [], shape=(0,), dtype=np.float64)
        y = mstats.moment([[]], axis=1)
        self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
        y = mstats.moment([[]], moment=[0, 1], axis=0)
        self._assert_equal(y, [], shape=(2, 0))
        x = np.arange(10.)
        x[9] = np.nan
        assert_equal(mstats.moment(x, 2), ma.masked)  # NaN value is ignored
    def test_variation(self):
        y = mstats.variation(self.testcase)
        assert_almost_equal(y,0.44721359549996, 10)
    def test_variation_ddof(self):
        # test variation with delta degrees of freedom
        # regression test for gh-13341
        a = np.array([1, 2, 3, 4, 5])
        y = mstats.variation(a, ddof=1)
        assert_almost_equal(y, 0.5270462766947299)
    def test_skewness(self):
        y = mstats.skew(self.testmathworks)
        assert_almost_equal(y,-0.29322304336607,10)
        y = mstats.skew(self.testmathworks,bias=0)
        assert_almost_equal(y,-0.437111105023940,10)
        y = mstats.skew(self.testcase)
        assert_almost_equal(y,0.0,10)
        # test that skew works on multidimensional masked arrays
        correct_2d = ma.array(
            np.array([0.6882870394455785, 0, 0.2665647526856708,
                      0, -0.05211472114254485]),
            mask=np.array([False, False, False, True, False], dtype=bool)
        )
        assert_allclose(mstats.skew(self.testcase_2d, 1), correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.skew(row), correct_2d[i])
        correct_2d_bias_corrected = ma.array(
            np.array([1.685952043212545, 0.0, 0.3973712716070531, 0,
                      -0.09026534484117164]),
            mask=np.array([False, False, False, True, False], dtype=bool)
        )
        assert_allclose(mstats.skew(self.testcase_2d, 1, bias=False),
                        correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.skew(row, bias=False),
                                correct_2d_bias_corrected[i])
        # Check consistency between stats and mstats implementations
        assert_allclose(mstats.skew(self.testcase_2d[2, :]),
                        stats.skew(self.testcase_2d[2, :]))
    def test_kurtosis(self):
        # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
        # for compatibility with Matlab)
        y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
        assert_almost_equal(y, 2.1658856802973, 10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
        # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
        # The MATLAB docs imply that both should give Fisher's
        y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)
        assert_almost_equal(y, 3.663542721189047, 10)
        y = mstats.kurtosis(self.testcase, 0, 0)
        assert_almost_equal(y, 1.64)
        # test that kurtosis works on multidimensional masked arrays
        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
                                        -1.26979517952]),
                              mask=np.array([False, False, False, True,
                                             False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
                                  correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])
        correct_2d_bias_corrected = ma.array(
            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
            mask=np.array([False, False, False, True, False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
                                                  bias=False),
                                  correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row, bias=False),
                                correct_2d_bias_corrected[i])
        # Check consistency between stats and mstats implementations
        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
                                       stats.kurtosis(self.testcase_2d[2, :]),
                                       nulp=4)
class TestMode:
    """Tests for mstats.mode on plain and masked arrays, over flattened
    input (axis=None) and along specific axes."""
    def test_mode(self):
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        # Each expected value is a (mode, count) pair.
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))
        a1_res = mstats.mode(a1, axis=None)
        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)
    def test_mode_modifies_input(self):
        # regression test for gh-6428: mode(..., axis=None) may not modify
        # the input array
        im = np.zeros((100, 100))
        im[:50, :] += 1
        im[:, :50] += 1
        cp = im.copy()
        mstats.mode(im, None)
        assert_equal(im, cp)
class TestPercentile:
    """Tests for mstats.scoreatpercentile."""

    def setup_method(self):
        self.a1 = [3, 4, 5, 10, -3, -5, 6]
        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]

    def test_percentile(self):
        # 0th/100th percentiles are the extremes; the 50th is the median.
        data = np.arange(8) * 0.5
        for per, expected in [(0, 0.), (100, 3.5), (50, 1.75)]:
            assert_equal(mstats.scoreatpercentile(data, per), expected)

    def test_2D(self):
        # The percentile is computed column-wise for 2-D input.
        ones = [1, 1, 1]
        x = ma.array([ones, ones, [4, 4, 3], ones, ones])
        assert_equal(mstats.scoreatpercentile(x, 50), [1, 1, 1])
class TestVariability:
    """Tests for sem, zmap and zscore on masked data.

    Comparison numbers were found using R v.1.5.1; note that the
    unmasked length of ``testcase`` is 4.
    """
    # fix_invalid converts the trailing nan into a masked entry
    testcase = ma.fix_invalid([1, 2, 3, 4, np.nan])

    def test_sem(self):
        # Not available in R; reference is sqrt(var(testcase)*3/4) / sqrt(3).
        result = mstats.sem(self.testcase)
        assert_almost_equal(result, 0.6454972244)
        n = self.testcase.count()
        assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
                        mstats.sem(self.testcase, ddof=2))

    def test_zmap(self):
        # Reference: (testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4)
        result = mstats.zmap(self.testcase, self.testcase)
        expected = [-1.3416407864999, -0.44721359549996,
                    0.44721359549996, 1.3416407864999]
        unmasked = result.data[result.mask == False]  # noqa: E712
        assert_array_almost_equal(expected, unmasked, decimal=12)

    def test_zscore(self):
        # Same reference values as zmap; the masked slot stays masked.
        expected = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
                                   0.44721359549996, 1.3416407864999, np.nan])
        assert_almost_equal(expected, mstats.zscore(self.testcase), decimal=12)
class TestMisc:
    """Tests for obrientransform, ks_2samp and friedmanchisquare on
    masked arrays."""
    def test_obrientransform(self):
        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
        assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4),
                            result, 4)
    def test_ks_2samp(self):
        # Rows are seasons; nan entries become masked via fix_invalid.
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        (winter, spring, summer, fall) = x.T
        # Two-sided, 'greater' and 'less' alternatives.
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4),
                            (0.1818, 0.9628))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4),
                            (0.1469, 0.6886))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4),
                            (0.1818, 0.6011))
    def test_friedmanchisq(self):
        # No missing values
        args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
                [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
                [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
        result = mstats.friedmanchisquare(*args)
        assert_almost_equal(result[0], 10.4737, 4)
        assert_almost_equal(result[1], 0.005317, 6)
        # Missing values
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x)
        result = mstats.friedmanchisquare(*x)
        assert_almost_equal(result[0], 2.0156, 4)
        assert_almost_equal(result[1], 0.5692, 4)
        # test for namedtuple attributes
        attributes = ('statistic', 'pvalue')
        check_named_results(result, attributes, ma=True)
def test_regress_simple():
    """Regress a line with sinusoidal noise (regression test for #1273)."""
    x = np.linspace(0, 100, 100)
    y = 0.2 * np.linspace(0, 100, 100) + 10
    y += np.sin(np.linspace(0, 20, 100))

    result = mstats.linregress(x, y)

    # The result must be a LinregressResult with the classic five tuple
    # fields plus the extra (non-tuple) intercept_stderr attribute.
    result_cls = stats._stats_mstats_common.LinregressResult
    assert_(isinstance(result, result_cls))
    check_named_results(result,
                        ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr'),
                        ma=True)
    assert 'intercept_stderr' in dir(result)

    # Slope and intercept are estimated correctly.
    assert_almost_equal(result.slope, 0.19644990055858422)
    assert_almost_equal(result.intercept, 10.211269918932341)
    assert_almost_equal(result.stderr, 0.002395781449783862)
    assert_almost_equal(result.intercept_stderr, 0.13866936078570702)
def test_linregress_identical_x():
    # When every x value is identical the slope is undefined, so
    # linregress must raise a ValueError with a helpful message.
    xs = np.zeros(10)
    ys = np.random.random(10)
    expected = "Cannot calculate a linear regression if all x values are identical"
    with assert_raises(ValueError, match=expected):
        mstats.linregress(xs, ys)
def test_theilslopes():
    # Basic slope and intercept, for both intercept conventions.
    slope, intercept, lo, hi = mstats.theilslopes([0, 1, 1])
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.5)

    slope, intercept, lo, hi = mstats.theilslopes([0, 1, 1], method='joint')
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.0)

    # Masked entries must be ignored entirely.
    y_masked = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
    slope, intercept, lo, hi = mstats.theilslopes(y_masked)
    assert_almost_equal(slope, 1. / 3)
    assert_almost_equal(intercept, 2. / 3)

    slope, intercept, lo, hi = mstats.theilslopes(y_masked, method='joint')
    assert_almost_equal(slope, 1. / 3)
    assert_almost_equal(intercept, 0.0)

    # Confidence intervals from the worked example in Sen (1968); the
    # slope and its confidence limits are the same for both methods,
    # only the intercept differs.
    xs = [1, 2, 3, 4, 10, 12, 18]
    ys = [9, 15, 19, 20, 45, 55, 78]
    for method, expected_intercept in [('separate', 4.0), ('joint', 6.0)]:
        slope, intercept, lo, hi = mstats.theilslopes(ys, xs, 0.07,
                                                      method=method)
        assert_almost_equal(slope, 4)
        assert_almost_equal(intercept, expected_intercept)
        assert_almost_equal(hi, 4.38, decimal=2)
        assert_almost_equal(lo, 3.71, decimal=2)
def test_theilslopes_warnings():
    # Degenerate input handling; see gh-15943.
    # All x identical: a RuntimeWarning is emitted and every returned
    # value is NaN.
    with pytest.warns(RuntimeWarning, match="All `x` coordinates are..."):
        degenerate = mstats.theilslopes([0, 1], [0, 0])
    assert np.all(np.isnan(degenerate))

    # One vertical pair among otherwise flat data: slope/intercept are
    # zero and the confidence limits are NaN.
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered...")
        flat = mstats.theilslopes([0, 0, 0], [0, 1, 0])
    assert_allclose(flat, (0, 0, np.nan, np.nan))
def test_theilslopes_namedtuple_consistency():
    """
    Ensure the returned TheilslopesResult still unpacks as a plain
    (slope, intercept, low_slope, high_slope) tuple while exposing the
    same values through named attributes.
    """
    y = [1, 2, 4]
    x = [4, 6, 8]
    unpacked = mstats.theilslopes(y, x)
    named = mstats.theilslopes(y, x)
    # All four returned values are distinct for this data, so positional
    # and attribute access can be matched one-to-one.
    fields = ('slope', 'intercept', 'low_slope', 'high_slope')
    for value, field in zip(unpacked, fields):
        assert_equal(value, getattr(named, field))
def test_siegelslopes():
    # The estimator is exact for data that lie on a straight line.
    line = 2 * np.arange(10) + 0.5
    assert_equal(mstats.siegelslopes(line), (2.0, 0.5))
    assert_equal(mstats.siegelslopes(line, method='separate'), (2.0, 0.5))

    xs = 2 * np.arange(10)
    ys = 5 * xs - 3.0
    assert_equal(mstats.siegelslopes(ys, xs), (5.0, -3.0))
    assert_equal(mstats.siegelslopes(ys, xs, method='separate'),
                 (5.0, -3.0))

    # Robust to outliers: breakdown point of 50%.
    ys[:4] = 1000
    assert_equal(mstats.siegelslopes(ys, xs), (5.0, -3.0))

    # Without outliers, results should be comparable to linregress.
    xs = np.arange(10)
    ys = -2.3 + 0.3 * xs + stats.norm.rvs(size=10, random_state=231)
    slope_ols, intercept_ols, _, _, _ = stats.linregress(xs, ys)

    slope, intercept = mstats.siegelslopes(ys, xs)
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)

    slope, intercept = mstats.siegelslopes(ys, xs, method='separate')
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)
def test_siegelslopes_namedtuple_consistency():
    """
    Ensure the returned SiegelslopesResult still unpacks as a plain
    (slope, intercept) tuple while exposing the same values through
    named attributes.
    """
    y = [1, 2, 4]
    x = [4, 6, 8]
    slope, intercept = mstats.siegelslopes(y, x)
    named = mstats.siegelslopes(y, x)
    # The two returned values are distinct for this data.
    assert_equal(slope, named.slope)
    assert_equal(intercept, named.intercept)
def test_sen_seasonal_slopes():
    # Compare against the straightforward reference implementation from
    # the `sen_seasonal_slopes` documentation.
    rng = np.random.default_rng(5765986256978575148)
    data = rng.random(size=(100, 4))
    intra, inter = mstats.sen_seasonal_slopes(data)

    def pairwise_slopes(col):
        # All (y[j] - y[i]) / (j - i) for i < j within one season.
        m = len(col)
        positions = np.arange(m)
        dy = col - col[:, np.newaxis]
        dx = positions - positions[:, np.newaxis]
        upper = np.triu(np.ones((m, m), dtype=bool), k=1)
        return dy[upper] / dx[upper]

    # Intra-season slope: median of the pairwise slopes of each season.
    for season in range(4):
        assert_allclose(np.median(pairwise_slopes(data[:, season])),
                        intra[season])

    # Inter-season slope: median over all seasons' pairwise slopes pooled.
    pooled = np.concatenate([pairwise_slopes(data[:, season])
                             for season in range(data.shape[1])])
    assert_allclose(np.median(pooled), inter)
def test_plotting_positions():
    # Regression test for gh-1256: with alpha = beta = 0 the plotting
    # positions are i / (n + 1).
    positions = mstats.plotting_positions(np.arange(3), 0, 0)
    assert_array_almost_equal(positions.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
    # Checks that mstats.normaltest / skewtest / kurtosistest agree with
    # their scipy.stats counterparts, including masked-array, n-d,
    # axis=None and `alternative` handling.

    def test_vs_nonmasked(self):
        # Plain ndarray input must match scipy.stats exactly.
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        assert_array_almost_equal(mstats.normaltest(x),
                                  stats.normaltest(x))
        assert_array_almost_equal(mstats.skewtest(x),
                                  stats.skewtest(x))
        assert_array_almost_equal(mstats.kurtosistest(x),
                                  stats.kurtosistest(x))

        # Too-small samples must raise in both implementations.
        funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
        mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
        x = [1, 2, 3, 4]
        for func, mfunc in zip(funcs, mfuncs):
            assert_raises(ValueError, func, x)
            assert_raises(ValueError, mfunc, x)

    def test_axis_None(self):
        # Test axis=None (equal to axis=0 for 1-D input)
        x = np.array((-2,-1,0,1,2,3)*4)**2
        assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
        assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
        assert_allclose(mstats.kurtosistest(x, axis=None),
                        mstats.kurtosistest(x))

    def test_maskedarray_input(self):
        # Add some masked values, test result doesn't change
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        xm = np.ma.array(np.r_[np.inf, x, 10],
                         mask=np.r_[True, [False] * x.size, True])
        assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
        assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
        assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))

    def test_nd_input(self):
        # A 2-D input must give per-column results equal to the 1-D case.
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        x_2d = np.vstack([x] * 2).T
        for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
            res_1d = func(x)
            res_2d = func(x_2d)
            assert_allclose(res_2d[0], [res_1d[0]] * 2)
            assert_allclose(res_2d[1], [res_1d[1]] * 2)

    def test_normaltest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.normaltest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kurtosistest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.kurtosistest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_regression_9033(self):
        # x clearly non-normal, but the power of a negative denominator
        # needs to be handled correctly to reject normality
        counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
        x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
        assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)

    @pytest.mark.parametrize("test", ["skewtest", "kurtosistest"])
    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, test, alternative):
        # One-sided alternatives must agree with scipy.stats, both for
        # plain arrays and for masked arrays vs compressed data.
        x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)
        stats_test = getattr(stats, test)
        mstats_test = getattr(mstats, test)
        z_ex, p_ex = stats_test(x, alternative=alternative)
        z, p = mstats_test(x, alternative=alternative)
        assert_allclose(z, z_ex, atol=1e-12)
        assert_allclose(p, p_ex, atol=1e-12)

        # test with masked arrays
        x[1:5] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)
        z, p = mstats_test(x, alternative=alternative)
        assert_allclose(z, z_ex, atol=1e-12)
        assert_allclose(p, p_ex, atol=1e-12)

    def test_bad_alternative(self):
        x = stats.norm.rvs(size=20, random_state=123)
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.skewtest(x, alternative='error')
        with pytest.raises(ValueError, match=msg):
            mstats.kurtosistest(x, alternative='error')
class TestFOneway():
    def test_result_attributes(self):
        # f_oneway must return a namedtuple-like result that also
        # behaves as a plain (statistic, pvalue) tuple.
        sample_a = np.array([655, 788], dtype=np.uint16)
        sample_b = np.array([789, 772], dtype=np.uint16)
        result = mstats.f_oneway(sample_a, sample_b)
        check_named_results(result, ('statistic', 'pvalue'), ma=True)
class TestMannwhitneyu():
    # Heavily-tied samples reported in gh-1428; the literal values are
    # the substance of the regression test, so they are kept verbatim.
    # data from gh-1428
    x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1.])

    y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                  2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                  1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                  2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                  2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                  1., 1., 1., 1.])

    def test_result_attributes(self):
        # Result must expose statistic/pvalue as named attributes and
        # still unpack as a plain tuple.
        res = mstats.mannwhitneyu(self.x, self.y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_against_stats(self):
        # gh-4641 reported that stats.mannwhitneyu returned half the p-value
        # of mstats.mannwhitneyu. Default alternative of stats.mannwhitneyu
        # is now two-sided, so they match.
        res1 = mstats.mannwhitneyu(self.x, self.y)
        res2 = stats.mannwhitneyu(self.x, self.y)
        assert res1.statistic == res2.statistic
        assert_allclose(res1.pvalue, res2.pvalue)
class TestKruskal():
    def test_result_attributes(self):
        # kruskal must return a namedtuple-like (statistic, pvalue) result.
        odds = [1, 3, 5, 7, 9]
        evens = [2, 4, 6, 8, 10]
        result = mstats.kruskal(odds, evens)
        check_named_results(result, ('statistic', 'pvalue'), ma=True)
# TODO: for all ttest functions, add tests with masked array inputs
class TestTtest_rel():
    # Paired t-test: mstats.ttest_rel vs stats.ttest_rel, including
    # masked/NaN input, degenerate cases, and one-sided alternatives.

    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)

        # 2-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)

        # Check default is axis=0
        res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

    def test_fully_masked(self):
        # Entirely masked (or all-NaN) samples must yield NaN results
        # rather than raising.
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2),
                                  mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_rel(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))

    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_invalid_input_size(self):
        # Mismatched sample sizes along the test axis must raise.
        assert_raises(ValueError, mstats.ttest_rel,
                      np.arange(10), np.arange(11))
        x = np.arange(24)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)

    def test_empty(self):
        res1 = mstats.ttest_rel([], [])
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # NOTE(review): these calls use ttest_ind, not ttest_rel —
        # confirm whether that is intentional.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, np.array([np.nan, np.nan]))
            assert_array_equal(p, np.array([np.nan, np.nan]))

    def test_bad_alternative(self):
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives must agree with scipy.stats, both for
        # plain arrays and for masked arrays vs compressed data.
        x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42)
        y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42)

        t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative)
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        y[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(),
                                     alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestTtest_ind():
    # Independent two-sample t-test: mstats.ttest_ind vs stats.ttest_ind,
    # including equal_var, masked/NaN input and one-sided alternatives.

    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)

        # 2-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)

        # Check default is axis=0
        res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

        # Check equal_var
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        assert_allclose(res4, res5)
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        assert_allclose(res4, res5)

    def test_fully_masked(self):
        # Entirely masked (or all-NaN) samples must yield NaN results
        # rather than raising.
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_ind(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))

    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        res1 = mstats.ttest_ind([], [])
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # Zero variance: infinite t when the means differ, NaN otherwise.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, (np.nan, np.nan))
            assert_array_equal(p, (np.nan, np.nan))

        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
        assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
                                            equal_var=False), (np.nan, np.nan))

    def test_bad_alternative(self):
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives must agree with scipy.stats, both for
        # plain arrays and for masked arrays vs compressed data.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
        y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        y[80:90] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed(),
                                     alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestTtest_1samp():
    # One-sample t-test: mstats.ttest_1samp vs stats.ttest_1samp,
    # including masked/NaN input, degenerate cases, and alternatives.

    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_1samp(outcome[:, 0], 1)
        res2 = mstats.ttest_1samp(outcome[:, 0], 1)
        assert_allclose(res1, res2)

    def test_fully_masked(self):
        # Entirely masked (or all-NaN) samples must yield NaN results
        # rather than raising.
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
        expected = (np.nan, np.nan)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]:
                t, p = mstats.ttest_1samp(*pair)
                assert_array_equal(p, expected)
                assert_array_equal(t, expected)

    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_1samp(outcome[:, 0], 1)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        res1 = mstats.ttest_1samp([], 1)
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # Zero variance: infinite t when the mean differs from popmean,
        # NaN when it does not.
        t, p = mstats.ttest_1samp([0, 0, 0], 1)
        assert_equal((np.abs(t), p), (np.inf, 0))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_1samp([0, 0, 0], 0)
            assert_(np.isnan(t))
            assert_array_equal(p, (np.nan, np.nan))

    def test_bad_alternative(self):
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_1samp([1, 2, 3], 4, alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives must agree with scipy.stats, both for
        # plain arrays and for masked arrays vs compressed data.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # test with masked arrays
        x[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9,
                                       alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestDescribe:
    """
    Tests for mstats.describe.

    Note that there are also tests for `mstats.describe` in the
    class TestCompareWithStats.
    """
    def test_basic_with_axis(self):
        # Per-row statistics of a masked 2-D array; this is also a
        # regression test for gh-7303.
        data = np.ma.masked_array([[0, 1, 2, 3, 4, 9],
                                   [5, 5, 0, 9, 3, 3]],
                                  mask=[[0, 0, 0, 0, 0, 1],
                                        [0, 0, 1, 1, 0, 0]])
        described = mstats.describe(data, axis=1)

        # Masked entries are excluded from the counts and all moments.
        assert_equal(described.nobs, [5, 4])
        row_min, row_max = described.minmax
        assert_equal(row_min, [0, 3])
        assert_equal(row_max, [4, 5])
        assert_equal(described.mean, [2.0, 4.0])
        assert_equal(described.variance, [2.0, 1.0])
        assert_equal(described.skewness, [0.0, 0.0])
        assert_allclose(described.kurtosis, [-1.3, -2.0])
class TestCompareWithStats:
"""
Class to compare mstats results with stats results.
It is in general assumed that scipy.stats is at a more mature stage than
stats.mstats. If a routine in mstats results in similar results like in
scipy.stats, this is considered also as a proper validation of scipy.mstats
routine.
Different sample sizes are used for testing, as some problems between stats
and mstats are dependent on sample size.
Author: Alexander Loew
NOTE that some tests fail. This might be caused by
a) actual differences or bugs between stats and mstats
b) numerical inaccuracies
c) different definitions of routine interfaces
These failures need to be checked. Current workaround is to have disabled these tests,
but issuing reports on scipy-dev
"""
def get_n(self):
""" Returns list of sample sizes to be used for comparison. """
return [1000, 100, 10, 5]
def generate_xy_sample(self, n):
# This routine generates numpy arrays and corresponding masked arrays
# with the same data, but additional masked values
np.random.seed(1234567)
x = np.random.randn(n)
y = x + np.random.randn(n)
xm = np.full(len(x) + 5, 1e16)
ym = np.full(len(y) + 5, 1e16)
xm[0:len(x)] = x
ym[0:len(y)] = y
mask = xm > 9e15
xm = np.ma.array(xm, mask=mask)
ym = np.ma.array(ym, mask=mask)
return x, y, xm, ym
def generate_xy_sample2D(self, n, nx):
x = np.full((n, nx), np.nan)
y = np.full((n, nx), np.nan)
xm = np.full((n+5, nx), np.nan)
ym = np.full((n+5, nx), np.nan)
for i in range(nx):
x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
xm[0:n, :] = x[0:n]
ym[0:n, :] = y[0:n]
xm = np.ma.array(xm, mask=np.isnan(xm))
ym = np.ma.array(ym, mask=np.isnan(ym))
return x, y, xm, ym
def test_linregress(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
result1 = stats.linregress(x, y)
result2 = stats.mstats.linregress(xm, ym)
assert_allclose(np.asarray(result1), np.asarray(result2))
def test_pearsonr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.pearsonr(x, y)
rm, pm = stats.mstats.pearsonr(xm, ym)
assert_almost_equal(r, rm, decimal=14)
assert_almost_equal(p, pm, decimal=14)
def test_spearmanr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.spearmanr(x, y)
rm, pm = stats.mstats.spearmanr(xm, ym)
assert_almost_equal(r, rm, 14)
assert_almost_equal(p, pm, 14)
def test_spearmanr_backcompat_useties(self):
# A regression test to ensure we don't break backwards compat
# more than we have to (see gh-9204).
x = np.arange(6)
assert_raises(ValueError, mstats.spearmanr, x, x, False)
def test_gmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.gmean(abs(x))
rm = stats.mstats.gmean(abs(xm))
assert_allclose(r, rm, rtol=1e-13)
r = stats.gmean(abs(y))
rm = stats.mstats.gmean(abs(ym))
assert_allclose(r, rm, rtol=1e-13)
def test_hmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.hmean(abs(x))
rm = stats.mstats.hmean(abs(xm))
assert_almost_equal(r, rm, 10)
r = stats.hmean(abs(y))
rm = stats.mstats.hmean(abs(ym))
assert_almost_equal(r, rm, 10)
def test_skew(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skew(x)
rm = stats.mstats.skew(xm)
assert_almost_equal(r, rm, 10)
r = stats.skew(y)
rm = stats.mstats.skew(ym)
assert_almost_equal(r, rm, 10)
def test_moment(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.moment(x)
rm = stats.mstats.moment(xm)
assert_almost_equal(r, rm, 10)
r = stats.moment(y)
rm = stats.mstats.moment(ym)
assert_almost_equal(r, rm, 10)
def test_zscore(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
# reference solution
zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()
# validate stats
assert_allclose(stats.zscore(x), zx, rtol=1e-10)
assert_allclose(stats.zscore(y), zy, rtol=1e-10)
# compare stats and mstats
assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
rtol=1e-10)
assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
rtol=1e-10)
def test_kurtosis(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kurtosis(x)
rm = stats.mstats.kurtosis(xm)
assert_almost_equal(r, rm, 10)
r = stats.kurtosis(y)
rm = stats.mstats.kurtosis(ym)
assert_almost_equal(r, rm, 10)
def test_sem(self):
# example from stats.sem doc
a = np.arange(20).reshape(5, 4)
am = np.ma.array(a)
r = stats.sem(a, ddof=1)
rm = stats.mstats.sem(am, ddof=1)
assert_allclose(r, 2.82842712, atol=1e-5)
assert_allclose(rm, 2.82842712, atol=1e-5)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
stats.sem(x, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
stats.sem(y, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
stats.sem(x, axis=None, ddof=1), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
stats.sem(y, axis=None, ddof=1), decimal=13)
def test_describe(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.describe(x, ddof=1)
rm = stats.mstats.describe(xm, ddof=1)
for ii in range(6):
assert_almost_equal(np.asarray(r[ii]),
np.asarray(rm[ii]),
decimal=12)
def test_describe_result_attributes(self):
actual = mstats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes, ma=True)
def test_rankdata(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.rankdata(x)
rm = stats.mstats.rankdata(x)
assert_allclose(r, rm)
def test_tmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
def test_tmax(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmax(x,2.),
stats.mstats.tmax(xm,2.), 10)
assert_almost_equal(stats.tmax(y,2.),
stats.mstats.tmax(ym,2.), 10)
assert_almost_equal(stats.tmax(x, upperlimit=3.),
stats.mstats.tmax(xm, upperlimit=3.), 10)
assert_almost_equal(stats.tmax(y, upperlimit=3.),
stats.mstats.tmax(ym, upperlimit=3.), 10)
def test_tmin(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
assert_equal(stats.tmin(y), stats.mstats.tmin(ym))
assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
stats.mstats.tmin(xm, lowerlimit=-1.), 10)
assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
stats.mstats.tmin(ym, lowerlimit=-1.), 10)
def test_zmap(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
z = stats.zmap(x, y)
zm = stats.mstats.zmap(xm, ym)
assert_allclose(z, zm[0:len(z)], atol=1e-10)
def test_variation(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
decimal=12)
assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
decimal=12)
def test_tvar(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
decimal=12)
assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
decimal=12)
def test_trimboth(self):
a = np.arange(20)
b = stats.trimboth(a, 0.1)
bm = stats.mstats.trimboth(a, 0.1)
assert_allclose(np.sort(b), bm.data[~bm.mask])
def test_tsem(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
decimal=14)
assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
decimal=14)
assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
stats.mstats.tsem(xm, limits=(-2., 2.)),
decimal=14)
def test_skewtest(self):
# this test is for 1D data
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r, rm)
def test_skewtest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.skewtest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_skewtest_2D_notmasked(self):
# a normal ndarray is passed to the masked function
x = np.random.random((20, 2)) * 20.
r = stats.skewtest(x)
rm = stats.mstats.skewtest(x)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_skewtest_2D_WithMask(self):
nx = 2
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample2D(n, nx)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r[0][0], rm[0][0], rtol=1e-14)
assert_allclose(r[0][1], rm[0][1], rtol=1e-14)
def test_normaltest(self):
with np.errstate(over='raise'), suppress_warnings() as sup:
sup.filter(UserWarning, "kurtosistest only valid for n>=20")
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.normaltest(x)
rm = stats.mstats.normaltest(xm)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_find_repeats(self):
x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')
tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float')
mask = (tmp == 5.)
xm = np.ma.array(tmp, mask=mask)
x_orig, xm_orig = x.copy(), xm.copy()
r = stats.find_repeats(x)
rm = stats.mstats.find_repeats(xm)
assert_equal(r, rm)
assert_equal(x, x_orig)
assert_equal(xm, xm_orig)
# This crazy behavior is expected by count_tied_groups, but is not
# in the docstring...
_, counts = stats.mstats.find_repeats([])
assert_equal(counts, np.array(0, dtype=np.intp))
def test_kendalltau(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kendalltau(x, y)
rm = stats.mstats.kendalltau(xm, ym)
assert_almost_equal(r[0], rm[0], decimal=10)
assert_almost_equal(r[1], rm[1], decimal=7)
def test_obrientransform(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.obrientransform(x)
rm = stats.mstats.obrientransform(xm)
assert_almost_equal(r.T, rm[0:len(x)])
def test_ks_1samp(self):
"""Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays."""
for mode in ['auto', 'exact', 'asymp']:
with suppress_warnings():
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_kstest_1samp(self):
"""Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays."""
for mode in ['auto', 'exact', 'asymp']:
with suppress_warnings():
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
res2 = stats.mstats.kstest(xm, 'norm', alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.kstest(xm, 'norm', alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_ks_2samp(self):
"""Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.
gh-8431"""
for mode in ['auto', 'exact', 'asymp']:
with suppress_warnings() as sup:
if mode in ['auto', 'exact']:
message = "ks_2samp: Exact calculation unsuccessful."
sup.filter(RuntimeWarning, message)
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.ks_2samp(x, y, alternative=alternative, mode=mode)
res2 = stats.mstats.ks_2samp(xm, ym, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.ks_2samp(xm, y, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
    def test_kstest_2samp(self):
        """Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays."""
        # Same structure as test_ks_2samp, but through the kstest wrapper.
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                if mode in ['auto', 'exact']:
                    # Expected fallback warning from the exact path.
                    message = "ks_2samp: Exact calculation unsuccessful."
                    sup.filter(RuntimeWarning, message)
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.kstest(x, y, alternative=alternative, mode=mode)
                        res2 = stats.mstats.kstest(xm, ym, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        # Mixed masked/plain input is asserted to match too.
                        res3 = stats.kstest(xm, y, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))
class TestBrunnerMunzel:
    # Test data from (Lumley, 1996); the NaNs are masked out.
    X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1,
                              1, 1, 1, 2, 4, 1, 1, np.nan])
    Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4])
    # Number of decimal places to which the R reference values are trusted.
    significant = 14

    def test_brunnermunzel_one_sided(self):
        # Reference values computed with R's lawstat package.
        dec = self.significant
        w_less, prob_less = mstats.brunnermunzel(self.X, self.Y,
                                                 alternative='less')
        w_rev_greater, prob_rev_greater = mstats.brunnermunzel(
            self.Y, self.X, alternative='greater')
        w_greater, prob_greater = mstats.brunnermunzel(self.X, self.Y,
                                                       alternative='greater')
        w_rev_less, prob_rev_less = mstats.brunnermunzel(self.Y, self.X,
                                                         alternative='less')

        # Swapping the samples while flipping the alternative gives the
        # same p-value.
        assert_almost_equal(prob_less, prob_rev_greater, decimal=dec)
        assert_almost_equal(prob_greater, prob_rev_less, decimal=dec)
        assert_(prob_less != prob_greater)
        assert_almost_equal(w_less, 3.1374674823029505, decimal=dec)
        assert_almost_equal(w_rev_greater, -3.1374674823029505, decimal=dec)
        assert_almost_equal(w_greater, 3.1374674823029505, decimal=dec)
        assert_almost_equal(w_rev_less, -3.1374674823029505, decimal=dec)
        assert_almost_equal(prob_less, 0.0028931043330757342, decimal=dec)
        assert_almost_equal(prob_greater, 0.99710689566692423, decimal=dec)

    def test_brunnermunzel_two_sided(self):
        # Reference values computed with R's lawstat package.
        dec = self.significant
        w_xy, prob_xy = mstats.brunnermunzel(self.X, self.Y,
                                             alternative='two-sided')
        w_yx, prob_yx = mstats.brunnermunzel(self.Y, self.X,
                                             alternative='two-sided')

        assert_almost_equal(prob_xy, prob_yx, decimal=dec)
        assert_almost_equal(w_xy, 3.1374674823029505, decimal=dec)
        assert_almost_equal(w_yx, -3.1374674823029505, decimal=dec)
        assert_almost_equal(prob_xy, 0.0057862086661515377, decimal=dec)

    def test_brunnermunzel_default(self):
        # Omitting `alternative` must behave like 'two-sided'.
        dec = self.significant
        w_xy, prob_xy = mstats.brunnermunzel(self.X, self.Y)
        w_yx, prob_yx = mstats.brunnermunzel(self.Y, self.X)

        assert_almost_equal(prob_xy, prob_yx, decimal=dec)
        assert_almost_equal(w_xy, 3.1374674823029505, decimal=dec)
        assert_almost_equal(w_yx, -3.1374674823029505, decimal=dec)
        assert_almost_equal(prob_xy, 0.0057862086661515377, decimal=dec)

    def test_brunnermunzel_alternative_error(self):
        # An unrecognized `alternative` must raise ValueError.
        alternative = "error"
        distribution = "t"
        assert_(alternative not in ["two-sided", "greater", "less"])
        assert_raises(ValueError, mstats.brunnermunzel, self.X, self.Y,
                      alternative, distribution)

    def test_brunnermunzel_distribution_norm(self):
        dec = self.significant
        w_xy, prob_xy = mstats.brunnermunzel(self.X, self.Y,
                                             distribution="normal")
        w_yx, prob_yx = mstats.brunnermunzel(self.Y, self.X,
                                             distribution="normal")
        assert_almost_equal(prob_xy, prob_yx, decimal=dec)
        assert_almost_equal(w_xy, 3.1374674823029505, decimal=dec)
        assert_almost_equal(w_yx, -3.1374674823029505, decimal=dec)
        assert_almost_equal(prob_xy, 0.0017041417600383024, decimal=dec)

    def test_brunnermunzel_distribution_error(self):
        # An unrecognized `distribution` must raise ValueError.
        alternative = "two-sided"
        distribution = "error"
        # NOTE(review): this assertion checks `alternative`, not
        # `distribution`; likely a typo in the original, though it is
        # vacuously true either way.
        assert_(alternative not in ["t", "normal"])
        assert_raises(ValueError, mstats.brunnermunzel, self.X, self.Y,
                      alternative, distribution)

    def test_brunnermunzel_empty_imput(self):
        # Any empty input yields NaN for both the statistic and the p-value.
        for first, second in [(self.X, []), ([], self.Y), ([], [])]:
            stat, prob = mstats.brunnermunzel(first, second)
            assert_(np.isnan(stat))
            assert_(np.isnan(prob))
| 85,050
| 41.042017
| 108
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_crosstab.py
|
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from scipy.stats.contingency import crosstab
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_basic(sparse):
    """crosstab of two sequences, in both dense and sparse form."""
    a = [0, 0, 9, 9, 0, 0, 9]
    b = [2, 1, 3, 1, 2, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [1, 2, 3]
    expected_count = np.array([[1, 2, 1],
                               [1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Use `.toarray()`; the `.A` attribute of scipy.sparse objects is
        # deprecated.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
def test_crosstab_basic_1d():
    # A single input sequence reduces to a 1-d frequency count.
    data = [1, 2, 3, 1, 2, 3, 3]
    (levels,), counts = crosstab(data)
    assert_array_equal(levels, [1, 2, 3])
    assert_array_equal(counts, np.array([2, 2, 3]))
def test_crosstab_basic_3d():
    # Three input sequences produce a 3-d contingency table.
    a = 'a'
    b = 'b'
    x = [0, 0, 9, 9, 0, 0, 9, 9]
    y = [a, a, a, a, b, b, b, a]
    z = [1, 2, 3, 1, 2, 3, 3, 1]
    expected_count = np.array([[[1, 1, 0],
                                [0, 1, 1]],
                               [[2, 0, 1],
                                [0, 0, 1]]])
    (xvals, yvals, zvals), count = crosstab(x, y, z)
    assert_array_equal(xvals, [0, 9])
    assert_array_equal(yvals, [a, b])
    assert_array_equal(zvals, [1, 2, 3])
    assert_array_equal(count, expected_count)
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_levels(sparse):
    """Explicit `levels` for the second sequence adds a zero column/row."""
    a = [0, 0, 9, 9, 0, 0, 9]
    b = [1, 2, 3, 1, 2, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [0, 1, 2, 3]
    expected_count = np.array([[0, 1, 2, 1],
                               [0, 1, 0, 2]])
    # `None` means "infer the levels of `a` from the data".
    (avals, bvals), count = crosstab(a, b, levels=[None, [0, 1, 2, 3]],
                                     sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Use `.toarray()`; the `.A` attribute of scipy.sparse objects is
        # deprecated.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_extra_levels(sparse):
    """Values outside the requested levels are excluded from the count."""
    # The pair of values (-1, 3) will be ignored, because we explicitly
    # request the counted `a` values to be [0, 9].
    a = [0, 0, 9, 9, 0, 0, 9, -1]
    b = [1, 2, 3, 1, 2, 3, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [0, 1, 2, 3]
    expected_count = np.array([[0, 1, 2, 1],
                               [0, 1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, levels=[[0, 9], [0, 1, 2, 3]],
                                     sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Use `.toarray()`; the `.A` attribute of scipy.sparse objects is
        # deprecated.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
def test_validation_at_least_one():
    # Calling crosstab with no sequences must raise TypeError.
    with pytest.raises(TypeError, match='At least one'):
        crosstab()
def test_validation_same_lengths():
    # All input sequences must have the same length.
    with pytest.raises(ValueError, match='must have the same length'):
        crosstab([1, 2], [1, 2, 3, 4])
def test_validation_sparse_only_two_args():
    # sparse=True is only supported for exactly two input sequences.
    with pytest.raises(ValueError, match='only two input sequences'):
        crosstab([0, 1, 1], [8, 8, 9], [1, 3, 3], sparse=True)
def test_validation_len_levels_matches_args():
    # `levels` must contain one entry per input sequence.
    with pytest.raises(ValueError, match='number of input sequences'):
        crosstab([0, 1, 1], [8, 8, 9], levels=([0, 1, 2, 3],))
def test_result():
    # The result namedtuple iterates in (elements, count) field order.
    res = crosstab([0, 1], [1, 2])
    elements, count = res
    assert_equal((res.elements, res.count), (elements, count))
| 3,882
| 32.474138
| 73
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_generation/reference_distributions.py
|
import numpy as np
import mpmath
from mpmath import mp
class ReferenceDistribution:
    """Minimalist distribution infrastructure for generating reference data.

    The purpose is to generate reference values for unit tests of SciPy
    distribution accuracy and robustness.

    Handles array input with standard broadcasting rules, and method
    implementations are easily compared against their mathematical definitions.
    No attempt is made to handle edge cases or be fast, and arbitrary precision
    arithmetic is trusted for accuracy rather than making the method
    implementations "smart".

    Notes
    -----
    In this infrastructure, distribution families are classes, and
    fully-specified distributions (i.e. with definite values of all family
    parameters) are instances of these classes. Typically, the public methods
    accept as input only the argument at which the function is to
    be evaluated. Unlike SciPy distributions, they never accept values of
    distribution family shape, location, or scale parameters. A few
    other parameters are noteworthy:

    - All methods accept `dtype` to control the output data type. The default
      is `np.float64`, but `object` or `mp.mpf` may be
      specified to output the full `mpf`.
    - `ppf`/`isf` accept a `guess` because they use a scalar rootfinder
      to invert the `cdf`/`sf`. This is passed directly into the `x0` method
      of `mpmath.findroot`; see its documentation for details.
    - moment accepts `order`, an integer that specifies the order of the (raw)
      moment, and `center`, which is the value about which the moment is
      taken. The default is to calculate the mean and use it to calculate
      central moments; passing `0` results in a noncentral moment. For
      efficiency, the mean can be passed explicitly if it is already known.

    Follow the example of SkewNormal to generate new reference distributions,
    overriding only `__init__` and `_pdf`*. Use the reference distributions to
    generate reference values for unit tests of SciPy distribution method
    precision and robustness (e.g. for extreme arguments). If a SciPy
    method's implementation is independent and yet the output matches reference
    values generated with this infrastructure, it is unlikely that the SciPy
    and reference values are both inaccurate.

    * If the SciPy output *doesn't* match and the cause appears to be
    inaccuracy of the reference values (e.g. due to numerical issues that
    mpmath's arbitrary precision arithmetic doesn't handle), then it may be
    appropriate to override a method of the reference distribution rather than
    relying on the generic implementation. Otherwise, hesitate to override
    methods: the generic implementations are mathematically correct and easy
    to verify, whereas an override introduces many possibilities of mistakes,
    requires more time to write, and requires more time to review.

    In general, do not create custom unit tests to ensure that
    SciPy distribution methods are *correct* (in the sense of being consistent
    with the rest of the distribution methods); generic tests take care of
    that.
    """

    def __init__(self, **kwargs):
        try:
            if mpmath.dps is not None:
                message = ("`mpmath.dps` has been assigned. This is not "
                           "intended usage; instead, assign the desired "
                           "precision to `mpmath.mp.dps` (e.g. `from mpmath "
                           "as mp; mp.dps = 50.")
                raise RuntimeError(message)
        except AttributeError:
            # Plant a sentinel so a future (mis)assignment to `mpmath.dps`
            # (as opposed to `mpmath.mp.dps`) is detected above.
            mpmath.dps = None

        if mp.dps <= 15:
            message = ("`mpmath.mp.dps <= 15`. Set a higher precision (e.g."
                       "`50`) to use this distribution.")
            raise RuntimeError(message)

        # Convert all shape parameters to mpf for full-precision arithmetic.
        self._params = {key: self._make_mpf_array(val)
                        for key, val in kwargs.items()}

    def _make_mpf_array(self, x):
        # Elementwise conversion to mp.mpf, preserving the input shape.
        shape = np.shape(x)
        x = np.asarray(x, dtype=np.float64).ravel()
        return np.asarray([mp.mpf(xi) for xi in x]).reshape(shape)[()]

    def _pdf(self, x):
        raise NotImplementedError("_pdf must be overridden.")

    def _cdf(self, x, **kwargs):
        # If the subclass overrode `_sf` but not `_cdf`, use the complement.
        if ((self._cdf.__func__ is ReferenceDistribution._cdf)
                and (self._sf.__func__ is not ReferenceDistribution._sf)):
            return mp.one - self._sf(x, **kwargs)

        a, b = self._support(**kwargs)
        res = mp.quad(lambda x: self._pdf(x, **kwargs), (a, x))
        # For precision, compute the smaller of the two tails directly.
        res = res if res < 0.5 else mp.one - self._sf(x, **kwargs)
        return res

    def _sf(self, x, **kwargs):
        # If the subclass overrode `_cdf` but not `_sf`, use the complement.
        if ((self._sf.__func__ is ReferenceDistribution._sf)
                and (self._cdf.__func__ is not ReferenceDistribution._cdf)):
            return mp.one - self._cdf(x, **kwargs)

        a, b = self._support(**kwargs)
        res = mp.quad(lambda x: self._pdf(x, **kwargs), (x, b))
        # For precision, compute the smaller of the two tails directly.
        res = res if res < 0.5 else mp.one - self._cdf(x, **kwargs)
        return res

    def _ppf(self, p, guess=0, **kwargs):
        # If the subclass overrode `_isf` but not `_ppf`, use the complement.
        if ((self._ppf.__func__ is ReferenceDistribution._ppf)
                and (self._isf.__func__ is not ReferenceDistribution._isf)):
            return self._isf(mp.one - p, guess, **kwargs)

        def f(x):
            return self._cdf(x, **kwargs) - p

        return mp.findroot(f, guess)

    def _isf(self, p, guess=0, **kwargs):
        # If the subclass overrode `_ppf` but not `_isf`, use the complement.
        if ((self._isf.__func__ is ReferenceDistribution._isf)
                and (self._ppf.__func__ is not ReferenceDistribution._ppf)):
            return self._ppf(mp.one - p, guess, **kwargs)

        def f(x):
            return self._sf(x, **kwargs) - p

        return mp.findroot(f, guess)

    def _logpdf(self, x, **kwargs):
        return mp.log(self._pdf(x, **kwargs))

    def _logcdf(self, x, **kwargs):
        return mp.log(self._cdf(x, **kwargs))

    def _logsf(self, x, **kwargs):
        return mp.log(self._sf(x, **kwargs))

    def _support(self, **kwargs):
        # Default support is the whole real line; subclasses override.
        return -mp.inf, mp.inf

    def _entropy(self, **kwargs):
        # Differential entropy: -integral of pdf * log(pdf) over the support.
        def integrand(x):
            logpdf = self._logpdf(x, **kwargs)
            pdf = mp.exp(logpdf)
            return -pdf*logpdf

        a, b = self._support(**kwargs)
        return mp.quad(integrand, (a, b))

    def _mean(self, **kwargs):
        return self._moment(order=1, center=0, **kwargs)

    def _var(self, **kwargs):
        mu = self._mean(**kwargs)
        return self._moment(order=2, center=mu, **kwargs)

    def _skew(self, **kwargs):
        mu = self._mean(**kwargs)
        u2 = self._moment(order=2, center=mu, **kwargs)
        sigma = mp.sqrt(u2)
        u3 = self._moment(order=3, center=mu, **kwargs)
        return u3 / sigma**3

    def _kurtosis(self, **kwargs):
        # Excess kurtosis (Fisher's definition).
        mu = self._mean(**kwargs)
        u2 = self._moment(order=2, center=mu, **kwargs)
        u4 = self._moment(order=4, center=mu, **kwargs)
        return u4 / u2**2 - 3

    def _moment(self, order, center, **kwargs):
        # `center=None` means "central moment about the mean".
        if center is None:
            center = self._mean(**kwargs)

        def integrand(x):
            return self._pdf(x, **kwargs)*(x - center)**order

        a, b = self._support(**kwargs)
        return mp.quad(integrand, (a, b))

    # The public methods below vectorize the scalar mpmath implementations
    # over array input and convert the result to the requested `dtype`.

    def pdf(self, x, dtype=np.float64):
        fun = np.vectorize(self._pdf)
        x = self._make_mpf_array(x)
        res = fun(x, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def cdf(self, x, dtype=np.float64):
        fun = np.vectorize(self._cdf)
        x = self._make_mpf_array(x)
        res = fun(x, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def sf(self, x, dtype=np.float64):
        fun = np.vectorize(self._sf)
        x = self._make_mpf_array(x)
        res = fun(x, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def ppf(self, x, guess=0, dtype=np.float64):
        fun = np.vectorize(self._ppf, excluded={1})  # don't vectorize guess
        x = self._make_mpf_array(x)
        res = fun(x, guess, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def isf(self, x, guess=0, dtype=np.float64):
        fun = np.vectorize(self._isf, excluded={1})  # don't vectorize guess
        x = self._make_mpf_array(x)
        res = fun(x, guess, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def logpdf(self, x, dtype=np.float64):
        fun = np.vectorize(self._logpdf)
        x = self._make_mpf_array(x)
        res = fun(x, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def logcdf(self, x, dtype=np.float64):
        fun = np.vectorize(self._logcdf)
        x = self._make_mpf_array(x)
        res = fun(x, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def logsf(self, x, dtype=np.float64):
        fun = np.vectorize(self._logsf)
        x = self._make_mpf_array(x)
        res = fun(x, **self._params)
        return np.asarray(res, dtype=dtype)[()]

    def support(self, dtype=np.float64):
        fun = np.vectorize(self._support)
        res = fun(**self._params)
        return np.asarray(res, dtype=dtype)[()]

    def entropy(self, dtype=np.float64):
        fun = np.vectorize(self._entropy)
        res = fun(**self._params)
        return np.asarray(res, dtype=dtype)[()]

    def mean(self, dtype=np.float64):
        fun = np.vectorize(self._mean)
        res = fun(**self._params)
        return np.asarray(res, dtype=dtype)[()]

    def var(self, dtype=np.float64):
        fun = np.vectorize(self._var)
        res = fun(**self._params)
        return np.asarray(res, dtype=dtype)[()]

    def skew(self, dtype=np.float64):
        fun = np.vectorize(self._skew)
        res = fun(**self._params)
        return np.asarray(res, dtype=dtype)[()]

    def kurtosis(self, dtype=np.float64):
        fun = np.vectorize(self._kurtosis)
        res = fun(**self._params)
        return np.asarray(res, dtype=dtype)[()]

    def moment(self, order, center=None, dtype=np.float64):
        fun = np.vectorize(self._moment)
        order = self._make_mpf_array(order)
        # BUG FIX: `center` was previously dropped from the call, so
        # `_moment` (which takes `center` positionally) raised TypeError for
        # every invocation. `None` broadcasts through np.vectorize and is
        # resolved to the mean inside `_moment`.
        res = fun(order, center, **self._params)
        return np.asarray(res, dtype=dtype)[()]
class SkewNormal(ReferenceDistribution):
    """Reference implementation of the SkewNormal distribution.

    Follow the example here to generate new reference distributions.
    Use the reference distributions to generate reference values of
    distributions functions. For now, copy-paste the output into unit
    tests. Full code to generate reference values does not need to be
    included as a comment in the test; just refer to the reference
    distribution used and the settings (e.g. mp.dps=50).
    """

    def __init__(self, *, a):
        # Overriding __init__ is not necessary, but it allows IDEs to hint at
        # shape parameters. All parameters are keyword only to avoid the
        # ambiguity inherent in positional arguments. The infrastructure does
        # not take care of location and scale; nonetheless, assume standard
        # location and scale. Typically, there is no need to test the SciPy
        # distribution infrastructure's treatment of location and scale
        # separately for a specific distribution.
        super().__init__(a=a)

    def _support(self, a):
        # Override _support if the support of the distribution is a subset of
        # the real line (here it is the whole line, shown for illustration).
        return -mp.inf, mp.inf

    def _pdf(self, x, a):
        # Write PDFs following a scholarly reference as closely as possible.
        # Trust mpmath for the accuracy, and don't worry about speed. What's
        # important is the ease of verifying the PDF against the reference. If
        # the result is inaccurate, it will not match SciPy's output (whether
        # SciPy is accurate or not). If this occurs, try increasing dps before
        # implementing a numerically favorable (but presumably more complex)
        # implementation.
        return 2 * mp.npdf(x) * mp.ncdf(a * x)
# Avoid overriding other methods unless the generic implementation is
# believed to be inaccurate (e.g. due to numerical difficulties) or it is
# too slow. Why? Less code to write, less code to review, and a guarantee
# that there is no *mistake* in the implementation (e.g. wrong formula).
class BetaPrime(ReferenceDistribution):
    """Reference implementation of the beta prime distribution."""

    def __init__(self, *, a, b):
        super().__init__(a=a, b=b)

    def _support(self, **kwargs):
        # Supported on the nonnegative reals.
        return mp.zero, mp.inf

    def _logpdf(self, x, a, b):
        # (a-1) log x - (a+b) log(1+x) - log B(a, b)
        log_norm = mp.log(mp.beta(a, b))
        return (a - mp.one)*mp.log(x) - (a + b)*mp.log1p(x) - log_norm

    def _pdf(self, x, a, b):
        return mp.exp(self._logpdf(x, a, b))

    def _sf(self, x, a, b):
        # Survival function via the regularized incomplete beta function.
        u = x/(1+x)
        return 1.0 - mp.betainc(a, b, 0, u, regularized=True)
class Burr(ReferenceDistribution):
    """Reference implementation of the Burr distribution."""

    def __init__(self, *, c, d):
        super().__init__(c=c, d=d)

    def _support(self, c, d):
        # Supported on the nonnegative reals.
        return 0, mp.inf

    def _pdf(self, x, c, d):
        xc = x ** (-c)
        return c * d * x ** (-c - 1) * (1 + xc) ** (-d - 1)

    def _ppf(self, p, guess, c, d):
        # Closed-form inverse CDF; no rootfinding needed.
        t = p**(-1.0/d) - 1
        return t**(-1.0/c)
class LogLaplace(ReferenceDistribution):
    """Reference implementation of the log-Laplace distribution."""

    def __init__(self, *, c):
        super().__init__(c=c)

    def _support(self, c):
        return 0, mp.inf

    def _pdf(self, x, c):
        # Piecewise power law with a break at x = 1.
        if x < mp.one:
            return c / 2 * x**(c - mp.one)
        return c / 2 * x**(-c - mp.one)

    def _ppf(self, q, guess, c):
        # Closed-form inverse CDF, piecewise around the median q = 0.5.
        if q < 0.5:
            return (2.0 * q)**(mp.one / c)
        return (2 * (mp.one - q))**(-mp.one / c)
class LogNormal(ReferenceDistribution):
    """Reference implementation of the lognormal distribution."""

    def __init__(self, *, s):
        super().__init__(s=s)

    def _support(self, s):
        return 0, mp.inf

    def _pdf(self, x, s):
        coeff = mp.one / (s * x * mp.sqrt(2 * mp.pi))
        z = mp.log(x) / s
        return coeff * mp.exp(-mp.one / 2 * z**2)

    def _cdf(self, x, s):
        # log(X)/s is standard normal, so reuse mpmath's normal CDF.
        z = mp.log(x) / s
        return mp.ncdf(z)
class Normal(ReferenceDistribution):
    """Reference implementation of the standard normal distribution."""

    def _pdf(self, x):
        # mpmath's built-in standard normal density.
        return mp.npdf(x)
class NormInvGauss(ReferenceDistribution):
    """Reference implementation of the normal-inverse Gaussian distribution
    (standard location/scale; shape parameters `alpha`, `beta`)."""

    def __init__(self, *, alpha, beta):
        super().__init__(alpha=alpha, beta=beta)

    def _pdf(self, x, alpha, beta):
        # Implemented as described in https://www.jstor.org/stable/4616433
        # Equations 2.1 - 2.3
        q = mp.sqrt(1 + x**2)
        a = mp.pi**-1 * alpha * mp.exp(mp.sqrt(alpha**2 - beta**2))
        return a * q**-1 * mp.besselk(1, alpha*q) * mp.exp(beta*x)
class Pearson3(ReferenceDistribution):
    """Reference implementation of the Pearson type III distribution,
    parameterized by `skew` (standard location/scale)."""

    def __init__(self, *, skew):
        super().__init__(skew=skew)

    def _pdf(self, x, skew):
        b = 2 / skew
        a = b**2
        c = -b
        res = abs(b)/mp.gamma(a) * (b*(x-c))**(a-1) * mp.exp(-b*(x-c))
        # Outside the support, b*(x - c) is negative and the non-integer
        # power produces a complex result; `abs(res.real) == res` holds only
        # for a nonnegative real value, so return 0 elsewhere.
        return res if abs(res.real) == res else 0
class StudentT(ReferenceDistribution):
    """Reference implementation of Student's t distribution."""

    def __init__(self, *, df):
        # BUG FIX: this method was named `__init` (missing the trailing
        # underscores), so it never overrode the base-class constructor and
        # was dead code. `StudentT(df=...)` keeps working identically because
        # the base `__init__` accepts arbitrary keyword shape parameters.
        super().__init__(df=df)

    def _pdf(self, x, df):
        return (mp.gamma((df + mp.one)/2)/(mp.sqrt(df * mp.pi) * mp.gamma(df/2))
                * (mp.one + x*x/df)**(-(df + mp.one)/2))
class TruncExpon(ReferenceDistribution):
    """Reference implementation of the exponential distribution truncated
    to [0, b]."""

    def __init__(self, *, b):
        super().__init__(b=b)

    def _support(self, b):
        return 0, b

    def _pdf(self, x, b):
        # -expm1(-b) = 1 - exp(-b) > 0, so the leading minus sign makes the
        # density positive; expm1 preserves precision for small b.
        return -mp.exp(-x)/mp.expm1(-b)

    def _sf(self, x, b):
        # Both numerator and denominator are negative, giving a result in
        # [0, 1].
        return (mp.exp(-b) - mp.exp(-x))/mp.expm1(-b)
| 15,416
| 34.770302
| 83
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_generation/studentized_range_mpmath_ref.py
|
# To run this script, run
# `python studentized_range_mpmath_ref.py`
# in the "scipy/stats/tests/" directory
# This script generates a JSON file "./data/studentized_range_mpmath_ref.json"
# that is used to compare the accuracy of `studentized_range` functions against
# precise (20 DOP) results generated using `mpmath`.
# Equations in this file have been taken from
# https://en.wikipedia.org/wiki/Studentized_range_distribution
# and have been checked against the following reference:
# Lund, R. E., and J. R. Lund. "Algorithm AS 190: Probabilities and
# Upper Quantiles for the Studentized Range." Journal of the Royal
# Statistical Society. Series C (Applied Statistics), vol. 32, no. 2,
# 1983, pp. 204-210. JSTOR, www.jstor.org/stable/2347300. Accessed 18
# Feb. 2021.
# Note: I would have prefered to use pickle rather than JSON, but -
# due to security concerns - decided against it.
import itertools
from collections import namedtuple
import json
import time
import os
from multiprocessing import Pool, cpu_count
from mpmath import gamma, pi, sqrt, quad, inf, mpf, mp
from mpmath import npdf as phi
from mpmath import ncdf as Phi
# Module-level configuration and test-case definitions for the reference-data
# generation script.
results_filepath = "data/studentized_range_mpmath_ref.json"
num_pools = max(cpu_count() - 1, 1)

MPResult = namedtuple("MPResult", ["src_case", "mp_result"])

CdfCase = namedtuple("CdfCase",
                     ["q", "k", "v", "expected_atol", "expected_rtol"])

MomentCase = namedtuple("MomentCase",
                        ["m", "k", "v", "expected_atol", "expected_rtol"])

# Load previously generated JSON results, or init a new dict if none exist
if os.path.isfile(results_filepath):
    # BUG FIX: use a context manager so the file handle is closed promptly;
    # the previous `json.load(open(...))` leaked the handle.
    with open(results_filepath) as results_file:
        res_dict = json.load(results_file)
else:
    res_dict = dict()

# Frame out data structure. Store data with the function type as a top level
# key to allow future expansion
res_dict["COMMENT"] = ("!!!!!! THIS FILE WAS AUTOGENERATED BY RUNNING "
                       "`python studentized_range_mpmath_ref.py` !!!!!!")
res_dict.setdefault("cdf_data", [])
res_dict.setdefault("pdf_data", [])
res_dict.setdefault("moment_data", [])

general_atol, general_rtol = 1e-11, 1e-11

# 24 significant digits; results are consumed at float64 precision.
mp.dps = 24

cp_q = [0.1, 1, 4, 10]
cp_k = [3, 10, 20]
cp_nu = [3, 10, 20, 50, 100, 120]

# Cartesian product of the parameter grids above; shared by CDF and PDF runs.
cdf_pdf_cases = [
    CdfCase(*case,
            general_atol,
            general_rtol)
    for case in
    itertools.product(cp_q, cp_k, cp_nu)
]

mom_atol, mom_rtol = 1e-9, 1e-9

# These are EXTREMELY slow - Multiple days each in worst case.
moment_cases = [
    MomentCase(i, 3, 10, mom_atol, mom_rtol)
    for i in range(5)
]
def write_data():
    """Writes the current res_dict to the target JSON file"""
    # Rewrites the whole file each time so partial progress survives a crash.
    with open(results_filepath, mode="w") as f:
        json.dump(res_dict, f, indent=2)
def to_dict(named_tuple):
    """Return the fields of *named_tuple* as a plain dict."""
    return dict(named_tuple._asdict())
def mp_res_to_dict(mp_result):
    """Formats an MPResult namedtuple into a dict for JSON dumping"""
    # np assert can't handle mpf, so take the accuracy hit here.
    return {"src_case": to_dict(mp_result.src_case),
            "mp_result": float(mp_result.mp_result)}
def cdf_mp(q, k, nu):
    """Straightforward implementation of studentized range CDF"""
    q, k, nu = mpf(q), mpf(k), mpf(nu)

    def inner(s, z):
        # Inner factor of the CDF integrand (see the references in the
        # module header).
        return phi(z) * (Phi(z + q * s) - Phi(z)) ** (k - 1)

    def outer(s, z):
        # Weight the inner factor by the density of the scaled sample
        # standard deviation s.
        return s ** (nu - 1) * phi(sqrt(nu) * s) * inner(s, z)

    def whole(s, z):
        # Apply the normalization constant.
        return (sqrt(2 * pi) * k * nu ** (nu / 2)
                / (gamma(nu / 2) * 2 ** (nu / 2 - 1)) * outer(s, z))

    # 2-d quadrature over s in (0, inf) and z in (-inf, inf).
    res = quad(whole, [0, inf], [-inf, inf],
               method="gauss-legendre", maxdegree=10)
    return res
def pdf_mp(q, k, nu):
    """Straightforward implementation of studentized range PDF"""
    q, k, nu = mpf(q), mpf(k), mpf(nu)

    def inner(s, z):
        # Inner factor of the PDF integrand; note the exponent k - 2 and the
        # extra phi(z + q*s) factor relative to the CDF integrand.
        return phi(z + q * s) * phi(z) * (Phi(z + q * s) - Phi(z)) ** (k - 2)

    def outer(s, z):
        return s ** nu * phi(sqrt(nu) * s) * inner(s, z)

    def whole(s, z):
        # Apply the normalization constant (includes the extra k - 1 factor).
        return (sqrt(2 * pi) * k * (k - 1) * nu ** (nu / 2)
                / (gamma(nu / 2) * 2 ** (nu / 2 - 1)) * outer(s, z))

    # 2-d quadrature over s in (0, inf) and z in (-inf, inf).
    res = quad(whole, [0, inf], [-inf, inf],
               method="gauss-legendre", maxdegree=10)
    return res
def moment_mp(m, k, nu):
    """Implementation of the studentized range moment"""
    m, k, nu = mpf(m), mpf(k), mpf(nu)

    def inner(q, s, z):
        return phi(z + q * s) * phi(z) * (Phi(z + q * s) - Phi(z)) ** (k - 2)

    def outer(q, s, z):
        return s ** nu * phi(sqrt(nu) * s) * inner(q, s, z)

    def pdf(q, s, z):
        # Same integrand as pdf_mp, with q as an explicit variable.
        return (sqrt(2 * pi) * k * (k - 1) * nu ** (nu / 2)
                / (gamma(nu / 2) * 2 ** (nu / 2 - 1)) * outer(q, s, z))

    def whole(q, s, z):
        # m-th raw moment: integrate q**m against the PDF.
        return q ** m * pdf(q, s, z)

    # 3-d quadrature; this is what makes moment cases extremely slow.
    res = quad(whole, [0, inf], [0, inf], [-inf, inf],
               method="gauss-legendre", maxdegree=10)
    return res
def result_exists(set_key, case):
    """Searches the results dict for a result in the set that matches a case.
    Returns True if such a case exists."""
    if set_key not in res_dict:
        raise ValueError(f"{set_key} not present in data structure!")

    case_dict = to_dict(case)
    # Compare stored source-case dicts against this case's dict.
    return any(res["src_case"] == case_dict for res in res_dict[set_key])
def run(case, run_lambda, set_key, index=0, total_cases=0):
    """Runs the single passed case, returning an mp dictionary and index"""
    # Executed in a worker process; `index`/`total_cases` are only for the
    # progress message.
    t_start = time.perf_counter()

    res = run_lambda(case)

    print(f"Finished {index + 1}/{total_cases} in batch. "
          f"(Took {time.perf_counter() - t_start}s)")

    return index, set_key, mp_res_to_dict(MPResult(case, res))
def write_result(res):
    """A callback for completed jobs. Inserts and writes a calculated result
    to file."""
    # Runs in the main process (multiprocessing callback), so mutating the
    # shared res_dict here is safe.
    index, set_key, result_dict = res
    res_dict[set_key].insert(index, result_dict)
    write_data()
def run_cases(cases, run_lambda, set_key):
    """Runs an array of cases and writes to file"""

    # Generate jobs to run from cases that do not have a result in
    # the previously loaded JSON.
    job_arg = [(case, run_lambda, set_key, index, len(cases))
               for index, case in enumerate(cases)
               if not result_exists(set_key, case)]

    print(f"{len(cases) - len(job_arg)}/{len(cases)} cases won't be "
          f"calculated because their results already exist.")

    # `with` guarantees the pool is cleaned up even if scheduling fails.
    # close() + join() must still run *inside* the block, because
    # Pool.__exit__ calls terminate(), which would kill unfinished workers.
    with Pool(num_pools) as pool:
        jobs = []
        # Run all using multiprocess
        for args in job_arg:
            jobs.append(pool.apply_async(run, args=args,
                                         callback=write_result))
        pool.close()
        pool.join()
def run_pdf(case):
    # Adapter so run_cases can dispatch a CdfCase to the PDF implementation.
    return pdf_mp(case.q, case.k, case.v)
def run_cdf(case):
    # Adapter so run_cases can dispatch a CdfCase to the CDF implementation.
    return cdf_mp(case.q, case.k, case.v)
def run_moment(case):
    # Adapter so run_cases can dispatch a MomentCase to moment_mp.
    return moment_mp(case.m, case.k, case.v)
def main():
    """Run all outstanding PDF, CDF, and moment cases, writing results
    incrementally to the JSON file."""
    t_start = time.perf_counter()

    total_cases = 2 * len(cdf_pdf_cases) + len(moment_cases)
    print(f"Processing {total_cases} test cases")

    print(f"Running 1st batch ({len(cdf_pdf_cases)} PDF cases). "
          f"These take about 30s each.")
    run_cases(cdf_pdf_cases, run_pdf, "pdf_data")

    print(f"Running 2nd batch ({len(cdf_pdf_cases)} CDF cases). "
          f"These take about 30s each.")
    run_cases(cdf_pdf_cases, run_cdf, "cdf_data")

    print(f"Running 3rd batch ({len(moment_cases)} moment cases). "
          f"These take about anywhere from a few hours to days each.")
    run_cases(moment_cases, run_moment, "moment_data")

    print(f"Test data generated in {time.perf_counter() - t_start}s")
# Only run the (multi-day) generation when executed as a script, never on
# import; also required for the multiprocessing workers to import this module.
if __name__ == "__main__":
    main()
| 7,631
| 29.166008
| 79
|
py
|
scipy
|
scipy-main/scipy/stats/tests/test_generation/reference_distribution_infrastructure_tests.py
|
# Note: this file is to be run locally, not on CI. It is only for tests of the
# reference distribution *infrastructure*; unit tests of SciPy distributions
# do not go here.
import numpy as np
import pytest
from scipy import stats
from numpy.testing import assert_allclose
import scipy.stats.tests.test_generation.reference_distributions as rd
import mpmath
from mpmath import mp
def test_basic():
    # Guard rails: instantiation must fail at the default (too low) precision
    # and when `mpmath.dps` (rather than `mpmath.mp.dps`) was assigned.
    message = "`mpmath.mp.dps <= 15`. Set a higher precision..."
    with pytest.raises(RuntimeError, match=message):
        rd.Normal()

    mpmath.dps = 20
    message = "`mpmath.dps` has been assigned. This is not intended usage..."
    with pytest.raises(RuntimeError, match=message):
        rd.Normal()
    del mpmath.dps  # restore state so later instantiations succeed

    mp.dps = 20  # high enough to pass, not unreasonably slow

    # Basic tests of the mpmath distribution infrastructure using a SciPy
    # distribution as a reference. The intent is just to make sure that the
    # implementations do not have *mistakes* and that broadcasting is working
    # as expected. The accuracy is what it is.
    rng = np.random.default_rng(6716188855217730280)
    x = rng.random(size=3)
    a = rng.random(size=(2, 1))  # broadcasts against x to shape (2, 3)
    rtol = 1e-15

    dist = rd.SkewNormal(a=a)
    dist_ref = stats.skewnorm(a)
    assert_allclose(dist.pdf(x), dist_ref.pdf(x), rtol=rtol)
    assert_allclose(dist.cdf(x), dist_ref.cdf(x), rtol=rtol)
    assert_allclose(dist.sf(x), dist_ref.sf(x), rtol=rtol)
    assert_allclose(dist.ppf(x), dist_ref.ppf(x), rtol=rtol)
    assert_allclose(dist.isf(x), dist_ref.isf(x), rtol=rtol)
    assert_allclose(dist.logpdf(x), dist_ref.logpdf(x), rtol=rtol)
    assert_allclose(dist.logcdf(x), dist_ref.logcdf(x), rtol=rtol)
    assert_allclose(dist.logsf(x), dist_ref.logsf(x), rtol=rtol)
    assert_allclose(dist.support(), dist_ref.support(), rtol=rtol)
    assert_allclose(dist.entropy(), dist_ref.entropy(), rtol=rtol)
    assert_allclose(dist.mean(), dist_ref.mean(), rtol=rtol)
    assert_allclose(dist.var(), dist_ref.var(), rtol=rtol)
    assert_allclose(dist.skew(), dist_ref.stats('s'), rtol=rtol)
    assert_allclose(dist.kurtosis(), dist_ref.stats('k'), rtol=rtol)
def test_complementary_method_use():
    # Show that complementary methods are used as expected.
    # E.g., use 1 - CDF to compute SF if CDF is overridden but SF is not
    mp.dps = 50
    x = np.linspace(0, 1, 10)

    # Each throwaway subclass overrides exactly one of a complementary pair;
    # the generic base implementation must delegate to the override.
    class MyDist(rd.ReferenceDistribution):
        def _cdf(self, x):
            return x

    dist = MyDist()
    assert_allclose(dist.sf(x), 1 - dist.cdf(x))

    class MyDist(rd.ReferenceDistribution):
        def _sf(self, x):
            return 1-x

    dist = MyDist()
    assert_allclose(dist.cdf(x), 1 - dist.sf(x))

    class MyDist(rd.ReferenceDistribution):
        def _ppf(self, x, guess):
            return x

    dist = MyDist()
    assert_allclose(dist.isf(x), dist.ppf(1-x))

    class MyDist(rd.ReferenceDistribution):
        def _isf(self, x, guess):
            return 1-x

    dist = MyDist()
    assert_allclose(dist.ppf(x), dist.isf(1-x))
| 3,041
| 32.428571
| 78
|
py
|
scipy
|
scipy-main/scipy/stats/tests/data/fisher_exact_results_from_r.py
|
# DO NOT EDIT THIS FILE!
# This file was generated by the R script
# generate_fisher_exact_results_from_r.R
# The script was run with R version 3.6.2 (2019-12-12) at 2020-11-09 06:16:09
from collections import namedtuple
import numpy as np
Inf = np.inf  # shorthand matching R's `Inf` in the generated table below
# Inputs of each R `fisher.test` call recorded in `data`.
Parameters = namedtuple('Parameters',
                        ['table', 'confidence_level', 'alternative'])
# Corresponding R outputs: p-value, conditional MLE odds ratio, and the
# odds-ratio confidence interval at the requested confidence level.
RResults = namedtuple('RResults',
                      ['pvalue', 'conditional_odds_ratio',
                       'conditional_odds_ratio_ci'])
data = [
(Parameters(table=[[100, 2], [1000, 5]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.1300759363430016,
conditional_odds_ratio=0.25055839934223,
conditional_odds_ratio_ci=(0.04035202926536294,
2.662846672960251))),
(Parameters(table=[[2, 7], [8, 2]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.02301413756522116,
conditional_odds_ratio=0.0858623513573622,
conditional_odds_ratio_ci=(0.004668988338943325,
0.895792956493601))),
(Parameters(table=[[5, 1], [10, 10]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.1973244147157191,
conditional_odds_ratio=4.725646047336587,
conditional_odds_ratio_ci=(0.4153910882532168,
259.2593661129417))),
(Parameters(table=[[5, 15], [20, 20]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.09580440012477633,
conditional_odds_ratio=0.3394396617440851,
conditional_odds_ratio_ci=(0.08056337526385809,
1.22704788545557))),
(Parameters(table=[[5, 16], [16, 25]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.2697004098849359,
conditional_odds_ratio=0.4937791394540491,
conditional_odds_ratio_ci=(0.1176691231650079,
1.787463657995973))),
(Parameters(table=[[10, 5], [10, 1]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.1973244147157192,
conditional_odds_ratio=0.2116112781158479,
conditional_odds_ratio_ci=(0.003857141267422399,
2.407369893767229))),
(Parameters(table=[[10, 5], [10, 0]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.06126482213438735,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
1.451643573543705))),
(Parameters(table=[[5, 0], [1, 4]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.04761904761904762,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(1.024822256141754,
Inf))),
(Parameters(table=[[0, 5], [1, 4]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
39.00054996869288))),
(Parameters(table=[[5, 1], [0, 4]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.04761904761904761,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(1.024822256141754,
Inf))),
(Parameters(table=[[0, 1], [3, 2]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
39.00054996869287))),
(Parameters(table=[[200, 7], [8, 300]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=2.005657880389071e-122,
conditional_odds_ratio=977.7866978606228,
conditional_odds_ratio_ci=(349.2595113327733,
3630.382605689872))),
(Parameters(table=[[28, 21], [6, 1957]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=5.728437460831947e-44,
conditional_odds_ratio=425.2403028434684,
conditional_odds_ratio_ci=(152.4166024390096,
1425.700792178893))),
(Parameters(table=[[190, 800], [200, 900]],
confidence_level=0.95,
alternative='two.sided'),
RResults(pvalue=0.574111858126088,
conditional_odds_ratio=1.068697577856801,
conditional_odds_ratio_ci=(0.8520462587912048,
1.340148950273938))),
(Parameters(table=[[100, 2], [1000, 5]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.1300759363430016,
conditional_odds_ratio=0.25055839934223,
conditional_odds_ratio_ci=(0.02502345007115455,
6.304424772117853))),
(Parameters(table=[[2, 7], [8, 2]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.02301413756522116,
conditional_odds_ratio=0.0858623513573622,
conditional_odds_ratio_ci=(0.001923034001462487,
1.53670836950172))),
(Parameters(table=[[5, 1], [10, 10]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.1973244147157191,
conditional_odds_ratio=4.725646047336587,
conditional_odds_ratio_ci=(0.2397970951413721,
1291.342011095509))),
(Parameters(table=[[5, 15], [20, 20]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.09580440012477633,
conditional_odds_ratio=0.3394396617440851,
conditional_odds_ratio_ci=(0.05127576113762925,
1.717176678806983))),
(Parameters(table=[[5, 16], [16, 25]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.2697004098849359,
conditional_odds_ratio=0.4937791394540491,
conditional_odds_ratio_ci=(0.07498546954483619,
2.506969905199901))),
(Parameters(table=[[10, 5], [10, 1]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.1973244147157192,
conditional_odds_ratio=0.2116112781158479,
conditional_odds_ratio_ci=(0.0007743881879531337,
4.170192301163831))),
(Parameters(table=[[10, 5], [10, 0]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.06126482213438735,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
2.642491011905582))),
(Parameters(table=[[5, 0], [1, 4]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.04761904761904762,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0.496935393325443,
Inf))),
(Parameters(table=[[0, 5], [1, 4]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
198.019801980198))),
(Parameters(table=[[5, 1], [0, 4]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.04761904761904761,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0.496935393325443,
Inf))),
(Parameters(table=[[0, 1], [3, 2]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
198.019801980198))),
(Parameters(table=[[200, 7], [8, 300]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=2.005657880389071e-122,
conditional_odds_ratio=977.7866978606228,
conditional_odds_ratio_ci=(270.0334165523604,
5461.333333326708))),
(Parameters(table=[[28, 21], [6, 1957]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=5.728437460831947e-44,
conditional_odds_ratio=425.2403028434684,
conditional_odds_ratio_ci=(116.7944750275836,
1931.995993191814))),
(Parameters(table=[[190, 800], [200, 900]],
confidence_level=0.99,
alternative='two.sided'),
RResults(pvalue=0.574111858126088,
conditional_odds_ratio=1.068697577856801,
conditional_odds_ratio_ci=(0.7949398282935892,
1.436229679394333))),
(Parameters(table=[[100, 2], [1000, 5]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.1300759363430016,
conditional_odds_ratio=0.25055839934223,
conditional_odds_ratio_ci=(0,
1.797867027270803))),
(Parameters(table=[[2, 7], [8, 2]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.0185217259520665,
conditional_odds_ratio=0.0858623513573622,
conditional_odds_ratio_ci=(0,
0.6785254803404526))),
(Parameters(table=[[5, 1], [10, 10]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.9782608695652173,
conditional_odds_ratio=4.725646047336587,
conditional_odds_ratio_ci=(0,
127.8497388102893))),
(Parameters(table=[[5, 15], [20, 20]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.05625775074399956,
conditional_odds_ratio=0.3394396617440851,
conditional_odds_ratio_ci=(0,
1.032332939718425))),
(Parameters(table=[[5, 16], [16, 25]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.1808979350599346,
conditional_odds_ratio=0.4937791394540491,
conditional_odds_ratio_ci=(0,
1.502407513296985))),
(Parameters(table=[[10, 5], [10, 1]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.1652173913043479,
conditional_odds_ratio=0.2116112781158479,
conditional_odds_ratio_ci=(0,
1.820421051562392))),
(Parameters(table=[[10, 5], [10, 0]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.0565217391304348,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
1.06224603077045))),
(Parameters(table=[[5, 0], [1, 4]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[0, 5], [1, 4]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.5,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
19.00192394479939))),
(Parameters(table=[[5, 1], [0, 4]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[0, 1], [3, 2]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.4999999999999999,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
19.00192394479939))),
(Parameters(table=[[200, 7], [8, 300]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=977.7866978606228,
conditional_odds_ratio_ci=(0,
3045.460216525746))),
(Parameters(table=[[28, 21], [6, 1957]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=425.2403028434684,
conditional_odds_ratio_ci=(0,
1186.440170942579))),
(Parameters(table=[[190, 800], [200, 900]],
confidence_level=0.95,
alternative='less'),
RResults(pvalue=0.7416227010368963,
conditional_odds_ratio=1.068697577856801,
conditional_odds_ratio_ci=(0,
1.293551891610822))),
(Parameters(table=[[100, 2], [1000, 5]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.1300759363430016,
conditional_odds_ratio=0.25055839934223,
conditional_odds_ratio_ci=(0,
4.375946050832565))),
(Parameters(table=[[2, 7], [8, 2]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.0185217259520665,
conditional_odds_ratio=0.0858623513573622,
conditional_odds_ratio_ci=(0,
1.235282118191202))),
(Parameters(table=[[5, 1], [10, 10]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.9782608695652173,
conditional_odds_ratio=4.725646047336587,
conditional_odds_ratio_ci=(0,
657.2063583945989))),
(Parameters(table=[[5, 15], [20, 20]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.05625775074399956,
conditional_odds_ratio=0.3394396617440851,
conditional_odds_ratio_ci=(0,
1.498867660683128))),
(Parameters(table=[[5, 16], [16, 25]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.1808979350599346,
conditional_odds_ratio=0.4937791394540491,
conditional_odds_ratio_ci=(0,
2.186159386716762))),
(Parameters(table=[[10, 5], [10, 1]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.1652173913043479,
conditional_odds_ratio=0.2116112781158479,
conditional_odds_ratio_ci=(0,
3.335351451901569))),
(Parameters(table=[[10, 5], [10, 0]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.0565217391304348,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
2.075407697450433))),
(Parameters(table=[[5, 0], [1, 4]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[0, 5], [1, 4]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.5,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
99.00009507969122))),
(Parameters(table=[[5, 1], [0, 4]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[0, 1], [3, 2]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.4999999999999999,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
99.00009507969123))),
(Parameters(table=[[200, 7], [8, 300]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=977.7866978606228,
conditional_odds_ratio_ci=(0,
4503.078257659934))),
(Parameters(table=[[28, 21], [6, 1957]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=1,
conditional_odds_ratio=425.2403028434684,
conditional_odds_ratio_ci=(0,
1811.766127544222))),
(Parameters(table=[[190, 800], [200, 900]],
confidence_level=0.99,
alternative='less'),
RResults(pvalue=0.7416227010368963,
conditional_odds_ratio=1.068697577856801,
conditional_odds_ratio_ci=(0,
1.396522811516685))),
(Parameters(table=[[100, 2], [1000, 5]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.979790445314723,
conditional_odds_ratio=0.25055839934223,
conditional_odds_ratio_ci=(0.05119649909830196,
Inf))),
(Parameters(table=[[2, 7], [8, 2]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.9990149169715733,
conditional_odds_ratio=0.0858623513573622,
conditional_odds_ratio_ci=(0.007163749169069961,
Inf))),
(Parameters(table=[[5, 1], [10, 10]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.1652173913043478,
conditional_odds_ratio=4.725646047336587,
conditional_odds_ratio_ci=(0.5493234651081089,
Inf))),
(Parameters(table=[[5, 15], [20, 20]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.9849086665340765,
conditional_odds_ratio=0.3394396617440851,
conditional_odds_ratio_ci=(0.1003538933958604,
Inf))),
(Parameters(table=[[5, 16], [16, 25]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.9330176609214881,
conditional_odds_ratio=0.4937791394540491,
conditional_odds_ratio_ci=(0.146507416280863,
Inf))),
(Parameters(table=[[10, 5], [10, 1]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.9782608695652174,
conditional_odds_ratio=0.2116112781158479,
conditional_odds_ratio_ci=(0.007821681994077808,
Inf))),
(Parameters(table=[[10, 5], [10, 0]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[5, 0], [1, 4]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.02380952380952382,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(1.487678929918272,
Inf))),
(Parameters(table=[[0, 5], [1, 4]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[5, 1], [0, 4]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.0238095238095238,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(1.487678929918272,
Inf))),
(Parameters(table=[[0, 1], [3, 2]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[200, 7], [8, 300]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=2.005657880388915e-122,
conditional_odds_ratio=977.7866978606228,
conditional_odds_ratio_ci=(397.784359748113,
Inf))),
(Parameters(table=[[28, 21], [6, 1957]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=5.728437460831983e-44,
conditional_odds_ratio=425.2403028434684,
conditional_odds_ratio_ci=(174.7148056880929,
Inf))),
(Parameters(table=[[190, 800], [200, 900]],
confidence_level=0.95,
alternative='greater'),
RResults(pvalue=0.2959825901308897,
conditional_odds_ratio=1.068697577856801,
conditional_odds_ratio_ci=(0.8828406663967776,
Inf))),
(Parameters(table=[[100, 2], [1000, 5]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.979790445314723,
conditional_odds_ratio=0.25055839934223,
conditional_odds_ratio_ci=(0.03045407081240429,
Inf))),
(Parameters(table=[[2, 7], [8, 2]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.9990149169715733,
conditional_odds_ratio=0.0858623513573622,
conditional_odds_ratio_ci=(0.002768053063547901,
Inf))),
(Parameters(table=[[5, 1], [10, 10]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.1652173913043478,
conditional_odds_ratio=4.725646047336587,
conditional_odds_ratio_ci=(0.2998184792279909,
Inf))),
(Parameters(table=[[5, 15], [20, 20]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.9849086665340765,
conditional_odds_ratio=0.3394396617440851,
conditional_odds_ratio_ci=(0.06180414342643172,
Inf))),
(Parameters(table=[[5, 16], [16, 25]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.9330176609214881,
conditional_odds_ratio=0.4937791394540491,
conditional_odds_ratio_ci=(0.09037094010066403,
Inf))),
(Parameters(table=[[10, 5], [10, 1]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.9782608695652174,
conditional_odds_ratio=0.2116112781158479,
conditional_odds_ratio_ci=(0.001521592095430679,
Inf))),
(Parameters(table=[[10, 5], [10, 0]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[5, 0], [1, 4]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.02380952380952382,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0.6661157890359722,
Inf))),
(Parameters(table=[[0, 5], [1, 4]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[5, 1], [0, 4]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.0238095238095238,
conditional_odds_ratio=Inf,
conditional_odds_ratio_ci=(0.6661157890359725,
Inf))),
(Parameters(table=[[0, 1], [3, 2]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=1,
conditional_odds_ratio=0,
conditional_odds_ratio_ci=(0,
Inf))),
(Parameters(table=[[200, 7], [8, 300]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=2.005657880388915e-122,
conditional_odds_ratio=977.7866978606228,
conditional_odds_ratio_ci=(297.9619252357688,
Inf))),
(Parameters(table=[[28, 21], [6, 1957]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=5.728437460831983e-44,
conditional_odds_ratio=425.2403028434684,
conditional_odds_ratio_ci=(130.3213490295859,
Inf))),
(Parameters(table=[[190, 800], [200, 900]],
confidence_level=0.99,
alternative='greater'),
RResults(pvalue=0.2959825901308897,
conditional_odds_ratio=1.068697577856801,
conditional_odds_ratio_ci=(0.8176272148267533,
Inf))),
]
| 27,349
| 43.983553
| 77
|
py
|
scipy
|
scipy-main/scipy/stats/tests/data/_mvt.py
|
# flake8: noqa
import math
import numpy as np
from scipy import special
from scipy.stats._qmc import primes_from_2_to
def _primes(n):
# Defined to facilitate comparison between translation and source
# In Matlab, primes(10.5) -> first four primes, primes(11.5) -> first five
return primes_from_2_to(math.ceil(n))
def _gaminv(a, b):
# Defined to facilitate comparison between translation and source
# Matlab's `gaminv` is like `special.gammaincinv` but args are reversed
return special.gammaincinv(b, a)
def _qsimvtv(m, nu, sigma, a, b, rng):
    """Estimates the multivariate t CDF using randomized QMC.

    Parameters
    ----------
    m : int
        The (approximate) total number of integration points.
    nu : float
        Degrees of freedom.
    sigma : ndarray
        A 2D positive semidefinite covariance matrix.
    a : ndarray
        Lower integration limits.
    b : ndarray
        Upper integration limits.
    rng : Generator
        Pseudorandom number generator.

    Returns
    -------
    p : float
        The estimated CDF.
    e : float
        An absolute error estimate.
    """
    # _qsimvtv is a Python translation of the Matlab function qsimvtv,
    # semicolons and all.
    #
    # This function uses an algorithm given in the paper
    #    "Comparison of Methods for the Numerical Computation of
    #     Multivariate t Probabilities", in
    #     J. of Computational and Graphical Stat., 11(2002), pp. 950-971, by
    #     Alan Genz and Frank Bretz
    #
    # The primary references for the numerical integration are
    #    "On a Number-Theoretical Integration Method"
    #    H. Niederreiter, Aequationes Mathematicae, 8(1972), pp. 304-11.
    # and
    #    "Randomization of Number Theoretic Methods for Multiple Integration"
    #    R. Cranley & T.N.L. Patterson, SIAM J Numer Anal, 13(1976), pp. 904-14.
    #
    # Alan Genz is the author of this function and following Matlab functions.
    #          Alan Genz, WSU Math, PO Box 643113, Pullman, WA 99164-3113
    #          Email : alangenz@wsu.edu
    #
    # Copyright (C) 2013, Alan Genz,  All rights reserved.
    #
    # Redistribution and use in source and binary forms, with or without
    # modification, are permitted provided the following conditions are met:
    #   1. Redistributions of source code must retain the above copyright
    #      notice, this list of conditions and the following disclaimer.
    #   2. Redistributions in binary form must reproduce the above copyright
    #      notice, this list of conditions and the following disclaimer in
    #      the documentation and/or other materials provided with the
    #      distribution.
    #   3. The contributor name(s) may not be used to endorse or promote
    #      products derived from this software without specific prior
    #      written permission.
    # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
    # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
    # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
    # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
    # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
    # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF USE
    # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    # Initialization: scale the limits by sqrt(nu) (or 1) and factor sigma
    # into a permuted/scaled lower Cholesky factor with matching limits.
    sn = max(1, math.sqrt(nu)); ch, az, bz = _chlrps(sigma, a/sn, b/sn)
    # N randomly-shifted replicates of a P-point rule, m ~ N*P points total.
    n = len(sigma); N = 10; P = math.ceil(m/N); on = np.ones(P); p = 0; e = 0
    ps = np.sqrt(_primes(5*n*math.log(n+4)/4)); q = ps[:, np.newaxis]  # Richtmyer gens.
    # Randomization loop for ns samples
    c = None; dc = None
    for S in range(N):
        vp = on.copy(); s = np.zeros((n, P))
        for i in range(n):
            x = np.abs(2*np.mod(q[i]*np.arange(1, P+1) + rng.random(), 1)-1)  # periodizing transform
            if i == 0:
                r = on
                if nu > 0:
                    # inverse-CDF draw of the radial (chi) factor for the t
                    r = np.sqrt(2*_gaminv(x, nu/2))
            else:
                # condition coordinate i on the previously sampled ones
                y = _Phinv(c + x*dc)
                s[i:] += ch[i:, i-1:i] * y
            si = s[i, :]; c = on.copy(); ai = az[i]*r - si; d = on.copy(); bi = bz[i]*r - si
            # evaluate Phi only for |z| < 9; beyond that the tails are
            # rounded to 0 (below -9) or left at 1 (above 9)
            c[ai <= -9] = 0; tl = abs(ai) < 9; c[tl] = _Phi(ai[tl])
            d[bi <= -9] = 0; tl = abs(bi) < 9; d[tl] = _Phi(bi[tl])
            dc = d - c; vp = vp * dc
        # running mean of the randomized estimates and its variance
        d = (np.mean(vp) - p)/(S + 1); p = p + d; e = (S - 1)*e/(S + 1) + d**2
    # NOTE(review): the comment below claims 3x the standard error, but no
    # factor of 3 is applied here — confirm against Genz's qsimvtv.m, which
    # computes `e = 3*sqrt(e)`.
    e = math.sqrt(e)  # error estimate is 3 times std error with N samples.
    return p, e
# Standard statistical normal distribution functions
def _Phi(z):
return special.ndtr(z)
def _Phinv(p):
return special.ndtri(p)
def _chlrps(R, a, b):
    """
    Computes permuted and scaled lower Cholesky factor c for R which may be
    singular, also permuting and scaling integration limit vectors a and b.

    Parameters
    ----------
    R : ndarray
        Symmetric positive semidefinite (possibly singular) covariance matrix.
    a, b : ndarray
        Lower and upper integration limits.

    Returns
    -------
    c : ndarray
        Permuted and scaled lower Cholesky factor of ``R``.
    ap, bp : ndarray
        ``a`` and ``b`` under the same permutation and scaling.

    Notes
    -----
    Translation of the pivoted-Cholesky helper accompanying Genz's qsimvtv;
    the statement order is kept close to the Matlab source.  (The only code
    change from the original translation is the removal of a dead ``pass``.)
    """
    ep = 1e-10  # singularity tolerance
    eps = np.finfo(R.dtype).eps

    # Normalize by the diagonal so R has unit diagonal where possible.
    n = len(R); c = R.copy(); ap = a.copy(); bp = b.copy(); d = np.sqrt(np.maximum(np.diag(c), 0))
    for i in range(n):
        if d[i] > 0:
            c[:, i] /= d[i]; c[i, :] /= d[i]
            ap[i] /= d[i]; bp[i] /= d[i]
    y = np.zeros((n, 1)); sqtp = math.sqrt(2*math.pi)
    for k in range(n):
        # Pivot: pick the remaining variable with the smallest conditional
        # probability mass Phi(bi) - Phi(ai).
        im = k; ckk = 0; dem = 1; s = 0
        for i in range(k, n):
            if c[i, i] > eps:
                cii = math.sqrt(max(c[i, i], 0))
                if i > 0: s = c[i, :k] @ y[:k]
                ai = (ap[i]-s)/cii; bi = (bp[i]-s)/cii; de = _Phi(bi)-_Phi(ai)
                if de <= dem:
                    ckk = cii; dem = de; am = ai; bm = bi; im = i
        if im > k:
            # Swap rows/columns k and im of c and the limit vectors.
            ap[[im, k]] = ap[[k, im]]; bp[[im, k]] = bp[[k, im]]; c[im, im] = c[k, k]
            t = c[im, :k].copy(); c[im, :k] = c[k, :k]; c[k, :k] = t
            t = c[im+1:, im].copy(); c[im+1:, im] = c[im+1:, k]; c[im+1:, k] = t
            t = c[k+1:im, k].copy(); c[k+1:im, k] = c[im, k+1:im].T; c[im, k+1:im] = t.T
        if ckk > ep*(k+1):
            # Regular step: eliminate column k.
            c[k, k] = ckk; c[k, k+1:] = 0
            for i in range(k+1, n):
                c[i, k] = c[i, k]/ckk; c[i, k+1:i+1] = c[i, k+1:i+1] - c[i, k]*c[k+1:i+1, k].T
            # Expected value of the truncated standard normal on [am, bm].
            if abs(dem) > ep:
                y[k] = (np.exp(-am**2/2) - np.exp(-bm**2/2)) / (sqtp*dem)
            else:
                y[k] = (am + bm) / 2
            if am < -10:
                y[k] = bm
            elif bm > 10:
                y[k] = am
            c[k, :k+1] /= ckk; ap[k] /= ckk; bp[k] /= ckk
        else:
            # (Numerically) singular pivot: zero out the column.
            c[k:, k] = 0; y[k] = (ap[k] + bp[k])/2
    return c, ap, bp
| 6,920
| 39.00578
| 101
|
py
|
scipy
|
scipy-main/scipy/stats/_unuran/setup.py
|
import os
def unuran_pre_build_hook(build_clib, build_info):
    """Probe the C compiler and amend ``build_info`` for building UNU.RAN.

    Adds (in place): the compiler's C-standard flag, autoconf-style
    ``HAVE_*`` macros for each platform header that compiles, and ``m``
    to the link libraries when ``-lm`` is accepted.

    Parameters
    ----------
    build_clib : distutils build_clib command
        Supplies the configured C compiler via ``build_clib.compiler``.
    build_info : dict
        Library build dictionary; the ``extra_compiler_args``, ``macros``
        and ``libraries`` entries are created if absent and extended.
    """
    from scipy._build_utils.compiler_helper import (get_c_std_flag,
                                                    try_compile, has_flag)
    c = build_clib.compiler

    # C-standard flag (if the compiler needs one).
    c_flag = get_c_std_flag(c)
    if c_flag is not None:
        build_info.setdefault("extra_compiler_args", []).append(c_flag)

    # Map each platform header to the autoconf macros UNU.RAN expects.
    deps = {"unistd.h": ["HAVE_DECL_GETOPT", "HAVE_UNISTD_H"],
            "dlfcn.h": ["HAVE_DLFCN_H"],
            "sys/time.h": ["HAVE_GETTIMEOFDAY", "HAVE_SYS_TIME_H",
                           "TIME_WITH_SYS_TIME"],
            "memory.h": ["HAVE_MEMORY_H"],
            "strings.h": ["HAVE_STRCASECMP", "HAVE_STRINGS_H"],
            "sys/stat.h": ["HAVE_SYS_STAT_H"],
            "sys/types.h": ["HAVE_SYS_TYPES_H"]}
    # setdefault guards against a missing "macros" entry, which the
    # original unconditional `build_info["macros"].append` would not.
    macros = build_info.setdefault("macros", [])
    for header, header_macros in deps.items():
        # Compile a minimal program that includes the header to test for it.
        has_dep = try_compile(c, code=f"#include <{header}>\n"
                                      "int main(int argc, char **argv){}")
        if has_dep:
            macros.extend((macro, "1") for macro in header_macros)

    # Link against libm where it is a separate library.
    if has_flag(c, flag="-lm"):
        build_info.setdefault("libraries", []).append("m")
def _get_sources(dirs):
sources = []
for dir_ in dirs:
files = [
file for file in os.listdir(dir_) if (not os.path.isdir(file))
]
path = [str(dir_ / file) for file in files]
sources += [source for source in path if (source.endswith(".c"))]
return sources
def configuration(parent_package="", top_path=None):
    """numpy.distutils configuration for the ``_unuran`` subpackage.

    Builds the ``unuran_wrapper`` extension from the vendored UNU.RAN
    sources (a git submodule) plus the generated wrapper C file.
    """
    from numpy.distutils.misc_util import Configuration
    from scipy._lib._unuran_utils import _unuran_dir
    # The UNU.RAN sources live in a git submodule; fail early with a
    # helpful message if it has not been checked out.
    if not os.path.exists(_unuran_dir(ret_path=True) / 'README.md'):
        raise RuntimeError("Missing the `unuran` submodule! Run `git "
                           "submodule update --init` to fix this.")
    config = Configuration("_unuran", parent_package, top_path)

    # UNU.RAN info
    UNURAN_DIR = _unuran_dir(ret_path=True).resolve()
    UNURAN_VERSION = "16:0:0"  # libtool-style current:revision:age string
    # Fixed subset of the autoconf results for UNU.RAN; header-dependent
    # HAVE_* macros are appended at build time by `unuran_pre_build_hook`.
    DEFINE_MACROS = [
        ("HAVE_ALARM", "1"),
        ("HAVE_DECL_ALARM", "1"),
        ("HAVE_DECL_HUGE_VAL", "1"),
        ("HAVE_DECL_INFINITY", "1"),
        ("HAVE_DECL_ISFINITE", "0"),
        ("HAVE_DECL_ISINF", "0"),
        ("HAVE_DECL_ISNAN", "1"),
        ("HAVE_DECL_LOG1P", "1"),
        ("HAVE_DECL_SIGNAL", "1"),
        ("HAVE_DECL_SNPRINTF", "1"),
        ("HAVE_DECL_VSNPRINTF", "1"),
        ("HAVE_FLOAT_H", "1"),
        ("HAVE_FLOOR", "1"),
        ("HAVE_IEEE_COMPARISONS", "1"),
        ("HAVE_INTTYPES_H", "1"),
        ("HAVE_LIBM", "1"),
        ("HAVE_LIMITS_H", "1"),
        ("HAVE_POW", "1"),
        ("HAVE_SIGNAL", "1"),
        ("HAVE_SQRT", "1"),
        ("HAVE_STDINT_H", "1"),
        ("HAVE_STDLIB_H", "1"),
        ("HAVE_STRCHR", "1"),
        ("HAVE_STRING_H", "1"),
        ("HAVE_STRTOL", "1"),
        ("HAVE_STRTOUL", "1"),
        ("LT_OBJDIR", '".libs/"'),
        ("PACKAGE", '"unuran"'),
        ("PACKAGE_BUGREPORT", '"unuran@statmath.wu.ac.at"'),
        ("PACKAGE_NAME", '"unuran"'),
        ("PACKAGE_STRING", '"unuran %s"' % UNURAN_VERSION),
        ("PACKAGE_TARNAME", '"unuran"'),
        ("PACKAGE_URL", '""'),
        ("PACKAGE_VERSION", '"%s"' % UNURAN_VERSION),
        ("STDC_HEADERS", "1"),
        ("UNUR_ENABLE_INFO", "1"),
        ("VERSION", '"%s"' % UNURAN_VERSION),
        ("HAVE_CONFIG_H", "1"),
        ("_ISOC99_SOURCE", "1"),
    ]

    UNURAN_DIRS = [
        os.path.join("unuran", "src"),
        os.path.join("unuran", "src", "distr"),
        os.path.join("unuran", "src", "distributions"),
        os.path.join("unuran", "src", "methods"),
        os.path.join("unuran", "src", "parser"),
        os.path.join("unuran", "src", "specfunct"),
        os.path.join("unuran", "src", "urng"),
        os.path.join("unuran", "src", "utils"),
        os.path.join("unuran", "src", "tests"),
    ]
    UNURAN_SOURCE_DIRS = [UNURAN_DIR / dir_ for dir_ in UNURAN_DIRS]

    # [1:] skips the top-level "src" directory itself — presumably it holds
    # no directly-compiled .c files; confirm before changing.
    sources = _get_sources(UNURAN_SOURCE_DIRS[1:])

    ext = config.add_extension(
        "unuran_wrapper",
        sources=["unuran_wrapper.c"] + sources,
        libraries=[],
        include_dirs=[str(dir_.resolve()) for dir_ in UNURAN_SOURCE_DIRS]
        + [
            os.path.join(
                os.path.dirname(__file__), "..", "..", "_lib", "src"
            )
        ]
        + [os.path.dirname(__file__)],
        language="c",
        define_macros=DEFINE_MACROS,
    )
    # Compiler probing (C-standard flag, HAVE_* macros, libm) runs just
    # before the extension is built.
    ext.pre_build_hook = unuran_pre_build_hook

    # Ship the Cython declaration and type-stub files with the package.
    config.add_data_files("*.pxd")
    config.add_data_files("*.pyi")
    return config
if __name__ == "__main__":
    # Allow building this subpackage standalone: `python setup.py build`.
    from numpy.distutils.core import setup
    setup(**configuration(top_path="").todict())
| 4,856
| 33.204225
| 74
|
py
|
scipy
|
scipy-main/scipy/stats/_unuran/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/stats/_rcont/setup.py
|
from os.path import join
import numpy as np
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``_rcont`` subpackage.

    Builds the ``rcont`` extension (random contingency-table sampling),
    linking against NumPy's bundled ``npyrandom`` and ``npymath`` static
    libraries, whose directories are located relative to the NumPy include
    directory.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('_rcont', parent_package, top_path)
    config.add_extension(
        'rcont',
        sources=['rcont.c', '_rcont.c', 'logfactorial.c'],
        include_dirs=[np.get_include()],
        library_dirs=[join(np.get_include(),
                           '..', '..', 'random', 'lib'),
                      join(np.get_include(),
                           '..', 'lib')],
        libraries=['npyrandom', 'npymath']
    )
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone: `python setup.py build`.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 730
| 27.115385
| 62
|
py
|
scipy
|
scipy-main/scipy/stats/_rcont/__init__.py
|
#
from .rcont import rvs_rcont1, rvs_rcont2
__all__ = ["rvs_rcont1", "rvs_rcont2"]
| 84
| 16
| 41
|
py
|
scipy
|
scipy-main/scipy/fft/_realtransforms.py
|
from ._basic import _dispatch
from scipy._lib.uarray import Dispatchable
import numpy as np
__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
@_dispatch
def dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, *, orthogonalize=None):
    """
    Return multidimensional Discrete Cosine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` (see below) are None,
        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
        If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
        ``s[i]``.
        If any element of `s` is -1, the size of the corresponding dimension of
        `x` is used.
    axes : int or array_like of ints or None, optional
        Axes over which the DCT is computed. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized DCT variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idctn : Inverse multidimensional DCT

    Notes
    -----
    For full details of the DCT types and normalization modes, as well as
    references, see `dct`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dctn, idctn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idctn(dctn(y)))
    True
    """
    # uarray multimethod stub: the active backend supplies the actual
    # implementation; this body only marks `x` as the dispatchable argument.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
          workers=None, *, orthogonalize=None):
    """
    Return multidimensional Inverse Discrete Cosine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` (see below) are
        None, `s` is ``x.shape``; if `s` is None but `axes` is
        not None, then `s` is ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
        If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
        ``s[i]``.
        If any element of `s` is -1, the size of the corresponding dimension of
        `x` is used.
    axes : int or array_like of ints or None, optional
        Axes over which the IDCT is computed. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized IDCT variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
        Keyword-only, consistent with `dctn`.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    dctn : multidimensional DCT

    Notes
    -----
    For full details of the IDCT types and normalization modes, as well as
    references, see `idct`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dctn, idctn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idctn(dctn(y)))
    True
    """
    # Consistency fix: `orthogonalize` is keyword-only (after `*`) as in
    # `dctn` above, matching the signature documented for scipy.fft.
    # uarray multimethod stub: the active backend supplies the actual
    # implementation; this body only marks `x` as the dispatchable argument.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, orthogonalize=None):
    """
    Return multidimensional Discrete Sine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` (see below) are None,
        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
        If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
        ``s[i]``.
        If any element of `s` is -1, the size of the corresponding dimension
        of `x` is used.
    axes : int or array_like of ints or None, optional
        Axes over which the DST is computed. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized DST variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idstn : Inverse multidimensional DST

    Notes
    -----
    For full details of the DST types and normalization modes, as well as
    references, see `dst`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dstn, idstn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idstn(dstn(y)))
    True
    """
    # uarray dispatch stub: only `x` participates in backend dispatch;
    # the registered backend supplies the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
          workers=None, orthogonalize=None):
    """
    Return multidimensional Inverse Discrete Sine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        Input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        Shape of the result. When both `s` and `axes` (see below) are None,
        `s` defaults to ``x.shape``; when `s` is None but `axes` is given,
        ``numpy.take(x.shape, axes, axis=0)`` is used. A dimension is
        zero-padded when ``s[i] > x.shape[i]`` and truncated to length
        ``s[i]`` when ``s[i] < x.shape[i]``. An entry of -1 keeps the
        corresponding dimension of `x` unchanged.
    axes : int or array_like of ints or None, optional
        Axes along which the IDST is computed. Defaults to the last
        ``len(s)`` axes, or to all axes when `s` is not given either.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        When True, the contents of `x` may be destroyed; the default is
        False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. Negative
        values wrap around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized IDST variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    dstn : multidimensional DST

    Notes
    -----
    For full details of the IDST types and normalization modes, as well as
    references, see `idst`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dstn, idstn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idstn(dstn(y)))
    True
    """
    # uarray dispatch stub: mark the input array as the dispatchable
    # argument; a registered backend supplies the actual transform.
    marked = Dispatchable(x, np.ndarray)
    return (marked,)
@_dispatch
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None,
        orthogonalize=None):
    r"""Return the Discrete Cosine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized DCT variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idct : Inverse DCT

    Notes
    -----
    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
    MATLAB ``dct(x)``.

    .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct
                 correspondence with the direct Fourier transform. To recover
                 it you must specify ``orthogonalize=False``.

    For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same
    overall factor in both directions. By default, the transform is also
    orthogonalized which for types 1, 2 and 3 means the transform definition is
    modified to give orthogonality of the DCT matrix (see below).

    For ``norm="backward"``, there is no scaling on `dct` and the `idct` is
    scaled by ``1/N`` where ``N`` is the "logical" size of the DCT. For
    ``norm="forward"`` the ``1/N`` normalization is applied to the forward
    `dct` instead and the `idct` is unnormalized.

    There are, theoretically, 8 types of the DCT, only the first 4 types are
    implemented in SciPy. 'The' DCT generally refers to DCT type 2, and 'the'
    Inverse DCT generally refers to DCT type 3.

    **Type I**

    There are several definitions of the DCT-I; we use the following
    (for ``norm="backward"``)

    .. math::

       y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left(
       \frac{\pi k n}{N-1} \right)

    If ``orthogonalize=True``, ``x[0]`` and ``x[N-1]`` are multiplied by a
    scaling factor of :math:`\sqrt{2}`, and ``y[0]`` and ``y[N-1]`` are divided
    by :math:`\sqrt{2}`. When combined with ``norm="ortho"``, this makes the
    corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``).

    .. note::
       The DCT-I is only supported for input size > 1.

    **Type II**

    There are several definitions of the DCT-II; we use the following
    (for ``norm="backward"``)

    .. math::

       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right)

    If ``orthogonalize=True``, ``y[0]`` is divided by :math:`\sqrt{2}` which,
    when combined with ``norm="ortho"``, makes the corresponding matrix of
    coefficients orthonormal (``O @ O.T = np.eye(N)``).

    **Type III**

    There are several definitions, we use the following (for
    ``norm="backward"``)

    .. math::

       y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right)

    If ``orthogonalize=True``, ``x[0]`` terms are multiplied by
    :math:`\sqrt{2}` which, when combined with ``norm="ortho"``, makes the
    corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``).

    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
    to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
    the orthonormalized DCT-II.

    **Type IV**

    There are several definitions of the DCT-IV; we use the following
    (for ``norm="backward"``)

    .. math::

       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right)

    ``orthogonalize`` has no effect here, as the DCT-IV matrix is already
    orthogonal up to a scale factor of ``2N``.

    References
    ----------
    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
           Makhoul, `IEEE Transactions on acoustics, speech and signal
           processing` vol. 28(1), pp. 27-34,
           :doi:`10.1109/TASSP.1980.1163351` (1980).
    .. [2] Wikipedia, "Discrete cosine transform",
           https://en.wikipedia.org/wiki/Discrete_cosine_transform

    Examples
    --------
    The Type 1 DCT is equivalent to the FFT (though faster) for real,
    even-symmetrical inputs. The output is also real and even-symmetrical.
    Half of the FFT input is used to generate half of the FFT output:

    >>> from scipy.fft import fft, dct
    >>> import numpy as np
    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
    array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])
    >>> dct(np.array([4., 3., 5., 10.]), 1)
    array([ 30.,  -8.,   6.,  -2.])
    """
    # uarray dispatch stub: only `x` participates in backend dispatch;
    # the registered backend supplies the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
         workers=None, orthogonalize=None):
    """
    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.

    Parameters
    ----------
    x : array_like
        Input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. `x` is truncated when
        ``n < x.shape[axis]`` and zero-padded when ``n > x.shape[axis]``.
        The default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idct is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        When True, the contents of `x` may be destroyed; the default is
        False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. Negative
        values wrap around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized IDCT variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    idct : ndarray of real
        The transformed input array.

    See Also
    --------
    dct : Forward DCT

    Notes
    -----
    For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
    MATLAB ``idct(x)``.

    .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the
                 direct correspondence with the inverse direct Fourier
                 transform. To recover it you must specify
                 ``orthogonalize=False``.

    For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same
    overall factor in both directions. By default, the transform is also
    orthogonalized, which for types 1, 2 and 3 means the transform
    definition is modified to give orthogonality of the IDCT matrix (see
    `dct` for the full definitions).

    'The' IDCT is the IDCT-II, which is the same as the normalized DCT-III.
    The IDCT is equivalent to a normal DCT except for the normalization and
    type. DCT type 1 and 4 are their own inverse and DCTs 2 and 3 are each
    other's inverses.

    Examples
    --------
    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
    inputs. The output is also real and even-symmetrical. Half of the IFFT
    input is used to generate half of the IFFT output:

    >>> from scipy.fft import ifft, idct
    >>> import numpy as np
    >>> ifft(np.array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])).real
    array([  4.,   3.,   5.,  10.,   5.,   3.])
    >>> idct(np.array([ 30.,  -8.,   6.,  -2.]), 1)
    array([  4.,   3.,   5.,  10.])
    """
    # uarray dispatch stub: mark the input array as the dispatchable
    # argument; a registered backend supplies the actual transform.
    marked = Dispatchable(x, np.ndarray)
    return (marked,)
@_dispatch
def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None,
        orthogonalize=None):
    r"""
    Return the Discrete Sine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dst is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized DST variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    dst : ndarray of reals
        The transformed input array.

    See Also
    --------
    idst : Inverse DST

    Notes
    -----
    .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct
                 correspondence with the direct Fourier transform. To recover
                 it you must specify ``orthogonalize=False``.

    For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same
    overall factor in both directions. By default, the transform is also
    orthogonalized which for types 2 and 3 means the transform definition is
    modified to give orthogonality of the DST matrix (see below).

    For ``norm="backward"``, there is no scaling on the `dst` and the `idst` is
    scaled by ``1/N`` where ``N`` is the "logical" size of the DST.

    There are, theoretically, 8 types of the DST for different combinations of
    even/odd boundary conditions and boundary offsets [1]_, only the first
    4 types are implemented in SciPy.

    **Type I**

    There are several definitions of the DST-I; we use the following for
    ``norm="backward"``. DST-I assumes the input is odd around :math:`n=-1` and
    :math:`n=N`.

    .. math::

        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)

    Note that the DST-I is only supported for input size > 1.
    The (unnormalized) DST-I is its own inverse, up to a factor :math:`2(N+1)`.
    The orthonormalized DST-I is exactly its own inverse.

    ``orthogonalize`` has no effect here, as the DST-I matrix is already
    orthogonal up to a scale factor of ``2N``.

    **Type II**

    There are several definitions of the DST-II; we use the following for
    ``norm="backward"``. DST-II assumes the input is odd around :math:`n=-1/2` and
    :math:`n=N-1/2`; the output is odd around :math:`k=-1` and even around :math:`k=N-1`

    .. math::

        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)

    If ``orthogonalize=True``, ``y[-1]`` is divided :math:`\sqrt{2}` which, when
    combined with ``norm="ortho"``, makes the corresponding matrix of
    coefficients orthonormal (``O @ O.T = np.eye(N)``).

    **Type III**

    There are several definitions of the DST-III, we use the following (for
    ``norm="backward"``). DST-III assumes the input is odd around :math:`n=-1` and
    even around :math:`n=N-1`

    .. math::

        y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
        \frac{\pi(2k+1)(n+1)}{2N}\right)

    If ``orthogonalize=True``, ``x[-1]`` is multiplied by :math:`\sqrt{2}`
    which, when combined with ``norm="ortho"``, makes the corresponding matrix
    of coefficients orthonormal (``O @ O.T = np.eye(N)``).

    The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
    to a factor :math:`2N`. The orthonormalized DST-III is exactly the inverse of the
    orthonormalized DST-II.

    **Type IV**

    There are several definitions of the DST-IV, we use the following (for
    ``norm="backward"``). DST-IV assumes the input is odd around :math:`n=-0.5` and
    even around :math:`n=N-0.5`

    .. math::

        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)

    ``orthogonalize`` has no effect here, as the DST-IV matrix is already
    orthogonal up to a scale factor of ``2N``.

    The (unnormalized) DST-IV is its own inverse, up to a factor :math:`2N`. The
    orthonormalized DST-IV is exactly its own inverse.

    References
    ----------
    .. [1] Wikipedia, "Discrete sine transform",
           https://en.wikipedia.org/wiki/Discrete_sine_transform
    """
    # uarray dispatch stub: only `x` participates in backend dispatch;
    # the registered backend supplies the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
         workers=None, orthogonalize=None):
    """
    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idst is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized IDST variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    idst : ndarray of real
        The transformed input array.

    See Also
    --------
    dst : Forward DST

    Notes
    -----
    .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct
                 correspondence with the inverse direct Fourier transform. To
                 recover it you must specify ``orthogonalize=False``.

    For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same
    overall factor in both directions. By default, the transform is also
    orthogonalized which for types 2 and 3 means the transform definition is
    modified to give orthogonality of the DST matrix (see `dst` for the full
    definitions).

    'The' IDST is the IDST-II, which is the same as the normalized DST-III.
    The IDST is equivalent to a normal DST except for the normalization and
    type. DST type 1 and 4 are their own inverse and DSTs 2 and 3 are each
    other's inverses.
    """
    # uarray dispatch stub: only `x` participates in backend dispatch;
    # the registered backend supplies the actual transform.
    return (Dispatchable(x, np.ndarray),)
| 25,282
| 35.430836
| 88
|
py
|
scipy
|
scipy-main/scipy/fft/setup.py
|
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils build configuration for ``scipy.fft``.

    NOTE(review): ``numpy.distutils`` was removed in NumPy >= 2.0, so this
    legacy setup script only works under the old build tooling — confirm
    the targeted NumPy version before relying on it.
    """
    # Imported lazily: numpy.distutils is only needed at build time.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('fft', parent_package, top_path)
    config.add_subpackage('_pocketfft')
    config.add_data_dir('tests')
    return config
# Allow invoking this file directly as a standalone build script.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 381
| 30.833333
| 59
|
py
|
scipy
|
scipy-main/scipy/fft/_basic.py
|
from scipy._lib.uarray import generate_multimethod, Dispatchable
import numpy as np
def _x_replacer(args, kwargs, dispatchables):
"""
uarray argument replacer to replace the transform input array (``x``)
"""
if len(args) > 0:
return (dispatchables[0],) + args[1:], kwargs
kw = kwargs.copy()
kw['x'] = dispatchables[0]
return args, kw
def _dispatch(func):
    """Wrap *func* as a uarray multimethod in the ``numpy.scipy.fft`` domain.

    The decorated function body only declares which arguments are
    dispatchable; registered backends provide the implementation.
    """
    multimethod = generate_multimethod(func, _x_replacer,
                                       domain="numpy.scipy.fft")
    return multimethod
@_dispatch
def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
        plan=None):
    """
    Compute the 1-D discrete Fourier Transform.

    This function computes the 1-D *n*-point discrete Fourier
    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
    algorithm [1]_.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input along the axis specified by `axis` is used.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is
        used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode. Default is "backward", meaning no normalization on
        the forward transforms and scaling by ``1/n`` on the `ifft`.
        "forward" instead applies the ``1/n`` factor on the forward transform.
        For ``norm="ortho"``, both directions are scaled by ``1/sqrt(n)``.

        .. versionadded:: 1.6.0
           ``norm={"forward", "backward"}`` options were added

    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See the notes below for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``. See below for more
        details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        if `axes` is larger than the last axis of `x`.

    See Also
    --------
    ifft : The inverse of `fft`.
    fft2 : The 2-D FFT.
    fftn : The N-D FFT.
    rfftn : The N-D FFT of real input.
    fftfreq : Frequency bins for given FFT parameters.
    next_fast_len : Size to pad input to for most efficient transforms

    Notes
    -----
    FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform
    (DFT) can be calculated efficiently, by using symmetries in the calculated
    terms. The symmetry is highest when `n` is a power of 2, and the transform
    is therefore most efficient for these sizes. For poorly factorizable sizes,
    `scipy.fft` uses Bluestein's algorithm [2]_ and so is never worse than
    O(`n` log `n`). Further performance improvements may be seen by zero-padding
    the input using `next_fast_len`.

    If ``x`` is a 1d array, then the `fft` is equivalent to ::

        y[k] = np.sum(x * np.exp(-2j * np.pi * k * np.arange(n)/n))

    The frequency term ``f=k/n`` is found at ``y[k]``. At ``y[n/2]`` we reach
    the Nyquist frequency and wrap around to the negative-frequency terms. So,
    for an 8-point transform, the frequencies of the result are
    [0, 1, 2, 3, -4, -3, -2, -1]. To rearrange the fft output so that the
    zero-frequency component is centered, like [-4, -3, -2, -1, 0, 1, 2, 3],
    use `fftshift`.

    Transforms can be done in single, double, or extended precision (long
    double) floating point. Half precision inputs will be converted to single
    precision and non-floating-point inputs will be converted to double
    precision.

    If the data type of ``x`` is real, a "real FFT" algorithm is automatically
    used, which roughly halves the computation time. To increase efficiency
    a little further, use `rfft`, which does the same calculation, but only
    outputs half of the symmetrical spectrum. If the data are both real and
    symmetrical, the `dct` can again double the efficiency, by generating
    half of the spectrum from half of the signal.

    When ``overwrite_x=True`` is specified, the memory referenced by ``x`` may
    be used by the implementation in any way. This may include reusing the
    memory for the result, but this is in no way guaranteed. You should not
    rely on the contents of ``x`` after the transform as this may change in
    future without warning.

    The ``workers`` argument specifies the maximum number of parallel jobs to
    split the FFT computation into. This will execute independent 1-D
    FFTs within ``x``. So, ``x`` must be at least 2-D and the
    non-transformed axes must be large enough to split into chunks. If ``x`` is
    too small, fewer jobs may be used than requested.

    References
    ----------
    .. [1] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
           machine calculation of complex Fourier series," *Math. Comput.*
           19: 297-301.
    .. [2] Bluestein, L., 1970, "A linear filtering approach to the
           computation of discrete Fourier transform". *IEEE Transactions on
           Audio and Electroacoustics.* 18 (4): 451-455.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> scipy.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
    array([-2.33486982e-16+1.14423775e-17j,  8.00000000e+00-1.25557246e-15j,
            2.33486982e-16+2.33486982e-16j,  0.00000000e+00+1.22464680e-16j,
           -1.14423775e-17+2.33486982e-16j,  0.00000000e+00+5.20784380e-16j,
            1.14423775e-17+1.14423775e-17j,  0.00000000e+00+1.22464680e-16j])

    In this example, real input has an FFT which is Hermitian, i.e., symmetric
    in the real part and anti-symmetric in the imaginary part:

    >>> from scipy.fft import fft, fftfreq, fftshift
    >>> import matplotlib.pyplot as plt
    >>> t = np.arange(256)
    >>> sp = fftshift(fft(np.sin(t)))
    >>> freq = fftshift(fftfreq(t.shape[-1]))
    >>> plt.plot(freq, sp.real, freq, sp.imag)
    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.show()
    """
    # uarray dispatch stub: only `x` participates in backend dispatch;
    # the registered backend supplies the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 1-D inverse discrete Fourier Transform.

    This function computes the inverse of the 1-D *n*-point discrete
    Fourier transform computed by `fft`; that is, ``ifft(fft(x)) == x``
    to within numerical accuracy.

    The input should be ordered the same way `fft` returns it, i.e.,

    * ``x[0]`` holds the zero frequency term,
    * ``x[1:n//2]`` holds the positive-frequency terms,
    * ``x[n//2 + 1:]`` holds the negative-frequency terms, in increasing
      order starting from the most negative frequency.

    For an even number of input points, ``x[n//2]`` represents the sum of
    the values at the positive and negative Nyquist frequencies, as the two
    are aliased together. See `fft` for details.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output. Longer input is
        cropped and shorter input is zero-padded; when not given, the
        length of the input along the axis specified by `axis` is used.
        See notes about padding issues.
    axis : int, optional
        Axis over which to compute the inverse DFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. Negative
        values wrap around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT vendors;
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        If `axes` is larger than the last axis of `x`.

    See Also
    --------
    fft : The 1-D (forward) FFT, of which `ifft` is the inverse.
    ifft2 : The 2-D inverse FFT.
    ifftn : The N-D inverse FFT.

    Notes
    -----
    If the input parameter `n` is larger than the size of the input, the
    input is padded by appending zeros at the end. Even though this is the
    common approach, it might lead to surprising results. If a different
    padding is desired, it must be performed before calling `ifft`.

    If ``x`` is a 1-D array, then the `ifft` is equivalent to ::

        y[k] = np.sum(x * np.exp(2j * np.pi * k * np.arange(n)/n)) / len(x)

    As with `fft`, `ifft` has support for all floating point types and is
    optimized for real input.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> scipy.fft.ifft([0, 4, 0, 0])
    array([ 1.+0.j,  0.+1.j, -1.+0.j,  0.-1.j]) # may vary

    Create and plot a band-limited signal with random phases:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> t = np.arange(400)
    >>> n = np.zeros((400,), dtype=complex)
    >>> n[40:60] = np.exp(1j*rng.uniform(0, 2*np.pi, (20,)))
    >>> s = scipy.fft.ifft(n)
    >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
    [<matplotlib.lines.Line2D object at ...>, <matplotlib.lines.Line2D object at ...>]
    >>> plt.legend(('real', 'imaginary'))
    <matplotlib.legend.Legend object at ...>
    >>> plt.show()
    """
    # uarray dispatch stub: mark the input array as the dispatchable
    # argument; a registered backend supplies the actual transform.
    marked = Dispatchable(x, np.ndarray)
    return (marked,)
@_dispatch
def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 1-D discrete Fourier Transform for real input.

    This function computes the 1-D *n*-point discrete Fourier Transform
    (DFT) of a real-valued array by means of an efficient algorithm called
    the Fast Fourier Transform (FFT).

    Parameters
    ----------
    x : array_like
        Input array
    n : int, optional
        Number of points along the transformation axis in the input to use.
        Longer input is cropped and shorter input is zero-padded; when not
        given, the length of the input along the axis specified by `axis`
        is used.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is
        used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. Negative
        values wrap around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT vendors;
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The transformed axis has length ``(n/2)+1`` when `n` is even and
        ``(n+1)/2`` when `n` is odd.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    irfft : The inverse of `rfft`.
    fft : The 1-D FFT of general (complex) input.
    fftn : The N-D FFT.
    rfft2 : The 2-D FFT of real input.
    rfftn : The N-D FFT of real input.

    Notes
    -----
    The DFT of purely real input is Hermitian-symmetric: the negative
    frequency terms are just the complex conjugates of the corresponding
    positive-frequency terms and are therefore redundant. This function
    does not compute the negative frequency terms, so the transformed axis
    of the output has length ``n//2 + 1``.

    When ``X = rfft(x)`` and fs is the sampling frequency, ``X[0]`` contains
    the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
    If `n` is even, ``A[-1]`` contains the term representing both positive
    and negative Nyquist frequency (+fs/2 and -fs/2), and must also be
    purely real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
    the largest positive frequency (fs/2*(n-1)/n), and is complex in the
    general case.

    If the input `a` contains an imaginary part, it is silently discarded.

    Examples
    --------
    >>> import scipy.fft
    >>> scipy.fft.fft([0, 1, 0, 0])
    array([ 1.+0.j,  0.-1.j, -1.+0.j,  0.+1.j]) # may vary
    >>> scipy.fft.rfft([0, 1, 0, 0])
    array([ 1.+0.j,  0.-1.j, -1.+0.j]) # may vary

    Notice how the final element of the `fft` output is the complex
    conjugate of the second element, for real input. For `rfft`, this
    symmetry is exploited to compute only the non-negative frequency terms.
    """
    # uarray dispatch stub: mark the input array as the dispatchable
    # argument; a registered backend supplies the actual transform.
    marked = Dispatchable(x, np.ndarray)
    return (marked,)
@_dispatch
def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Computes the inverse of `rfft`.

    This function computes the inverse of the 1-D *n*-point
    discrete Fourier Transform of real input computed by `rfft`.
    In other words, ``irfft(rfft(x), len(x)) == x`` to within numerical
    accuracy. (See Notes below for why ``len(x)`` is necessary here.)

    The input is expected to be in the form returned by `rfft`, i.e., the
    real zero-frequency term followed by the complex positive frequency terms
    in order of increasing frequency. Since the discrete Fourier Transform of
    real input is Hermitian-symmetric, the negative frequency terms are taken
    to be the complex conjugates of the corresponding positive frequency terms.

    Parameters
    ----------
    x : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output.
        For `n` output points, ``n//2+1`` input points are necessary. If the
        input is longer than this, it is cropped. If it is shorter than this,
        it is padded with zeros. If `n` is not given, it is taken to be
        ``2*(m-1)``, where ``m`` is the length of the input along the axis
        specified by `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is `n`, or, if `n` is not given,
        ``2*(m-1)`` where ``m`` is the length of the transformed axis of the
        input. To get an odd number of output points, `n` must be specified.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `x`.

    See Also
    --------
    rfft : The 1-D FFT of real input, of which `irfft` is inverse.
    fft : The 1-D FFT.
    irfft2 : The inverse of the 2-D FFT of real input.
    irfftn : The inverse of the N-D FFT of real input.

    Notes
    -----
    Returns the real valued `n`-point inverse discrete Fourier transform
    of `x`, where `x` contains the non-negative frequency terms of a
    Hermitian-symmetric sequence. `n` is the length of the result, not the
    input.

    If you specify an `n` such that `x` must be zero-padded or truncated, the
    extra/removed values will be added/removed at high frequencies. One can
    thus resample a series to `m` points via Fourier interpolation by:
    ``a_resamp = irfft(rfft(a), m)``.

    The default value of `n` assumes an even output length. By the Hermitian
    symmetry, the last imaginary component must be 0 and so is ignored. To
    avoid losing information, the correct length of the real input *must* be
    given.

    Examples
    --------
    >>> import scipy.fft
    >>> scipy.fft.ifft([1, -1j, -1, 1j])
    array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
    >>> scipy.fft.irfft([1, -1j, -1])
    array([0., 1., 0., 0.])

    Notice how the last term in the input to the ordinary `ifft` is the
    complex conjugate of the second term, and the output has zero imaginary
    part everywhere. When calling `irfft`, the negative frequencies are not
    specified, and the output array is purely real.
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
    spectrum.

    Parameters
    ----------
    x : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output. For `n` output
        points, ``n//2 + 1`` input points are necessary. If the input is
        longer than this, it is cropped. If it is shorter than this, it is
        padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``,
        where ``m`` is the length of the input along the axis specified by
        `axis`.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is `n`, or, if `n` is not given,
        ``2*m - 2``, where ``m`` is the length of the transformed axis of
        the input. To get an odd number of output points, `n` must be
        specified, for instance, as ``2*m - 1`` in the typical case.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `x`.

    See Also
    --------
    rfft : Compute the 1-D FFT for real input.
    ihfft : The inverse of `hfft`.
    hfftn : Compute the N-D FFT of a Hermitian signal.

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So, here, it's `hfft`, for
    which you must supply the length of the result if it is to be odd.

    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.

    Examples
    --------
    >>> from scipy.fft import fft, hfft
    >>> import numpy as np
    >>> a = 2 * np.pi * np.arange(10) / 10
    >>> signal = np.cos(a) + 3j * np.sin(3 * a)
    >>> fft(signal).round(10)
    array([ -0.+0.j, 5.+0.j, -0.+0.j, 15.-0.j, 0.+0.j, 0.+0.j,
    -0.+0.j, -15.-0.j, 0.+0.j, 5.+0.j])
    >>> hfft(signal[:6]).round(10) # Input first half of signal
    array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.])
    >>> hfft(signal, 10) # Input entire signal and truncate
    array([ 0., 5., 0., 15., -0., 0., 0., -15., -0., 5.])
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the inverse FFT of a signal that has Hermitian symmetry.

    Parameters
    ----------
    x : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT, the number of points along
        transformation axis in the input to use. If `n` is smaller than
        the length of the input, the input is cropped. If it is larger,
        the input is padded with zeros. If `n` is not given, the length of
        the input along the axis specified by `axis` is used.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is ``n//2 + 1``.

    See Also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here, the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So, here, it's `hfft`, for
    which you must supply the length of the result if it is to be odd:

    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.

    Examples
    --------
    >>> from scipy.fft import ifft, ihfft
    >>> import numpy as np
    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
    >>> ifft(spectrum)
    array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
    >>> ihfft(spectrum)
    array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the N-D discrete Fourier Transform.

    This function computes the N-D discrete Fourier Transform over
    any number of axes in an M-D array by means of the Fast Fourier
    Transform (FFT).

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`,
        as explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    ifftn : The inverse of `fftn`, the inverse N-D FFT.
    fft : The 1-D FFT, with definitions and conventions used.
    rfftn : The N-D FFT of real input.
    fft2 : The 2-D FFT.
    fftshift : Shifts zero-frequency terms to centre of array.

    Notes
    -----
    The output, analogously to `fft`, contains the term for zero frequency in
    the low-order corner of all axes, the positive frequency terms in the
    first half of all axes, the term for the Nyquist frequency in the middle
    of all axes and the negative frequency terms in the second half of all
    axes, in order of decreasingly negative frequency.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.mgrid[:3, :3, :3][0]
    >>> scipy.fft.fftn(x, axes=(1, 2))
    array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary
    [ 0.+0.j, 0.+0.j, 0.+0.j],
    [ 0.+0.j, 0.+0.j, 0.+0.j]],
    [[ 9.+0.j, 0.+0.j, 0.+0.j],
    [ 0.+0.j, 0.+0.j, 0.+0.j],
    [ 0.+0.j, 0.+0.j, 0.+0.j]],
    [[18.+0.j, 0.+0.j, 0.+0.j],
    [ 0.+0.j, 0.+0.j, 0.+0.j],
    [ 0.+0.j, 0.+0.j, 0.+0.j]]])
    >>> scipy.fft.fftn(x, (2, 2), axes=(0, 1))
    array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary
    [ 0.+0.j, 0.+0.j, 0.+0.j]],
    [[-2.+0.j, -2.+0.j, -2.+0.j],
    [ 0.+0.j, 0.+0.j, 0.+0.j]]])

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
    ... 2 * np.pi * np.arange(200) / 34)
    >>> S = np.sin(X) + np.cos(Y) + rng.uniform(0, 1, X.shape)
    >>> FS = scipy.fft.fftn(S)
    >>> plt.imshow(np.log(np.abs(scipy.fft.fftshift(FS))**2))
    <matplotlib.image.AxesImage object at 0x...>
    >>> plt.show()
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D inverse discrete Fourier Transform.

    This function computes the inverse of the N-D discrete
    Fourier Transform over any number of axes in an M-D array by
    means of the Fast Fourier Transform (FFT). In other words,
    ``ifftn(fftn(x)) == x`` to within numerical accuracy.

    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fftn`, i.e., it should have the term for zero frequency
    in all axes in the low-order corner, the positive frequency terms in the
    first half of all axes, the term for the Nyquist frequency in the middle
    of all axes and the negative frequency terms in the second half of all
    axes, in order of decreasingly negative frequency.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``ifft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used. See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the IFFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `x`,
        as explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    fftn : The forward N-D FFT, of which `ifftn` is the inverse.
    ifft : The 1-D inverse FFT.
    ifft2 : The 2-D inverse FFT.
    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
        of array.

    Notes
    -----
    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension. Although this is the common
    approach, it might lead to surprising results. If another form of zero
    padding is desired, it must be performed before `ifftn` is called.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.eye(4)
    >>> scipy.fft.ifftn(scipy.fft.fftn(x, axes=(0,)), axes=(1,))
    array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
    [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
    [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
    [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])

    Create and plot an image with band-limited frequency content:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> n = np.zeros((200,200), dtype=complex)
    >>> n[60:80, 20:40] = np.exp(1j*rng.uniform(0, 2*np.pi, (20, 20)))
    >>> im = scipy.fft.ifftn(n).real
    >>> plt.imshow(im)
    <matplotlib.image.AxesImage object at 0x...>
    >>> plt.show()
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 2-D discrete Fourier Transform.

    This function computes the N-D discrete Fourier Transform
    over any axes in an M-D array by means of the
    Fast Fourier Transform (FFT). By default, the transform is computed over
    the last two axes of the input array, i.e., a 2-dimensional FFT.

    Parameters
    ----------
    x : array_like
        Input array, can be complex
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``fft(x, n)``.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two axes are
        used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    ifft2 : The inverse 2-D FFT.
    fft : The 1-D FFT.
    fftn : The N-D FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
        For 2-D input, swaps first and third quadrants, and second
        and fourth quadrants.

    Notes
    -----
    `fft2` is just `fftn` with a different default for `axes`.

    The output, analogously to `fft`, contains the term for zero frequency in
    the low-order corner of the transformed axes, the positive frequency terms
    in the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    the axes, in order of decreasingly negative frequency.

    See `fftn` for details and a plotting example, and `fft` for
    definitions and conventions used.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.mgrid[:5, :5][0]
    >>> scipy.fft.fft2(x)
    array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary
    0. +0.j , 0. +0.j ],
    [-12.5+17.20477401j, 0. +0.j , 0. +0.j ,
    0. +0.j , 0. +0.j ],
    [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,
    0. +0.j , 0. +0.j ],
    [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,
    0. +0.j , 0. +0.j ],
    [-12.5-17.20477401j, 0. +0.j , 0. +0.j ,
    0. +0.j , 0. +0.j ]])
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D inverse discrete Fourier Transform.

    This function computes the inverse of the 2-D discrete Fourier
    Transform over any number of axes in an M-D array by means of
    the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(x)) == x``
    to within numerical accuracy. By default, the inverse transform is
    computed over the last two axes of the input array.

    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fft2`, i.e., it should have the term for zero frequency
    in the low-order corner of the two axes, the positive frequency terms in
    the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    both axes, in order of decreasingly negative frequency.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
        ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used. See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two
        axes are used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    fft2 : The forward 2-D FFT, of which `ifft2` is the inverse.
    ifftn : The inverse of the N-D FFT.
    fft : The 1-D FFT.
    ifft : The 1-D inverse FFT.

    Notes
    -----
    `ifft2` is just `ifftn` with a different default for `axes`.

    See `ifftn` for details and a plotting example, and `fft` for
    definition and conventions used.

    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension. Although this is the common
    approach, it might lead to surprising results. If another form of zero
    padding is desired, it must be performed before `ifft2` is called.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = 4 * np.eye(4)
    >>> scipy.fft.ifft2(x)
    array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
    [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
    [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
    [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D discrete Fourier Transform for real input.

    This function computes the N-D discrete Fourier Transform over
    any number of axes in an M-D real array by means of the Fast
    Fourier Transform (FFT). By default, all axes are transformed, with the
    real transform performed over the last axis, while the remaining
    transforms are complex.

    Parameters
    ----------
    x : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`,
        as explained in the parameters section above.
        The length of the last axis transformed will be ``s[-1]//2+1``,
        while the remaining transformed axes will have lengths according to
        `s`, or unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e., the inverse of the N-D FFT
        of real input.
    fft : The 1-D FFT, with definitions and conventions used.
    rfft : The 1-D FFT of real input.
    fftn : The N-D FFT.
    rfft2 : The 2-D FFT of real input.

    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `rfft`, then the transform over the remaining axes is
    performed as by `fftn`. The order of the output is as for `rfft` for the
    final transformation axis, and as for `fftn` for the remaining
    transformation axes.

    See `fft` for details, definitions and conventions used.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((2, 2, 2))
    >>> scipy.fft.rfftn(x)
    array([[[8.+0.j, 0.+0.j], # may vary
    [0.+0.j, 0.+0.j]],
    [[0.+0.j, 0.+0.j],
    [0.+0.j, 0.+0.j]]])
    >>> scipy.fft.rfftn(x, axes=(2, 0))
    array([[[4.+0.j, 0.+0.j], # may vary
    [4.+0.j, 0.+0.j]],
    [[0.+0.j, 0.+0.j],
    [0.+0.j, 0.+0.j]]])
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D FFT of a real array.

    Parameters
    ----------
    x : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    irfft2 : The inverse of the 2-D FFT of real input.
    rfft : The 1-D FFT of real input.
    rfftn : Compute the N-D discrete Fourier Transform for real
        input.

    Notes
    -----
    This function is simply `rfftn` with a different default value for
    `axes`; see `rfftn` for the full details.
    """
    # Declare the array argument so the uarray machinery can dispatch on it.
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
@_dispatch
def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Computes the inverse of `rfftn`

    This function computes the inverse of the N-D discrete
    Fourier Transform for real input over any number of axes in an
    M-D array by means of the Fast Fourier Transform (FFT). In
    other words, ``irfftn(rfftn(x), x.shape) == x`` to within numerical
    accuracy. (The ``x.shape`` is necessary like ``len(x)`` is for `irfft`,
    and for the same reason.)

    The input should be ordered in the same way as is returned by `rfftn`,
    i.e., as for `irfft` for the final transformation axis, and as for `ifftn`
    along all the other axes.

    Parameters
    ----------
    x : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
        number of input points used along this axis, except for the last axis,
        where ``s[-1]//2+1`` points of the input are used.
        Along any axis, if the shape indicated by `s` is smaller than that of
        the input, the input is cropped. If it is larger, the input is padded
        with zeros. If `s` is not given, the shape of the input along the axes
        specified by axes is used. Except for the last axis which is taken to be
        ``2*(m-1)``, where ``m`` is the length of the input along that axis.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. If not given, the last
        `len(s)` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `x`,
        as explained in the parameters section above.
        The length of each transformed axis is as given by the corresponding
        element of `s`, or the length of the input in every axis except for the
        last one if `s` is not given. In the final transformed axis the length
        of the output when `s` is not given is ``2*(m-1)``, where ``m`` is the
        length of the final transformed axis of the input. To get an odd
        number of output points in the final axis, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    rfftn : The forward N-D FFT of real input,
        of which `irfftn` is the inverse.
    fft : The 1-D FFT, with definitions and conventions used.
    irfft : The inverse of the 1-D FFT of real input.
    irfft2 : The inverse of the 2-D FFT of real input.

    Notes
    -----
    See `fft` for definitions and conventions used.

    See `rfft` for definitions and conventions used for real input.

    The default value of `s` assumes an even output length in the final
    transformation axis. When performing the final complex to real
    transformation, the Hermitian symmetry requires that the last imaginary
    component along that axis must be 0 and so it is ignored. To avoid losing
    information, the correct length of the real input *must* be given.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.zeros((3, 2, 2))
    >>> x[0, 0, 0] = 3 * 2 * 2
    >>> scipy.fft.irfftn(x)
    array([[[1., 1.],
    [1., 1.]],
    [[1., 1.],
    [1., 1.]],
    [[1., 1.],
    [1., 1.]]])
    """
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Computes the inverse of `rfft2`

    Parameters
    ----------
    x : array_like
        The input array
    s : sequence of ints, optional
        Shape of the real output to the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft.
        Default is the last two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    rfft2 : The 2-D FFT of real input.
    irfft : The inverse of the 1-D FFT of real input.
    irfftn : The inverse of the N-D FFT of real input.

    Notes
    -----
    This function is simply `irfftn` with a different default value for
    `axes`; see `irfftn` for the full details.
    """
    # Declare the array argument so the uarray machinery can dispatch on it.
    array_arg = Dispatchable(x, np.ndarray)
    return (array_arg,)
@_dispatch
def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D FFT of Hermitian symmetric complex input, i.e., a
    signal with a real spectrum.

    This function computes the N-D discrete Fourier Transform for a
    Hermitian symmetric complex input over any number of axes in an
    M-D array by means of the Fast Fourier Transform (FFT). In other
    words, ``ihfftn(hfftn(x, s)) == x`` to within numerical accuracy. (``s``
    here is ``x.shape`` with ``s[-1] = x.shape[-1] * 2 - 1``, this is necessary
    for the same reason ``x.shape`` would be necessary for `irfft`.)

    Parameters
    ----------
    x : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
        number of input points used along this axis, except for the last axis,
        where ``s[-1]//2+1`` points of the input are used.
        Along any axis, if the shape indicated by `s` is smaller than that of
        the input, the input is cropped. If it is larger, the input is padded
        with zeros. If `s` is not given, the shape of the input along the axes
        specified by axes is used. Except for the last axis which is taken to be
        ``2*(m-1)`` where ``m`` is the length of the input along that axis.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. If not given, the last
        `len(s)` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `x`,
        as explained in the parameters section above.
        The length of each transformed axis is as given by the corresponding
        element of `s`, or the length of the input in every axis except for the
        last one if `s` is not given. In the final transformed axis the length
        of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
        length of the final transformed axis of the input. To get an odd
        number of output points in the final axis, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    ihfftn : The inverse N-D FFT with real spectrum. Inverse of `hfftn`.
    fft : The 1-D FFT, with definitions and conventions used.
    rfft : Forward FFT of real input.

    Notes
    -----
    For a 1-D signal ``x`` to have a real spectrum, it must satisfy
    the Hermitian property::

        x[i] == np.conj(x[-i]) for all i

    This generalizes into higher dimensions by reflecting over each axis in
    turn::

        x[i, j, k, ...] == np.conj(x[-i, -j, -k, ...]) for all i, j, k, ...

    This should not be confused with a Hermitian matrix, for which the
    transpose is its own conjugate::

        x[i, j] == np.conj(x[j, i]) for all i, j

    The default value of `s` assumes an even output length in the final
    transformation axis. When performing the final complex to real
    transformation, the Hermitian symmetry requires that the last imaginary
    component along that axis must be 0 and so it is ignored. To avoid losing
    information, the correct length of the real input *must* be given.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((3, 2, 2))
    >>> scipy.fft.hfftn(x)
    array([[[12.,  0.],
            [ 0.,  0.]],
           [[ 0.,  0.],
            [ 0.,  0.]],
           [[ 0.,  0.],
            [ 0.,  0.]]])
    """
    # uarray multimethod stub: the actual transform is provided by the active
    # backend; this body only marks `x` as a dispatchable ndarray argument.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D FFT of a Hermitian complex array.

    Parameters
    ----------
    x : array
        Input array, taken to be Hermitian complex.
    s : sequence of ints, optional
        Shape of the real output.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The real result of the 2-D Hermitian complex real FFT.

    See Also
    --------
    hfftn : Compute the N-D discrete Fourier Transform for Hermitian
        complex input.

    Notes
    -----
    This is really just `hfftn` with different default behavior.
    For more details see `hfftn`.
    """
    # uarray multimethod stub: the actual transform is provided by the active
    # backend; this body only marks `x` as a dispatchable ndarray argument.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the N-D inverse discrete Fourier Transform for a real
    spectrum.

    This function computes the N-D inverse discrete Fourier Transform
    over any number of axes in an M-D real array by means of the Fast
    Fourier Transform (FFT). By default, all axes are transformed, with the
    real transform performed over the last axis, while the remaining transforms
    are complex.

    Parameters
    ----------
    x : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`,
        as explained in the parameters section above.
        The length of the last axis transformed will be ``s[-1]//2+1``,
        while the remaining transformed axes will have lengths according to
        `s`, or unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    hfftn : The forward N-D FFT of Hermitian input.
    hfft : The 1-D FFT of Hermitian input.
    fft : The 1-D FFT, with definitions and conventions used.
    fftn : The N-D FFT.
    hfft2 : The 2-D FFT of Hermitian input.

    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `ihfft`, then the transform over the remaining axes is
    performed as by `ifftn`. The order of the output is the positive part of
    the Hermitian output signal, in the same format as `rfft`.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((2, 2, 2))
    >>> scipy.fft.ihfftn(x)
    array([[[1.+0.j, 0.+0.j], # may vary
            [0.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]]])
    >>> scipy.fft.ihfftn(x, axes=(2, 0))
    array([[[1.+0.j, 0.+0.j], # may vary
            [1.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]]])
    """
    # uarray multimethod stub: the actual transform is provided by the active
    # backend; this body only marks `x` as a dispatchable ndarray argument.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the 2-D inverse FFT of a real spectrum.

    Parameters
    ----------
    x : array_like
        The input array
    s : sequence of ints, optional
        Shape of the real input to the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft.
        Default is the last two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    ihfftn : Compute the inverse of the N-D FFT of Hermitian input.

    Notes
    -----
    This is really `ihfftn` with different defaults.
    For more details see `ihfftn`.
    """
    # uarray multimethod stub: the actual transform is provided by the active
    # backend; this body only marks `x` as a dispatchable ndarray argument.
    return (Dispatchable(x, np.ndarray),)
| 62,991
| 37.645399
| 90
|
py
|
scipy
|
scipy-main/scipy/fft/_helper.py
|
from functools import update_wrapper, lru_cache
from ._pocketfft import helper as _helper
def next_fast_len(target, real=False):
    """Find the next fast size of input data to ``fft``, for zero-padding, etc.

    SciPy's FFT algorithms gain their speed by a recursive divide and conquer
    strategy. This relies on efficient functions for small prime factors of the
    input length. Thus, the transforms are fastest when using composites of the
    prime factors handled by the fft implementation. If there are efficient
    functions for all radices <= `n`, then the result will be a number `x`
    >= ``target`` with only prime factors < `n`. (Also known as `n`-smooth
    numbers)

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.
    real : bool, optional
        True if the FFT involves real input or output (e.g., `rfft` or `hfft`
        but not `fft`). Defaults to False.

    Returns
    -------
    out : int
        The smallest fast length greater than or equal to ``target``.

    Notes
    -----
    The result of this function may change in future as performance
    considerations change, for example, if new prime factors are added.

    Calling `fft` or `ifft` with real input data performs an ``'R2C'``
    transform internally.

    Examples
    --------
    On a particular machine, an FFT of prime length takes 11.4 ms:

    >>> from scipy import fft
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> min_len = 93059  # prime length is worst case for speed
    >>> a = rng.standard_normal(min_len)
    >>> b = fft.fft(a)

    Zero-padding to the next regular length reduces computation time to
    1.6 ms, a speedup of 7.3 times:

    >>> fft.next_fast_len(min_len, real=True)
    93312
    >>> b = fft.fft(a, 93312)

    Rounding up to the next power of 2 is not optimal, taking 3.0 ms to
    compute; 1.9 times longer than the size given by ``next_fast_len``:

    >>> b = fft.fft(a, 131072)
    """
    # Placeholder body: this def exists only to carry the signature and
    # docstring; the name is rebound immediately below to the C routine.
    pass


# Directly wrap the c-function good_size but take the docstring etc., from the
# next_fast_len function above
next_fast_len = update_wrapper(lru_cache(_helper.good_size), next_fast_len)
next_fast_len.__wrapped__ = _helper.good_size
def _init_nd_shape_and_axes(x, shape, axes):
    """Handle shape and axes arguments for N-D transforms.

    Returns the shape and axes in a standard form, taking into account negative
    values and checking for various potential errors.

    Parameters
    ----------
    x : array_like
        The input array.
    shape : int or array_like of ints or None
        The shape of the result. If both `shape` and `axes` (see below) are
        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
        not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
        If `shape` is -1, the size of the corresponding dimension of `x` is
        used.
    axes : int or array_like of ints or None
        Axes along which the calculation is computed.
        The default is over all axes.
        Negative indices are automatically converted to their positive
        counterparts.

    Returns
    -------
    shape : array
        The shape of the result. It is a 1-D integer array.
    axes : array
        Axes along which the calculation is computed. It is a 1-D integer array.
    """
    # Thin wrapper: all validation/normalization lives in the pocketfft helper.
    return _helper._init_nd_shape_and_axes(x, shape, axes)
| 3,435
| 32.686275
| 80
|
py
|
scipy
|
scipy-main/scipy/fft/_fftlog.py
|
'''Fast Hankel transforms using the FFTLog algorithm.
The implementation closely follows the Fortran code of Hamilton (2000).
added: 14/11/2020 Nicolas Tessore <n.tessore@ucl.ac.uk>
'''
import numpy as np
from warnings import warn
from ._basic import rfft, irfft
from ..special import loggamma, poch
__all__ = [
    'fht', 'ifht',
    'fhtoffset',
]

# constants
LN_2 = np.log(2)  # natural log of 2, used by fhtcoeff and fhtoffset below
def fht(a, dln, mu, offset=0.0, bias=0.0):
    r'''Compute the fast Hankel transform.

    Computes the discrete Hankel transform of a logarithmically spaced periodic
    sequence using the FFTLog algorithm [1]_, [2]_.

    Parameters
    ----------
    a : array_like (..., n)
        Real periodic input array, uniformly logarithmically spaced. For
        multidimensional input, the transform is performed over the last axis.
    dln : float
        Uniform logarithmic spacing of the input array.
    mu : float
        Order of the Hankel transform, any positive or negative real number.
    offset : float, optional
        Offset of the uniform logarithmic spacing of the output array.
    bias : float, optional
        Exponent of power law bias, any positive or negative real number.

    Returns
    -------
    A : array_like (..., n)
        The transformed output array, which is real, periodic, uniformly
        logarithmically spaced, and of the same shape as the input array.

    See Also
    --------
    ifht : The inverse of `fht`.
    fhtoffset : Return an optimal offset for `fht`.

    Notes
    -----
    This function computes a discrete version of the Hankel transform

    .. math::

        A(k) = \int_{0}^{\infty} \! a(r) \, J_\mu(kr) \, k \, dr \;,

    where :math:`J_\mu` is the Bessel function of order :math:`\mu`. The index
    :math:`\mu` may be any real number, positive or negative.

    The input array `a` is a periodic sequence of length :math:`n`, uniformly
    logarithmically spaced with spacing `dln`,

    .. math::

        a_j = a(r_j) \;, \quad
        r_j = r_c \exp[(j-j_c) \, \mathtt{dln}]

    centred about the point :math:`r_c`. Note that the central index
    :math:`j_c = (n-1)/2` is half-integral if :math:`n` is even, so that
    :math:`r_c` falls between two input elements. Similarly, the output
    array `A` is a periodic sequence of length :math:`n`, also uniformly
    logarithmically spaced with spacing `dln`

    .. math::

        A_j = A(k_j) \;, \quad
        k_j = k_c \exp[(j-j_c) \, \mathtt{dln}]

    centred about the point :math:`k_c`.

    The centre points :math:`r_c` and :math:`k_c` of the periodic intervals may
    be chosen arbitrarily, but it would be usual to choose the product
    :math:`k_c r_c = k_j r_{n-1-j} = k_{n-1-j} r_j` to be unity. This can be
    changed using the `offset` parameter, which controls the logarithmic offset
    :math:`\log(k_c) = \mathtt{offset} - \log(r_c)` of the output array.

    Choosing an optimal value for `offset` may reduce ringing of the discrete
    Hankel transform.

    If the `bias` parameter is nonzero, this function computes a discrete
    version of the biased Hankel transform

    .. math::

        A(k) = \int_{0}^{\infty} \! a_q(r) \, (kr)^q \, J_\mu(kr) \, k \, dr

    where :math:`q` is the value of `bias`, and a power law bias
    :math:`a_q(r) = a(r) \, (kr)^{-q}` is applied to the input sequence.
    Biasing the transform can help approximate the continuous transform of
    :math:`a(r)` if there is a value :math:`q` such that :math:`a_q(r)` is
    close to a periodic sequence, in which case the resulting :math:`A(k)` will
    be close to the continuous transform.

    References
    ----------
    .. [1] Talman J. D., 1978, J. Comp. Phys., 29, 35
    .. [2] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191)

    Examples
    --------
    This example is the adapted version of ``fftlogtest.f`` which is provided
    in [2]_. It evaluates the integral

    .. math::

        \int^\infty_0 r^{\mu+1} \exp(-r^2/2) J_\mu(k, r) k dr
        = k^{\mu+1} \exp(-k^2/2) .

    >>> import numpy as np
    >>> from scipy import fft
    >>> import matplotlib.pyplot as plt

    Parameters for the transform.

    >>> mu = 0.0                     # Order mu of Bessel function
    >>> r = np.logspace(-7, 1, 128)  # Input evaluation points
    >>> dln = np.log(r[1]/r[0])      # Step size
    >>> offset = fft.fhtoffset(dln, initial=-6*np.log(10), mu=mu)
    >>> k = np.exp(offset)/r[::-1]   # Output evaluation points

    Define the analytical function.

    >>> def f(x, mu):
    ...     """Analytical function: x^(mu+1) exp(-x^2/2)."""
    ...     return x**(mu + 1)*np.exp(-x**2/2)

    Evaluate the function at ``r`` and compute the corresponding values at
    ``k`` using FFTLog.

    >>> a_r = f(r, mu)
    >>> fht = fft.fht(a_r, dln, mu=mu, offset=offset)

    For this example we can actually compute the analytical response (which in
    this case is the same as the input function) for comparison and compute the
    relative error.

    >>> a_k = f(k, mu)
    >>> rel_err = abs((fht-a_k)/a_k)

    Plot the result.

    >>> figargs = {'sharex': True, 'sharey': True, 'constrained_layout': True}
    >>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), **figargs)
    >>> ax1.set_title(r'$r^{\mu+1}\ \exp(-r^2/2)$')
    >>> ax1.loglog(r, a_r, 'k', lw=2)
    >>> ax1.set_xlabel('r')
    >>> ax2.set_title(r'$k^{\mu+1} \exp(-k^2/2)$')
    >>> ax2.loglog(k, a_k, 'k', lw=2, label='Analytical')
    >>> ax2.loglog(k, fht, 'C3--', lw=2, label='FFTLog')
    >>> ax2.set_xlabel('k')
    >>> ax2.legend(loc=3, framealpha=1)
    >>> ax2.set_ylim([1e-10, 1e1])
    >>> ax2b = ax2.twinx()
    >>> ax2b.loglog(k, rel_err, 'C0', label='Rel. Error (-)')
    >>> ax2b.set_ylabel('Rel. Error (-)', color='C0')
    >>> ax2b.tick_params(axis='y', labelcolor='C0')
    >>> ax2b.legend(loc=4, framealpha=1)
    >>> ax2b.set_ylim([1e-9, 1e-3])
    >>> plt.show()

    '''
    # size of transform
    n = np.shape(a)[-1]

    # bias input array
    if bias != 0:
        # a_q(r) = a(r) (r/r_c)^{-q}
        j_c = (n-1)/2
        j = np.arange(n)
        a = a * np.exp(-bias*(j - j_c)*dln)

    # compute FHT coefficients
    u = fhtcoeff(n, dln, mu, offset=offset, bias=bias)

    # transform
    A = _fhtq(a, u)

    # bias output array
    if bias != 0:
        # A(k) = A_q(k) (k/k_c)^{-q} (k_c r_c)^{-q}
        # NOTE: `j` and `j_c` are defined above; both branches are guarded by
        # the same `bias != 0` condition.
        A *= np.exp(-bias*((j - j_c)*dln + offset))

    return A
def ifht(A, dln, mu, offset=0.0, bias=0.0):
    r'''Compute the inverse fast Hankel transform.

    Computes the discrete inverse Hankel transform of a logarithmically spaced
    periodic sequence. This is the inverse operation to `fht`.

    Parameters
    ----------
    A : array_like (..., n)
        Real periodic input array, uniformly logarithmically spaced. For
        multidimensional input, the transform is performed over the last axis.
    dln : float
        Uniform logarithmic spacing of the input array.
    mu : float
        Order of the Hankel transform, any positive or negative real number.
    offset : float, optional
        Offset of the uniform logarithmic spacing of the output array.
    bias : float, optional
        Exponent of power law bias, any positive or negative real number.

    Returns
    -------
    a : array_like (..., n)
        The transformed output array, which is real, periodic, uniformly
        logarithmically spaced, and of the same shape as the input array.

    See Also
    --------
    fht : Definition of the fast Hankel transform.
    fhtoffset : Return an optimal offset for `ifht`.

    Notes
    -----
    This function computes a discrete version of the Hankel transform

    .. math::

        a(r) = \int_{0}^{\infty} \! A(k) \, J_\mu(kr) \, r \, dk \;,

    where :math:`J_\mu` is the Bessel function of order :math:`\mu`. The index
    :math:`\mu` may be any real number, positive or negative.

    See `fht` for further details.
    '''
    # size of transform
    n = np.shape(A)[-1]

    # bias input array
    if bias != 0:
        # A_q(k) = A(k) (k/k_c)^{q} (k_c r_c)^{q}
        j_c = (n-1)/2
        j = np.arange(n)
        A = A * np.exp(bias*((j - j_c)*dln + offset))

    # compute FHT coefficients
    u = fhtcoeff(n, dln, mu, offset=offset, bias=bias)

    # transform (inverse: divide by the conjugate coefficients in _fhtq)
    a = _fhtq(A, u, inverse=True)

    # bias output array
    if bias != 0:
        # a(r) = a_q(r) (r/r_c)^{q}
        # NOTE: `j` and `j_c` are defined above; both branches are guarded by
        # the same `bias != 0` condition.
        a /= np.exp(-bias*(j - j_c)*dln)

    return a
def fhtcoeff(n, dln, mu, offset=0.0, bias=0.0):
    '''Compute the coefficient array for a fast Hankel transform.

    Returns the ``n//2 + 1`` complex coefficients that multiply the real FFT
    of the (biased) input sequence in `_fhtq`.
    '''
    lnkr, q = offset, bias

    # Hankel transform coefficients
    # u_m = (kr)^{-i 2m pi/(n dlnr)} U_mu(q + i 2m pi/(n dlnr))
    # with U_mu(x) = 2^x Gamma((mu+1+x)/2)/Gamma((mu+1-x)/2)
    xp = (mu+1+q)/2
    xm = (mu+1-q)/2
    y = np.linspace(0, np.pi*(n//2)/(n*dln), n//2+1)
    u = np.empty(n//2+1, dtype=complex)
    v = np.empty(n//2+1, dtype=complex)
    # Build log(u_m) in place to avoid temporaries:
    u.imag[:] = y
    u.real[:] = xm
    loggamma(u, out=v)   # v = loggamma(xm + i y)
    u.real[:] = xp
    loggamma(u, out=u)   # u = loggamma(xp + i y)
    y *= 2*(LN_2 - lnkr)
    u.real -= v.real
    u.real += LN_2*q
    u.imag += v.imag
    u.imag += y
    np.exp(u, out=u)

    # fix last coefficient to be real
    u.imag[-1] = 0

    # deal with special cases
    if not np.isfinite(u[0]):
        # write u_0 = 2^q Gamma(xp)/Gamma(xm) = 2^q poch(xm, xp-xm)
        # poch() handles special cases for negative integers correctly
        u[0] = 2**q * poch(xm, xp-xm)
        # the coefficient may be inf or 0, meaning the transform or the
        # inverse transform, respectively, is singular

    return u
def fhtoffset(dln, mu, initial=0.0, bias=0.0):
    '''Return optimal offset for a fast Hankel transform.

    Starting from `initial`, find the nearest logarithmic offset that fulfils
    the low-ringing condition of [1]_ for the fast Hankel transform `fht`
    with logarithmic spacing `dln`, order `mu` and power-law `bias`.

    Parameters
    ----------
    dln : float
        Uniform logarithmic spacing of the transform.
    mu : float
        Order of the Hankel transform, any positive or negative real number.
    initial : float, optional
        Initial value for the offset. Returns the closest value that fulfils
        the low-ringing condition.
    bias : float, optional
        Exponent of power law bias, any positive or negative real number.

    Returns
    -------
    offset : float
        Optimal offset of the uniform logarithmic spacing of the transform
        that fulfils a low-ringing condition.

    See Also
    --------
    fht : Definition of the fast Hankel transform.

    References
    ----------
    .. [1] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191)
    '''
    # Gamma-function phases at the Nyquist frequency y = pi/(2 dln).
    half_plus = (mu + 1 + bias)/2
    half_minus = (mu + 1 - bias)/2
    y_nyq = np.pi/(2*dln)
    phase = (loggamma(half_plus + 1j*y_nyq).imag
             + loggamma(half_minus + 1j*y_nyq).imag)

    # Fractional number of periods by which `initial` misses the low-ringing
    # condition; shift by the fractional part, keeping the result closest to
    # `initial`.
    periods = (LN_2 - initial)/dln + phase/np.pi
    return initial + (periods - np.round(periods))*dln
def _fhtq(a, u, inverse=False):
'''Compute the biased fast Hankel transform.
This is the basic FFTLog routine.
'''
# size of transform
n = np.shape(a)[-1]
# check for singular transform or singular inverse transform
if np.isinf(u[0]) and not inverse:
warn('singular transform; consider changing the bias')
# fix coefficient to obtain (potentially correct) transform anyway
u = u.copy()
u[0] = 0
elif u[0] == 0 and inverse:
warn('singular inverse transform; consider changing the bias')
# fix coefficient to obtain (potentially correct) inverse anyway
u = u.copy()
u[0] = np.inf
# biased fast Hankel transform via real FFT
A = rfft(a, axis=-1)
if not inverse:
# forward transform
A *= u
else:
# backward transform
A /= u.conj()
A = irfft(A, n, axis=-1)
A = A[..., ::-1]
return A
| 11,879
| 29.383632
| 79
|
py
|
scipy
|
scipy-main/scipy/fft/_fftlog_multimethods.py
|
'''Multimethods for fast Hankel transforms.
'''
import numpy as np
from ._basic import _dispatch
from ._fftlog import fht as _fht
from ._fftlog import ifht as _ifht
from scipy._lib.uarray import Dispatchable
__all__ = ['fht', 'ifht']
@_dispatch
def fht(a, dln, mu, offset=0.0, bias=0.0):
    """fht multimethod."""
    # uarray multimethod stub: marks `a` as the dispatchable array argument;
    # the concrete implementation is scipy.fft._fftlog.fht.
    return (Dispatchable(a, np.ndarray),)
@_dispatch
def ifht(A, dln, mu, offset=0.0, bias=0.0):
    """ifht multimethod."""
    # uarray multimethod stub: marks `A` as the dispatchable array argument;
    # the concrete implementation is scipy.fft._fftlog.ifht.
    return (Dispatchable(A, np.ndarray),)
# Copy the full docstrings from the concrete implementations onto the
# multimethod stubs, so interactive help shows the real documentation.
fht.__doc__ = _fht.__doc__
ifht.__doc__ = _ifht.__doc__
| 575
| 18.2
| 43
|
py
|
scipy
|
scipy-main/scipy/fft/__init__.py
|
"""
==============================================
Discrete Fourier transforms (:mod:`scipy.fft`)
==============================================
.. currentmodule:: scipy.fft
Fast Fourier Transforms (FFTs)
==============================
.. autosummary::
:toctree: generated/
fft - Fast (discrete) Fourier Transform (FFT)
ifft - Inverse FFT
fft2 - 2-D FFT
ifft2 - 2-D inverse FFT
fftn - N-D FFT
ifftn - N-D inverse FFT
rfft - FFT of strictly real-valued sequence
irfft - Inverse of rfft
rfft2 - 2-D FFT of real sequence
irfft2 - Inverse of rfft2
rfftn - N-D FFT of real sequence
irfftn - Inverse of rfftn
hfft - FFT of a Hermitian sequence (real spectrum)
ihfft - Inverse of hfft
hfft2 - 2-D FFT of a Hermitian sequence
ihfft2 - Inverse of hfft2
hfftn - N-D FFT of a Hermitian sequence
ihfftn - Inverse of hfftn
Discrete Sin and Cosine Transforms (DST and DCT)
================================================
.. autosummary::
:toctree: generated/
dct - Discrete cosine transform
idct - Inverse discrete cosine transform
dctn - N-D Discrete cosine transform
idctn - N-D Inverse discrete cosine transform
dst - Discrete sine transform
idst - Inverse discrete sine transform
dstn - N-D Discrete sine transform
idstn - N-D Inverse discrete sine transform
Fast Hankel Transforms
======================
.. autosummary::
:toctree: generated/
fht - Fast Hankel transform
ifht - Inverse of fht
Helper functions
================
.. autosummary::
:toctree: generated/
fftshift - Shift the zero-frequency component to the center of the spectrum
ifftshift - The inverse of `fftshift`
fftfreq - Return the Discrete Fourier Transform sample frequencies
rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
fhtoffset - Compute an optimal offset for the Fast Hankel Transform
next_fast_len - Find the optimal length to zero-pad an FFT for speed
set_workers - Context manager to set default number of workers
get_workers - Get the current default number of workers
Backend control
===============
.. autosummary::
:toctree: generated/
set_backend - Context manager to set the backend within a fixed scope
skip_backend - Context manager to skip a backend within a fixed scope
set_global_backend - Sets the global fft backend
register_backend - Register a backend for permanent use
"""
from ._basic import (
fft, ifft, fft2, ifft2, fftn, ifftn,
rfft, irfft, rfft2, irfft2, rfftn, irfftn,
hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn)
from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn
from ._fftlog import fhtoffset
from ._fftlog_multimethods import fht, ifht
from ._helper import next_fast_len
from ._backend import (set_backend, skip_backend, set_global_backend,
register_backend)
from numpy.fft import fftfreq, rfftfreq, fftshift, ifftshift
from ._pocketfft.helper import set_workers, get_workers
# Public API: keep in sync with the imports above and the autosummary tables
# in the module docstring.
__all__ = [
    'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
    'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
    'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn',
    'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
    'next_fast_len',
    'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn',
    'fht', 'ifht',
    'fhtoffset',
    'set_backend', 'skip_backend', 'set_global_backend', 'register_backend',
    'get_workers', 'set_workers']

# Expose a test runner (``scipy.fft.test()``) without leaking the helper name.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 3,567
| 30.298246
| 78
|
py
|
scipy
|
scipy-main/scipy/fft/_debug_backends.py
|
import numpy as np
class NumPyBackend:
    """uarray backend that delegates ``numpy.scipy.fft`` calls to ``numpy.fft``."""

    __ua_domain__ = "numpy.scipy.fft"

    @staticmethod
    def __ua_function__(method, args, kwargs):
        # numpy.fft has no overwrite_x concept, so drop it silently.
        kwargs.pop("overwrite_x", None)
        impl = getattr(np.fft, method.__name__, None)
        if impl is None:
            # Signal uarray to try the next backend.
            return NotImplemented
        return impl(*args, **kwargs)
class EchoBackend:
    """Backend that just prints the __ua_function__ arguments"""
    __ua_domain__ = "numpy.scipy.fft"

    @staticmethod
    def __ua_function__(method, args, kwargs):
        # Debugging aid: dump the dispatched multimethod and its arguments.
        # Implicitly returns None rather than NotImplemented, so uarray
        # treats the call as handled (compare NumPyBackend above).
        print(method, args, kwargs, sep='\n')
| 598
| 25.043478
| 64
|
py
|
scipy
|
scipy-main/scipy/fft/_backend.py
|
import scipy._lib.uarray as ua
from . import _fftlog
from . import _pocketfft
class _ScipyBackend:
    """The default backend for fft calculations

    Notes
    -----
    We use the domain ``numpy.scipy`` rather than ``scipy`` because ``uarray``
    treats the domain as a hierarchy. This means the user can install a single
    backend for ``numpy`` and have it implement ``numpy.scipy.fft`` as well.
    """
    __ua_domain__ = "numpy.scipy.fft"

    @staticmethod
    def __ua_function__(method, args, kwargs):
        # Look up the implementation by multimethod name: the basic/real
        # transforms live in _pocketfft; fall back to _fftlog for the fast
        # Hankel transforms.
        fn = getattr(_pocketfft, method.__name__, None)
        if fn is None:
            fn = getattr(_fftlog, method.__name__, None)
        if fn is None:
            # Signal uarray to try the next backend.
            return NotImplemented
        return fn(*args, **kwargs)
# Registry of backends addressable by name via the public backend-control
# functions (set_backend, set_global_backend, register_backend, skip_backend).
_named_backends = {
    'scipy': _ScipyBackend,
}
def _backend_from_arg(backend):
"""Maps strings to known backends and validates the backend"""
if isinstance(backend, str):
try:
backend = _named_backends[backend]
except KeyError as e:
raise ValueError(f'Unknown backend {backend}') from e
if backend.__ua_domain__ != 'numpy.scipy.fft':
raise ValueError('Backend does not implement "numpy.scipy.fft"')
return backend
def set_global_backend(backend, coerce=False, only=False, try_last=False):
    """Sets the global fft backend

    This utility method replaces the default backend for permanent use. It
    will be tried in the list of backends automatically, unless the
    ``only`` flag is set on a backend. This will be the first tried
    backend outside the :obj:`set_backend` context manager.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to use.
        Can either be a ``str`` containing the name of a known backend
        {'scipy'} or an object that implements the uarray protocol.
    coerce : bool
        Whether to coerce input types when trying this backend.
    only : bool
        If ``True``, no more backends will be tried if this fails.
        Implied by ``coerce=True``.
    try_last : bool
        If ``True``, the global backend is tried after registered backends.

    Raises
    ------
    ValueError: If the backend does not implement ``numpy.scipy.fft``.

    Notes
    -----
    This will overwrite the previously set global backend, which, by default, is
    the SciPy implementation.

    Examples
    --------
    We can set the global fft backend:

    >>> from scipy.fft import fft, set_global_backend
    >>> set_global_backend("scipy")  # Sets global backend. "scipy" is the default backend.
    >>> fft([1])  # Calls the global backend
    array([1.+0.j])
    """
    # Resolve/validate first so a bad argument raises before uarray is touched.
    backend = _backend_from_arg(backend)
    ua.set_global_backend(backend, coerce=coerce, only=only, try_last=try_last)
def register_backend(backend):
    """
    Register a backend for permanent use.

    Registered backends have the lowest priority and will be tried after the
    global backend.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to use.
        Can either be a ``str`` containing the name of a known backend
        {'scipy'} or an object that implements the uarray protocol.

    Raises
    ------
    ValueError: If the backend does not implement ``numpy.scipy.fft``.

    Examples
    --------
    We can register a new fft backend:

    >>> from scipy.fft import fft, register_backend, set_global_backend
    >>> class NoopBackend:  # Define an invalid Backend
    ...     __ua_domain__ = "numpy.scipy.fft"
    ...     def __ua_function__(self, func, args, kwargs):
    ...         return NotImplemented
    >>> set_global_backend(NoopBackend())  # Set the invalid backend as global
    >>> register_backend("scipy")  # Register a new backend
    >>> fft([1])  # The registered backend is called because the global backend returns `NotImplemented`
    array([1.+0.j])
    >>> set_global_backend("scipy")  # Restore global backend to default
    """
    # Resolve/validate first so a bad argument raises before uarray is touched.
    backend = _backend_from_arg(backend)
    ua.register_backend(backend)
def set_backend(backend, coerce=False, only=False):
    """Context manager to set the backend within a fixed scope.

    Upon entering the ``with`` statement, the given backend will be added to
    the list of available backends with the highest priority. Upon exit, the
    backend is reset to the state before entering the scope.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to use.
        Can either be a ``str`` containing the name of a known backend
        {'scipy'} or an object that implements the uarray protocol.
    coerce : bool, optional
        Whether to allow expensive conversions for the ``x`` parameter. e.g.,
        copying a NumPy array to the GPU for a CuPy backend. Implies ``only``.
    only : bool, optional
        If only is ``True`` and this backend returns ``NotImplemented``, then a
        BackendNotImplemented error will be raised immediately. Ignoring any
        lower priority backends.

    Examples
    --------
    >>> import scipy.fft as fft
    >>> with fft.set_backend('scipy', only=True):
    ...     fft.fft([1])  # Always calls the scipy implementation
    array([1.+0.j])
    """
    # Delegate scope management to uarray; it returns the context manager.
    return ua.set_backend(_backend_from_arg(backend), coerce=coerce, only=only)
def skip_backend(backend):
    """Context manager to skip a backend within a fixed scope.

    Within the context of a ``with`` statement, the given backend will not be
    called. This covers backends registered both locally and globally. Upon
    exit, the backend will again be considered.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to skip.
        Can either be a ``str`` containing the name of a known backend
        {'scipy'} or an object that implements the uarray protocol.

    Examples
    --------
    >>> import scipy.fft as fft
    >>> fft.fft([1])  # Calls default SciPy backend
    array([1.+0.j])
    >>> with fft.skip_backend('scipy'):  # We explicitly skip the SciPy backend
    ...     fft.fft([1])                 # leaving no implementation available
    Traceback (most recent call last):
        ...
    BackendNotImplementedError: No selected backends had an implementation ...
    """
    # Resolve the name and hand the scoped exclusion off to uarray.
    return ua.skip_backend(_backend_from_arg(backend))
# Install the SciPy implementation as the default global backend. ``try_last``
# gives it the lowest priority, so user-registered backends are tried first.
set_global_backend('scipy', try_last=True)
| 6,388
| 32.276042
| 104
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/setup.py
|
def pre_build_hook(build_ext, ext):
    """Adjust compile/link flags for the pypocketfft extension before build.

    Adds MSVC exception-handling flags on Windows; otherwise enables pthread
    support when available and hides library symbols.
    """
    from scipy._build_utils.compiler_helper import (
        set_cxx_flags_hook, try_add_flag, try_compile, has_flag)
    compiler = build_ext._cxx_compiler
    compile_args = ext.extra_compile_args
    set_cxx_flags_hook(build_ext, ext)
    if compiler.compiler_type == 'msvc':
        # MSVC needs explicit C++ exception-handling semantics.
        compile_args.append('/EHsc')
        return
    # Use pthreads if available
    pthread_probe = ('#include <pthread.h>\n'
                     'int main(int argc, char **argv) {}')
    if try_compile(compiler, code=pthread_probe):
        ext.define_macros.append(('POCKETFFT_PTHREADS', None))
        if not has_flag(compiler, '-pthread'):
            raise RuntimeError("Build failed: System has pthreads header "
                               "but could not compile with -pthread option")
        compile_args.append('-pthread')
        ext.extra_link_args.append('-pthread')
    # Don't export library symbols
    try_add_flag(compile_args, compiler, '-fvisibility=hidden')
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``_pocketfft`` package."""
    from numpy.distutils.misc_util import Configuration
    import pybind11
    config = Configuration('_pocketfft', parent_package, top_path)
    # pybind11 headers (both the isolated and the full include directories).
    pybind_includes = [pybind11.get_include(True), pybind11.get_include(False)]
    pypocketfft = config.add_extension('pypocketfft',
                                       sources=['pypocketfft.cxx'],
                                       depends=['pocketfft_hdronly.h'],
                                       include_dirs=pybind_includes,
                                       language='c++')
    # Hook in per-compiler flag tweaks (pthreads, /EHsc, symbol visibility).
    pypocketfft._pre_build_hook = pre_build_hook
    config.add_data_files('LICENSE.md')
    config.add_data_dir('tests')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 1,819
| 36.142857
| 80
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/helper.py
|
from numbers import Number
import operator
import os
import threading
import contextlib
import numpy as np
# good_size is exposed (and used) from this import
from .pypocketfft import good_size # noqa: F401
# Thread-local store for the default-workers setting (see ``set_workers``).
_config = threading.local()
# CPU count, used to resolve negative ``workers`` values (``-1`` == all CPUs).
_cpu_count = os.cpu_count()
def _iterable_of_int(x, name=None):
"""Convert ``x`` to an iterable sequence of int
Parameters
----------
x : value, or sequence of values, convertible to int
name : str, optional
Name of the argument being converted, only used in the error message
Returns
-------
y : ``List[int]``
"""
if isinstance(x, Number):
x = (x,)
try:
x = [operator.index(a) for a in x]
except TypeError as e:
name = name or "value"
raise ValueError("{} must be a scalar or iterable of integers"
.format(name)) from e
return x
def _init_nd_shape_and_axes(x, shape, axes):
"""Handles shape and axes arguments for nd transforms"""
noshape = shape is None
noaxes = axes is None
if not noaxes:
axes = _iterable_of_int(axes, 'axes')
axes = [a + x.ndim if a < 0 else a for a in axes]
if any(a >= x.ndim or a < 0 for a in axes):
raise ValueError("axes exceeds dimensionality of input")
if len(set(axes)) != len(axes):
raise ValueError("all axes must be unique")
if not noshape:
shape = _iterable_of_int(shape, 'shape')
if axes and len(axes) != len(shape):
raise ValueError("when given, axes and shape arguments"
" have to be of the same length")
if noaxes:
if len(shape) > x.ndim:
raise ValueError("shape requires more axes than are present")
axes = range(x.ndim - len(shape), x.ndim)
shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)]
elif noaxes:
shape = list(x.shape)
axes = range(x.ndim)
else:
shape = [x.shape[a] for a in axes]
if any(s < 1 for s in shape):
raise ValueError(
f"invalid number of data points ({shape}) specified")
return shape, axes
def _asfarray(x):
"""
Convert to array with floating or complex dtype.
float16 values are also promoted to float32.
"""
if not hasattr(x, "dtype"):
x = np.asarray(x)
if x.dtype == np.float16:
return np.asarray(x, np.float32)
elif x.dtype.kind not in 'fc':
return np.asarray(x, np.float64)
# Require native byte order
dtype = x.dtype.newbyteorder('=')
# Always align input
copy = not x.flags['ALIGNED']
return np.array(x, dtype=dtype, copy=copy)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
def _fix_shape(x, shape, axes):
"""Internal auxiliary function for _raw_fft, _raw_fftnd."""
must_copy = False
# Build an nd slice with the dimensions to be read from x
index = [slice(None)]*x.ndim
for n, ax in zip(shape, axes):
if x.shape[ax] >= n:
index[ax] = slice(0, n)
else:
index[ax] = slice(0, x.shape[ax])
must_copy = True
index = tuple(index)
if not must_copy:
return x[index], False
s = list(x.shape)
for n, axis in zip(shape, axes):
s[axis] = n
z = np.zeros(s, x.dtype)
z[index] = x[index]
return z, True
def _fix_shape_1d(x, n, axis):
if n < 1:
raise ValueError(
f"invalid number of data points ({n}) specified")
return _fix_shape(x, (n,), (axis,))
_NORM_MAP = {None: 0, 'backward': 0, 'ortho': 1, 'forward': 2}
def _normalization(norm, forward):
"""Returns the pypocketfft normalization mode from the norm argument"""
try:
inorm = _NORM_MAP[norm]
return inorm if forward else (2 - inorm)
except KeyError:
raise ValueError(
f'Invalid norm value {norm!r}, should '
'be "backward", "ortho" or "forward"') from None
def _workers(workers):
if workers is None:
return getattr(_config, 'default_workers', 1)
if workers < 0:
if workers >= -_cpu_count:
workers += 1 + _cpu_count
else:
raise ValueError("workers value out of range; got {}, must not be"
" less than {}".format(workers, -_cpu_count))
elif workers == 0:
raise ValueError("workers must not be zero")
return workers
@contextlib.contextmanager
def set_workers(workers):
    """Context manager for the default number of workers used in `scipy.fft`

    Parameters
    ----------
    workers : int
        The default number of workers to use

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import fft, signal
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal((128, 64))
    >>> with fft.set_workers(4):
    ...     y = signal.fftconvolve(x, x)

    """
    previous = get_workers()
    # Validate and normalize before installing as the thread-local default.
    _config.default_workers = _workers(operator.index(workers))
    try:
        yield
    finally:
        # Restore whatever was in effect before entering the context.
        _config.default_workers = previous
def get_workers():
    """Returns the default number of workers within the current context

    Examples
    --------
    >>> from scipy import fft
    >>> fft.get_workers()
    1
    >>> with fft.set_workers(4):
    ...     fft.get_workers()
    4
    """
    try:
        return _config.default_workers
    except AttributeError:
        # No default has been set in this thread yet.
        return 1
| 5,721
| 25.368664
| 79
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/realtransforms.py
|
import numpy as np
from . import pypocketfft as pfft
from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied,
_fix_shape, _fix_shape_1d, _normalization, _workers)
import functools
def _r2r(forward, transform, x, type=2, n=None, axis=-1, norm=None,
         overwrite_x=False, workers=None, orthogonalize=None):
    """Forward or backward 1-D DCT/DST

    Parameters
    ----------
    forward : bool
        Transform direction (determines type and normalisation)
    transform : {pypocketfft.dct, pypocketfft.dst}
        The transform to perform
    """
    tmp = _asfarray(x)
    # If conversion already copied the input, it is safe to overwrite.
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    norm = _normalization(norm, forward)
    workers = _workers(workers)
    if not forward:
        # Types 2 and 3 are inverses of one another, so a backward transform
        # is implemented as the forward transform of the dual type.
        if type == 2:
            type = 3
        elif type == 3:
            type = 2
    if n is not None:
        tmp, copied = _fix_shape_1d(tmp, n, axis)
        overwrite_x = overwrite_x or copied
    elif tmp.shape[axis] < 1:
        raise ValueError("invalid number of data points ({}) specified"
                         .format(tmp.shape[axis]))
    out = (tmp if overwrite_x else None)
    # For complex input, transform real and imaginary components separably
    # NOTE(review): this branch does not forward ``orthogonalize`` to the
    # kernel, unlike the real path below — confirm intentional.
    if np.iscomplexobj(x):
        out = np.empty_like(tmp) if out is None else out
        transform(tmp.real, type, (axis,), norm, out.real, workers)
        transform(tmp.imag, type, (axis,), norm, out.imag, workers)
        return out
    return transform(tmp, type, (axis,), norm, out, workers, orthogonalize)


# Public 1-D real transforms: bake in direction and pypocketfft kernel,
# and give each partial a proper __name__ for error messages/introspection.
dct = functools.partial(_r2r, True, pfft.dct)
dct.__name__ = 'dct'
idct = functools.partial(_r2r, False, pfft.dct)
idct.__name__ = 'idct'
dst = functools.partial(_r2r, True, pfft.dst)
dst.__name__ = 'dst'
idst = functools.partial(_r2r, False, pfft.dst)
idst.__name__ = 'idst'
def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None,
          overwrite_x=False, workers=None, orthogonalize=None):
    """Forward or backward nd DCT/DST

    Parameters
    ----------
    forward : bool
        Transform direction (determines type and normalisation)
    transform : {pypocketfft.dct, pypocketfft.dst}
        The transform to perform
    """
    tmp = _asfarray(x)
    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    if len(axes) == 0:
        # Nothing to transform: return the input unchanged.
        return x
    tmp, copied = _fix_shape(tmp, shape, axes)
    overwrite_x = overwrite_x or copied
    if not forward:
        # Types 2 and 3 are inverses of one another (see _r2r).
        if type == 2:
            type = 3
        elif type == 3:
            type = 2
    norm = _normalization(norm, forward)
    workers = _workers(workers)
    out = (tmp if overwrite_x else None)
    # For complex input, transform real and imaginary components separably
    # NOTE(review): ``orthogonalize`` is not forwarded here — see _r2r.
    if np.iscomplexobj(x):
        out = np.empty_like(tmp) if out is None else out
        transform(tmp.real, type, axes, norm, out.real, workers)
        transform(tmp.imag, type, axes, norm, out.imag, workers)
        return out
    return transform(tmp, type, axes, norm, out, workers, orthogonalize)


# Public n-D real transforms, analogous to the 1-D partials above.
dctn = functools.partial(_r2rn, True, pfft.dct)
dctn.__name__ = 'dctn'
idctn = functools.partial(_r2rn, False, pfft.dct)
idctn.__name__ = 'idctn'
dstn = functools.partial(_r2rn, True, pfft.dst)
dstn.__name__ = 'dstn'
idstn = functools.partial(_r2rn, False, pfft.dst)
idstn.__name__ = 'idstn'
| 3,378
| 29.441441
| 75
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/__init__.py
|
""" FFT backend using pypocketfft """
from .basic import *
from .realtransforms import *
from .helper import *
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 207
| 19.8
| 46
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/basic.py
|
"""
Discrete Fourier Transforms - basic.py
"""
import numpy as np
import functools
from . import pypocketfft as pfft
from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied,
_fix_shape, _fix_shape_1d, _normalization,
_workers)
def c2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
        workers=None, *, plan=None):
    """Complex-to-complex 1-D discrete Fourier transform along ``axis``.

    ``forward`` selects the direction; ``n`` crops or zero-pads the
    transformed axis. ``plan`` is reserved and must be None.
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    data = _asfarray(x)
    writable = overwrite_x or _datacopied(data, x)
    norm_code = _normalization(norm, forward)
    nworkers = _workers(workers)
    if n is not None:
        data, copied = _fix_shape_1d(data, n, axis)
        writable = writable or copied
    elif data.shape[axis] < 1:
        raise ValueError("invalid number of data points ({}) specified"
                         .format(data.shape[axis]))
    # In-place output is only possible when the working array is complex.
    out = data if (writable and data.dtype.kind == 'c') else None
    return pfft.c2c(data, (axis,), forward, norm_code, out, nworkers)
# Public 1-D complex transforms: direction is baked in via partial().
fft = functools.partial(c2c, True)
fft.__name__ = 'fft'
ifft = functools.partial(c2c, False)
ifft.__name__ = 'ifft'
def r2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
        workers=None, *, plan=None):
    """
    Discrete Fourier transform of a real sequence.

    Returns the half-spectrum of a real-input FFT along ``axis``.
    ``plan`` is reserved and must be None.
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    data = _asfarray(x)
    norm_code = _normalization(norm, forward)
    nworkers = _workers(workers)
    if not np.isrealobj(data):
        raise TypeError("x must be a real sequence")
    if n is not None:
        data, _ = _fix_shape_1d(data, n, axis)
    elif data.shape[axis] < 1:
        raise ValueError("invalid number of data points ({}) specified"
                         .format(data.shape[axis]))
    # Note: overwrite_x is not utilised
    return pfft.r2c(data, (axis,), forward, norm_code, None, nworkers)
# Real-input transforms: forward gives rfft, backward gives ihfft.
rfft = functools.partial(r2c, True)
rfft.__name__ = 'rfft'
ihfft = functools.partial(r2c, False)
ihfft.__name__ = 'ihfft'
def c2r(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
        workers=None, *, plan=None):
    """
    Return inverse discrete Fourier transform of real sequence x.

    The input is interpreted as a half-spectrum with Hermitian symmetry
    along ``axis``; ``n`` is the length of the real output and defaults to
    ``2*(m - 1)`` for input length ``m``.

    Raises
    ------
    NotImplementedError
        If a precomputed ``plan`` is supplied.
    ValueError
        If the implied number of output points is less than one.
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    tmp = _asfarray(x)
    norm = _normalization(norm, forward)
    workers = _workers(workers)

    # TODO: Optimize for hermitian and real?
    if np.isrealobj(tmp):
        tmp = tmp + 0.j

    # Last axis utilizes hermitian symmetry
    if n is None:
        n = (tmp.shape[axis] - 1) * 2
        if n < 1:
            # Message casing and formatting made consistent with the other
            # "invalid number of data points" errors in this package.
            raise ValueError(
                f"invalid number of data points ({n}) specified")
    else:
        tmp, _ = _fix_shape_1d(tmp, (n//2) + 1, axis)

    # Note: overwrite_x is not utilized
    return pfft.c2r(tmp, (axis,), n, forward, norm, None, workers)
# Hermitian-symmetric transforms: forward gives hfft, backward gives irfft.
hfft = functools.partial(c2r, True)
hfft.__name__ = 'hfft'
irfft = functools.partial(c2r, False)
irfft.__name__ = 'irfft'
# The 2-D transforms below are thin wrappers: each rejects a precomputed
# ``plan`` and delegates to its n-dimensional counterpart, defaulting to the
# last two axes.
def fft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
         *, plan=None):
    """
    2-D discrete Fourier transform.
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    return fftn(x, s, axes, norm, overwrite_x, workers)


def ifft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
          *, plan=None):
    """
    2-D discrete inverse Fourier transform of real or complex sequence.
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    return ifftn(x, s, axes, norm, overwrite_x, workers)


def rfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
          *, plan=None):
    """
    2-D discrete Fourier transform of a real sequence
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    return rfftn(x, s, axes, norm, overwrite_x, workers)


def irfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
           *, plan=None):
    """
    2-D discrete inverse Fourier transform of a real sequence
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    return irfftn(x, s, axes, norm, overwrite_x, workers)


def hfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
          *, plan=None):
    """
    2-D discrete Fourier transform of a Hermitian sequence
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    return hfftn(x, s, axes, norm, overwrite_x, workers)


def ihfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
           *, plan=None):
    """
    2-D discrete inverse Fourier transform of a Hermitian sequence
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    return ihfftn(x, s, axes, norm, overwrite_x, workers)
def c2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, *, plan=None):
    """
    Return multidimensional discrete Fourier transform.
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    tmp = _asfarray(x)
    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    workers = _workers(workers)
    if len(axes) == 0:
        # Nothing to transform: return the input unchanged.
        return x
    tmp, copied = _fix_shape(tmp, shape, axes)
    overwrite_x = overwrite_x or copied
    norm = _normalization(norm, forward)
    # In-place output only when the working buffer is complex and clobberable.
    out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None)
    return pfft.c2c(tmp, axes, forward, norm, out, workers)


# n-D complex transforms with the direction baked in.
fftn = functools.partial(c2cn, True)
fftn.__name__ = 'fftn'
ifftn = functools.partial(c2cn, False)
ifftn.__name__ = 'ifftn'
def r2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, *, plan=None):
    """Return multidimensional discrete Fourier transform of real input"""
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    tmp = _asfarray(x)
    if not np.isrealobj(tmp):
        raise TypeError("x must be a real sequence")
    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
    tmp, _ = _fix_shape(tmp, shape, axes)
    norm = _normalization(norm, forward)
    workers = _workers(workers)
    # NOTE(review): the empty-axes check happens after reshaping; harmless,
    # but it differs in order from c2cn.
    if len(axes) == 0:
        raise ValueError("at least 1 axis must be transformed")
    # Note: overwrite_x is not utilized
    return pfft.r2c(tmp, axes, forward, norm, None, workers)


# n-D real-input transforms with the direction baked in.
rfftn = functools.partial(r2cn, True)
rfftn.__name__ = 'rfftn'
ihfftn = functools.partial(r2cn, False)
ihfftn.__name__ = 'ihfftn'
def c2rn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, *, plan=None):
    """Multidimensional inverse discrete fourier transform with real output"""
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet '
                                  'supported by scipy.fft functions')
    tmp = _asfarray(x)

    # TODO: Optimize for hermitian and real?
    if np.isrealobj(tmp):
        tmp = tmp + 0.j

    noshape = s is None
    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)

    if len(axes) == 0:
        raise ValueError("at least 1 axis must be transformed")

    if noshape:
        # No explicit size given: the real output length on the last
        # transformed axis follows from Hermitian symmetry.
        shape[-1] = (x.shape[axes[-1]] - 1) * 2

    norm = _normalization(norm, forward)
    workers = _workers(workers)

    # Last axis utilizes hermitian symmetry
    lastsize = shape[-1]
    shape[-1] = (shape[-1] // 2) + 1

    tmp, _ = _fix_shape(tmp, shape, axes)

    # Note: overwrite_x is not utilized
    return pfft.c2r(tmp, axes, lastsize, forward, norm, None, workers)


# n-D Hermitian-symmetric transforms with the direction baked in.
hfftn = functools.partial(c2rn, True)
hfftn.__name__ = 'hfftn'
irfftn = functools.partial(c2rn, False)
irfftn.__name__ = 'irfftn'
def r2r_fftpack(forward, x, n=None, axis=-1, norm=None, overwrite_x=False):
    """FFT of a real sequence, returning fftpack half complex format"""
    tmp = _asfarray(x)
    overwrite_x = overwrite_x or _datacopied(tmp, x)
    norm = _normalization(norm, forward)
    # Always resolves to the context default worker count.
    workers = _workers(None)
    if tmp.dtype.kind == 'c':
        raise TypeError('x must be a real sequence')
    if n is not None:
        tmp, copied = _fix_shape_1d(tmp, n, axis)
        overwrite_x = overwrite_x or copied
    elif tmp.shape[axis] < 1:
        raise ValueError("invalid number of data points ({}) specified"
                         .format(tmp.shape[axis]))
    out = (tmp if overwrite_x else None)
    # ``forward`` is passed twice: pypocketfft's r2r_fftpack takes separate
    # direction and ordering flags, both tied to the direction here.
    return pfft.r2r_fftpack(tmp, (axis,), forward, forward, norm, out, workers)


# fftpack-compatible half-complex transforms.
rfft_fftpack = functools.partial(r2r_fftpack, True)
rfft_fftpack.__name__ = 'rfft_fftpack'
irfft_fftpack = functools.partial(r2r_fftpack, False)
irfft_fftpack.__name__ = 'irfft_fftpack'
| 9,841
| 32.026846
| 79
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/tests/test_real_transforms.py
|
from os.path import join, dirname
from typing import Callable, Dict, Tuple, Union, Type
import numpy as np
from numpy.testing import (
assert_array_almost_equal, assert_equal, assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.fft._pocketfft.realtransforms import (
dct, idct, dst, idst, dctn, idctn, dstn, idstn)
# Directory holding the shared fftpack reference-data files.
fftpack_test_dir = join(dirname(__file__), '..', '..', '..', 'fftpack', 'tests')

# Number of Matlab reference vectors / FFTW reference sizes expected on disk.
MDATA_COUNT = 8
FFTWDATA_COUNT = 14
def is_longdouble_binary_compatible():
    """Return True when ``np.longdouble`` matches the 80-bit extended layout
    the ``fftw_longdouble`` reference data was generated with.

    Decodes a little-endian 16-byte extended-precision representation of 1.0
    and compares it with ``np.longdouble(1.)``.
    """
    try:
        one = np.frombuffer(
            b'\x00\x00\x00\x00\x00\x00\x00\x80\xff\x3f\x00\x00\x00\x00\x00\x00',
            dtype='<f16')
        # np.longfloat was a deprecated alias removed in NumPy 2.0;
        # np.longdouble is the canonical spelling. Cast to a plain bool so
        # callers don't receive a length-1 array.
        return bool(one == np.longdouble(1.))
    except TypeError:
        # '<f16' is not a valid dtype on this platform.
        return False
def get_reference_data():
    """Load the Matlab/FFTW reference datasets, caching them in module globals.

    Returns a dict with keys 'FFTWDATA_LONGDOUBLE', 'FFTWDATA_DOUBLE',
    'FFTWDATA_SINGLE', 'FFTWDATA_SIZES', 'X' and 'Y'.
    """
    # BUG FIX: the original used getattr(globals(), ...), but globals() is a
    # dict and has no such *attribute*, so the cache was never read and the
    # files were reloaded on every call. dict.get performs the key lookup.
    ref = globals().get('__reference_data')
    if ref is not None:
        return ref

    # Matlab reference data
    MDATA = np.load(join(fftpack_test_dir, 'test.npz'))
    X = [MDATA['x%d' % i] for i in range(MDATA_COUNT)]
    Y = [MDATA['y%d' % i] for i in range(MDATA_COUNT)]

    # FFTW reference data: the data are organized as follows:
    # * SIZES is an array containing all available sizes
    # * for every type (1, 2, 3, 4) and every size, the array dct_type_size
    #   contains the output of the DCT applied to the input
    #   np.linspace(0, size-1, size)
    FFTWDATA_DOUBLE = np.load(join(fftpack_test_dir, 'fftw_double_ref.npz'))
    FFTWDATA_SINGLE = np.load(join(fftpack_test_dir, 'fftw_single_ref.npz'))
    FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
    assert len(FFTWDATA_SIZES) == FFTWDATA_COUNT

    if is_longdouble_binary_compatible():
        FFTWDATA_LONGDOUBLE = np.load(
            join(fftpack_test_dir, 'fftw_longdouble_ref.npz'))
    else:
        # np.longfloat -> np.longdouble (alias removed in NumPy 2.0).
        FFTWDATA_LONGDOUBLE = {k: v.astype(np.longdouble)
                               for k, v in FFTWDATA_DOUBLE.items()}

    ref = {
        'FFTWDATA_LONGDOUBLE': FFTWDATA_LONGDOUBLE,
        'FFTWDATA_DOUBLE': FFTWDATA_DOUBLE,
        'FFTWDATA_SINGLE': FFTWDATA_SINGLE,
        'FFTWDATA_SIZES': FFTWDATA_SIZES,
        'X': X,
        'Y': Y
    }

    globals()['__reference_data'] = ref
    return ref
@pytest.fixture(params=range(FFTWDATA_COUNT))
def fftwdata_size(request):
    # One FFTW reference size per parametrized case.
    return get_reference_data()['FFTWDATA_SIZES'][request.param]


@pytest.fixture(params=range(MDATA_COUNT))
def mdata_x(request):
    # One Matlab reference input vector per parametrized case.
    return get_reference_data()['X'][request.param]


@pytest.fixture(params=range(MDATA_COUNT))
def mdata_xy(request):
    # Matched Matlab (input, expected DCT-II output) pair.
    ref = get_reference_data()
    y = ref['Y'][request.param]
    x = ref['X'][request.param]
    return x, y
def fftw_dct_ref(type, size, dt):
    """Return ``(x, expected_dct, effective_dtype)`` from the FFTW reference
    data for the given DCT type, size and requested dtype.

    Integer/float16 dtypes are promoted via ``np.result_type`` to the dtype
    the transforms actually compute in.
    """
    x = np.linspace(0, size-1, size).astype(dt)
    dt = np.result_type(np.float32, dt)
    if dt == np.double:
        data = get_reference_data()['FFTWDATA_DOUBLE']
    elif dt == np.float32:
        data = get_reference_data()['FFTWDATA_SINGLE']
    # np.longfloat -> np.longdouble: the alias was removed in NumPy 2.0, and
    # even the attribute access here would raise AttributeError under it.
    elif dt == np.longdouble:
        data = get_reference_data()['FFTWDATA_LONGDOUBLE']
    else:
        # Previously a bare ValueError(); include the offending dtype.
        raise ValueError(f"unsupported dtype {dt}")
    y = (data['dct_%d_%d' % (type, size)]).astype(dt)
    return x, y, dt
def fftw_dst_ref(type, size, dt):
    """Return ``(x, expected_dst, effective_dtype)`` from the FFTW reference
    data for the given DST type, size and requested dtype.

    Integer/float16 dtypes are promoted via ``np.result_type`` to the dtype
    the transforms actually compute in.
    """
    x = np.linspace(0, size-1, size).astype(dt)
    dt = np.result_type(np.float32, dt)
    if dt == np.double:
        data = get_reference_data()['FFTWDATA_DOUBLE']
    elif dt == np.float32:
        data = get_reference_data()['FFTWDATA_SINGLE']
    # np.longfloat -> np.longdouble (alias removed in NumPy 2.0).
    elif dt == np.longdouble:
        data = get_reference_data()['FFTWDATA_LONGDOUBLE']
    else:
        # Previously a bare ValueError(); include the offending dtype.
        raise ValueError(f"unsupported dtype {dt}")
    y = (data['dst_%d_%d' % (type, size)]).astype(dt)
    return x, y, dt
def ref_2d(func, x, **kwargs):
    """Build a 2-D reference result from a 1-D transform.

    Applies ``func`` to every row, then to every column, of a copy of ``x``.
    """
    out = np.array(x, copy=True)
    nrows, ncols = out.shape
    for r in range(nrows):
        out[r, :] = func(out[r, :], **kwargs)
    for c in range(ncols):
        out[:, c] = func(out[:, c], **kwargs)
    return out
def naive_dct1(x, norm=None):
    """Textbook O(N^2) reference implementation of DCT-I."""
    data = np.array(x, copy=True)
    npts = len(data)
    denom = npts - 1
    if norm == 'ortho':
        edge_w = np.sqrt(1.0 / denom)
        inner_w = np.sqrt(2.0 / denom)
    else:
        edge_w, inner_w = 1, 2
    out = np.zeros(npts)
    for k in range(npts):
        # Interior terms first, then the two boundary contributions,
        # matching the summation order of the textbook formula.
        for n in range(1, npts - 1):
            out[k] += inner_w * data[n] * np.cos(np.pi * n * k / denom)
        out[k] += edge_w * data[0]
        out[k] += edge_w * data[npts - 1] * (-1) ** k
    if norm == 'ortho':
        out[0] *= 1 / np.sqrt(2)
        out[npts - 1] *= 1 / np.sqrt(2)
    return out
def naive_dst1(x, norm=None):
    """Textbook O(N^2) reference implementation of DST-I."""
    data = np.array(x, copy=True)
    npts = len(data)
    period = npts + 1
    out = np.zeros(npts)
    for k in range(npts):
        for n in range(npts):
            out[k] += 2 * data[n] * np.sin(np.pi * (n + 1.0) * (k + 1.0) / period)
    if norm == 'ortho':
        out *= np.sqrt(0.5 / period)
    return out
def naive_dct4(x, norm=None):
    """Textbook O(N^2) reference implementation of DCT-IV."""
    data = np.array(x, copy=True)
    npts = len(data)
    out = np.zeros(npts)
    for k in range(npts):
        for n in range(npts):
            out[k] += data[n] * np.cos(np.pi * (n + 0.5) * (k + 0.5) / npts)
    # Scaling is applied after the double sum, exactly as in the formula.
    if norm == 'ortho':
        out *= np.sqrt(2.0 / npts)
    else:
        out *= 2
    return out
def naive_dst4(x, norm=None):
    """Textbook O(N^2) reference implementation of DST-IV."""
    data = np.array(x, copy=True)
    npts = len(data)
    out = np.zeros(npts)
    for k in range(npts):
        for n in range(npts):
            out[k] += data[n] * np.sin(np.pi * (n + 0.5) * (k + 0.5) / npts)
    # Scaling is applied after the double sum, exactly as in the formula.
    if norm == 'ortho':
        out *= np.sqrt(2.0 / npts)
    else:
        out *= 2
    return out
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128, np.longcomplex])
@pytest.mark.parametrize('transform', [dct, dst, idct, idst])
def test_complex(transform, dtype):
    # Real transforms of complex input apply elementwise to the real and
    # imaginary parts, so transform(1j*x) must equal 1j*transform(x).
    y = transform(1j*np.arange(5, dtype=dtype))
    x = 1j*transform(np.arange(5))
    assert_array_almost_equal(x, y)
# Type of the tolerance table below: (transform, dtype, dct/dst type) -> the
# number of decimal places the result is expected to agree to.
DecMapType = Dict[
    Tuple[Callable[..., np.ndarray], Union[Type[np.floating], Type[int]], int],
    int,
]

# map (transform, dtype, type) -> decimal
dec_map: DecMapType = {
    # DCT
    (dct, np.double, 1): 13,
    (dct, np.float32, 1): 6,

    (dct, np.double, 2): 14,
    (dct, np.float32, 2): 5,

    (dct, np.double, 3): 14,
    (dct, np.float32, 3): 5,

    (dct, np.double, 4): 13,
    (dct, np.float32, 4): 6,

    # IDCT
    (idct, np.double, 1): 14,
    (idct, np.float32, 1): 6,

    (idct, np.double, 2): 14,
    (idct, np.float32, 2): 5,

    (idct, np.double, 3): 14,
    (idct, np.float32, 3): 5,

    (idct, np.double, 4): 14,
    (idct, np.float32, 4): 6,

    # DST
    (dst, np.double, 1): 13,
    (dst, np.float32, 1): 6,

    (dst, np.double, 2): 14,
    (dst, np.float32, 2): 6,

    (dst, np.double, 3): 14,
    (dst, np.float32, 3): 7,

    (dst, np.double, 4): 13,
    (dst, np.float32, 4): 6,

    # IDST
    (idst, np.double, 1): 14,
    (idst, np.float32, 1): 6,

    (idst, np.double, 2): 14,
    (idst, np.float32, 2): 6,

    (idst, np.double, 3): 14,
    (idst, np.float32, 3): 6,

    (idst, np.double, 4): 14,
    (idst, np.float32, 4): 6,
}

# Extend the table: longdouble reuses the double tolerances and int input is
# treated like float32 (the promotion performed by the transforms).
for k,v in dec_map.copy().items():
    if k[1] == np.double:
        dec_map[(k[0], np.longdouble, k[2])] = v
    elif k[1] == np.float32:
        dec_map[(k[0], int, k[2])] = v
@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
class TestDCT:
    def test_definition(self, rdt, type, fftwdata_size):
        # dct() must reproduce the precomputed FFTW reference output within
        # the dtype-dependent tolerance from dec_map.
        x, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)
        y = dct(x, type=type)
        assert_equal(y.dtype, dt)
        dec = dec_map[(dct, rdt, type)]
        assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))

    @pytest.mark.parametrize('size', [7, 8, 9, 16, 32, 64])
    def test_axis(self, rdt, type, size):
        # dct along an axis of a 2-D array must match per-row (or per-column)
        # 1-D transforms.
        nt = 2
        dec = dec_map[(dct, rdt, type)]
        x = np.random.randn(nt, size)
        y = dct(x, type=type)
        for j in range(nt):
            assert_array_almost_equal(y[j], dct(x[j], type=type),
                                      decimal=dec)

        x = x.T
        y = dct(x, axis=0, type=type)
        for j in range(nt):
            assert_array_almost_equal(y[:,j], dct(x[:,j], type=type),
                                      decimal=dec)
@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
def test_dct1_definition_ortho(rdt, mdata_x):
    # Test orthornomal mode.
    dec = dec_map[(dct, rdt, 1)]
    x = np.array(mdata_x, dtype=rdt)
    dt = np.result_type(np.float32, rdt)
    y = dct(x, norm='ortho', type=1)
    y2 = naive_dct1(x, norm='ortho')
    assert_equal(y.dtype, dt)
    assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))


@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
def test_dct2_definition_matlab(mdata_xy, rdt):
    # Test correspondence with matlab (orthornomal mode).
    dt = np.result_type(np.float32, rdt)
    x = np.array(mdata_xy[0], dtype=dt)

    yr = mdata_xy[1]
    y = dct(x, norm="ortho", type=2)
    dec = dec_map[(dct, rdt, 2)]
    assert_equal(y.dtype, dt)
    assert_array_almost_equal(y, yr, decimal=dec)


@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
def test_dct3_definition_ortho(mdata_x, rdt):
    # Test orthornomal mode: DCT-III must invert DCT-II under 'ortho'.
    x = np.array(mdata_x, dtype=rdt)
    dt = np.result_type(np.float32, rdt)
    y = dct(x, norm='ortho', type=2)
    xi = dct(y, norm="ortho", type=3)
    dec = dec_map[(dct, rdt, 3)]
    assert_equal(xi.dtype, dt)
    assert_array_almost_equal(xi, x, decimal=dec)


@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
def test_dct4_definition_ortho(mdata_x, rdt):
    # Test orthornomal mode against the naive O(N^2) reference.
    x = np.array(mdata_x, dtype=rdt)
    dt = np.result_type(np.float32, rdt)
    y = dct(x, norm='ortho', type=4)
    y2 = naive_dct4(x, norm='ortho')
    dec = dec_map[(dct, rdt, 4)]
    assert_equal(y.dtype, dt)
    assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
def test_idct_definition(fftwdata_size, rdt, type):
    # idct() applied to the FFTW reference output must recover the input.
    xr, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)
    x = idct(yr, type=type)
    dec = dec_map[(idct, rdt, type)]
    assert_equal(x.dtype, dt)
    assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))


@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
def test_definition(fftwdata_size, rdt, type):
    # dst() must reproduce the precomputed FFTW reference output.
    xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)
    y = dst(xr, type=type)
    dec = dec_map[(dst, rdt, type)]
    assert_equal(y.dtype, dt)
    assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))


@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
def test_dst1_definition_ortho(rdt, mdata_x):
    # Test orthornomal mode against the naive O(N^2) reference.
    dec = dec_map[(dst, rdt, 1)]
    x = np.array(mdata_x, dtype=rdt)
    dt = np.result_type(np.float32, rdt)
    y = dst(x, norm='ortho', type=1)
    y2 = naive_dst1(x, norm='ortho')
    assert_equal(y.dtype, dt)
    assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))


@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
def test_dst4_definition_ortho(rdt, mdata_x):
    # Test orthornomal mode against the naive O(N^2) reference.
    dec = dec_map[(dst, rdt, 4)]
    x = np.array(mdata_x, dtype=rdt)
    dt = np.result_type(np.float32, rdt)
    y = dst(x, norm='ortho', type=4)
    y2 = naive_dst4(x, norm='ortho')
    assert_equal(y.dtype, dt)
    assert_array_almost_equal(y, y2, decimal=dec)


@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
def test_idst_definition(fftwdata_size, rdt, type):
    # idst() applied to the FFTW reference output must recover the input.
    xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)
    x = idst(yr, type=type)
    dec = dec_map[(idst, rdt, type)]
    assert_equal(x.dtype, dt)
    assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))
@pytest.mark.parametrize('routine', [dct, dst, idct, idst])
@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longfloat])
@pytest.mark.parametrize('shape, axis', [
    ((16,), -1), ((16, 2), 0), ((2, 16), 1)
])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('norm', [None, 'ortho'])
def test_overwrite(routine, dtype, shape, axis, type, norm, overwrite_x):
    # Check input overwrite behavior: with overwrite_x=False the input array
    # must come back bitwise unchanged.
    np.random.seed(1234)
    if np.issubdtype(dtype, np.complexfloating):
        x = np.random.randn(*shape) + 1j*np.random.randn(*shape)
    else:
        x = np.random.randn(*shape)
    x = x.astype(dtype)
    x2 = x.copy()
    routine(x2, type, None, axis, norm, overwrite_x=overwrite_x)

    sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
        routine.__name__, x.dtype, x.shape, None, axis, overwrite_x)
    if not overwrite_x:
        assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
class Test_DCTN_IDCTN:
    """Round-trip and 2-D-reference tests for the N-D DCT/DST transforms."""
    # Decimal accuracy required from the s=None round-trip test.
    dec = 14
    # All four standard transform variants.
    dct_type = [1, 2, 3, 4]
    norms = [None, 'backward', 'ortho', 'forward']
    rstate = np.random.RandomState(1234)
    shape = (32, 16)
    # Fixed random matrix shared by every test in the class.
    data = rstate.randn(*shape)
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [None,
                                      1, (1,), [1],
                                      0, (0,), [0],
                                      (0, 1), [0, 1],
                                      (-2, -1), [-2, -1]])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', ['ortho'])
    def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
        # inverse(forward(x)) must reproduce x for every axes specification.
        tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
        tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
        assert_array_almost_equal(self.data, tmp, decimal=12)
    @pytest.mark.parametrize('funcn,func', [(dctn, dct), (dstn, dst)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', norms)
    def test_dctn_vs_2d_reference(self, funcn, func, dct_type, norm):
        # N-D transform must equal the 1-D transform applied per axis
        # (ref_2d helper defined elsewhere in this file).
        y1 = funcn(self.data, type=dct_type, axes=None, norm=norm)
        y2 = ref_2d(func, self.data, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)
    @pytest.mark.parametrize('funcn,func', [(idctn, idct), (idstn, idst)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', norms)
    def test_idctn_vs_2d_reference(self, funcn, func, dct_type, norm):
        # Same as above, but for the inverse transforms.
        fdata = dctn(self.data, type=dct_type, norm=norm)
        y1 = funcn(fdata, type=dct_type, norm=norm)
        y2 = ref_2d(func, fdata, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    def test_axes_and_shape(self, fforward, finverse):
        # Mismatched lengths of the s and axes arguments must be rejected.
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, s=self.data.shape[0], axes=(0, 1))
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, s=self.data.shape, axes=0)
    @pytest.mark.parametrize('fforward', [dctn, dstn])
    def test_shape(self, fforward):
        # The s argument pads the input up to the requested output shape.
        tmp = fforward(self.data, s=(128, 128), axes=None)
        assert_equal(tmp.shape, (128, 128))
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [1, (1,), [1],
                                      0, (0,), [0]])
    def test_shape_is_none_with_axes(self, fforward, finverse, axes):
        # s=None together with explicit axes must round-trip exactly.
        tmp = fforward(self.data, s=None, axes=axes, norm='ortho')
        tmp = finverse(tmp, s=None, axes=axes, norm='ortho')
        assert_array_almost_equal(self.data, tmp, decimal=self.dec)
@pytest.mark.parametrize('func', [dct, dctn, idct, idctn,
                                  dst, dstn, idst, idstn])
def test_swapped_byte_order(func):
    """Non-native byte-order input gives the same result as native order."""
    data = np.random.RandomState(1234).rand(10)
    flipped = data.astype(data.dtype.newbyteorder('S'))
    assert_allclose(func(flipped), func(data))
| 16,438
| 32.277328
| 83
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/tests/test_basic.py
|
# Created by Pearu Peterson, September 2002
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less,
assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.fft._pocketfft import (ifft, fft, fftn, ifftn,
rfft, irfft, rfftn, irfftn, fft2,
hfft, ihfft, hfftn, ihfftn)
from numpy import (arange, array, asarray, zeros, dot, exp, pi,
swapaxes, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# Composite transform sizes (products of small primes) handled by the
# pypocketfft backend; "large" vs "small" controls how expensive the
# accuracy tests are.
LARGE_COMPOSITE_SIZES = [
    2**13,
    2**5 * 3**5,
    2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
    2,
    2*3*5,
    2*2*3*3,
]
# Prime transform sizes, which cannot be decomposed into smaller radices.
LARGE_PRIME_SIZES = [
    2011
]
SMALL_PRIME_SIZES = [
    29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = f"size: {size} rdt: {rdt}"
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
    """Return uniform [0, 1) samples with the requested shape."""
    shape = tuple(size)
    return rand(*shape)
def swap_byteorder(arr):
    """Return *arr* converted to the opposite (swapped) byte order."""
    return arr.astype(arr.dtype.newbyteorder('S'))
def direct_dft(x):
    """Naive O(n^2) reference DFT, always accumulated in complex double."""
    x = asarray(x)
    n = len(x)
    # base[j] = -2j*pi*j/n; row k of the DFT matrix is exp(k*base).
    base = -2j*pi/n * arange(n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k*base), x)
    return out
def direct_idft(x):
    """Naive O(n^2) reference inverse DFT in complex double precision."""
    x = asarray(x)
    n = len(x)
    # base[j] = +2j*pi*j/n; each output is a plain sum divided by n.
    base = 2j*pi/n * arange(n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k*base), x)/n
    return out
def direct_dftn(x):
    """N-D DFT built by applying the 1-D fft along each axis in turn."""
    out = asarray(x)
    for ax in range(out.ndim):
        out = fft(out, axis=ax)
    return out
def direct_idftn(x):
    """N-D inverse DFT built by applying the 1-D ifft along each axis."""
    out = asarray(x)
    for ax in range(out.ndim):
        out = ifft(out, axis=ax)
    return out
def direct_rdft(x):
    """Naive real-input DFT: the first n//2 + 1 coefficients of the DFT."""
    x = asarray(x)
    n = len(x)
    n_out = n//2 + 1
    base = -2j*pi/n * arange(n)
    out = zeros(n_out, dtype=cdouble)
    for k in range(n_out):
        out[k] = dot(exp(k*base), x)
    return out
def direct_irdft(x, n):
    """Inverse of direct_rdft: expand the half-spectrum *x* into a full
    Hermitian-symmetric spectrum of length *n*, inverse-DFT it with the
    naive reference, and return the real part."""
    x = asarray(x)
    full = zeros(n, dtype=cdouble)
    for k in range(n//2 + 1):
        full[k] = x[k]
        # Mirror all interior bins; DC (k == 0) and, for even n, the
        # Nyquist bin (2*k == n) have no conjugate partner.
        if k > 0 and 2*k < n:
            full[n-k] = np.conj(x[k])
    return direct_idft(full).real
def direct_rdftn(x):
    """Reference N-D real FFT: rfft over the last axis, then full fft over
    every remaining axis."""
    half = rfft(x)
    return fftn(half, axes=range(x.ndim - 1))
class _TestFFTBase:
    """Shared forward-FFT tests; subclasses set the complex (cdt) and real
    (rdt) dtypes under test."""
    def setup_method(self):
        # Subclasses override these with concrete dtypes.
        self.cdt = None
        self.rdt = None
        np.random.seed(1234)
    def test_definition(self):
        # fft must match the naive O(n^2) reference and keep the input's
        # complex dtype.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
        y = fft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_dft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
        assert_array_almost_equal(fft(x),direct_dft(x))
    def test_n_argument_real(self):
        # A list of real rows is transformed row-by-row with n=4.
        x1 = np.array([1,2,3,4], dtype=self.rdt)
        x2 = np.array([1,2,3,4], dtype=self.rdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def _test_n_argument_complex(self):
        # NOTE: the leading underscore keeps pytest from collecting this.
        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def test_djbfft(self):
        # Power-of-two sizes, cross-checked against numpy.fft.
        for i in range(2,14):
            n = 2**i
            x = np.arange(n)
            y = fft(x.astype(complex))
            y2 = numpy.fft.fft(x)
            assert_array_almost_equal(y,y2)
            y = fft(x)
            assert_array_almost_equal(y,y2)
    def test_invalid_sizes(self):
        # Empty input and a negative transform length must be rejected.
        assert_raises(ValueError, fft, [])
        assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
@pytest.mark.skipif(np.longdouble is np.float64,
                    reason="Long double is aliased to double")
class TestLongDoubleFFT(_TestFFTBase):
    """Forward-FFT tests at extended precision.

    Skipped when the platform aliases long double to double, for
    consistency with TestLongDoubleIFFT below (the original class lacked
    this guard, so the suite re-ran the double-precision cases under a
    misleading name on such platforms).
    """
    def setup_method(self):
        self.cdt = np.longcomplex
        self.rdt = np.longdouble
class TestDoubleFFT(_TestFFTBase):
    """Forward-FFT tests at double precision."""
    def setup_method(self):
        self.rdt = np.double
        self.cdt = np.cdouble
class TestSingleFFT(_TestFFTBase):
    """Forward-FFT tests at single precision."""
    def setup_method(self):
        self.rdt = np.float32
        self.cdt = np.complex64
class TestFloat16FFT:
    """float16 input is promoted and computed in single precision."""
    def test_1_argument_real(self):
        data = np.array([1, 2, 3, 4], dtype=np.float16)
        result = fft(data, n=4)
        assert_equal(result.dtype, np.complex64)
        assert_equal(result.shape, (4, ))
        assert_array_almost_equal(result, direct_dft(data.astype(np.float32)))
    def test_n_argument_real(self):
        row1 = np.array([1, 2, 3, 4], dtype=np.float16)
        row2 = np.array([1, 2, 3, 4], dtype=np.float16)
        result = fft([row1, row2], n=4)
        assert_equal(result.dtype, np.complex64)
        assert_equal(result.shape, (2, 4))
        # Each row transforms independently.
        assert_array_almost_equal(result[0],
                                  direct_dft(row1.astype(np.float32)))
        assert_array_almost_equal(result[1],
                                  direct_dft(row2.astype(np.float32)))
class _TestIFFTBase:
    """Shared inverse-FFT tests; subclasses set cdt/rdt and rtol/atol."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # ifft of complex input must match the naive reference and keep
        # the complex dtype.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
        y = ifft(x)
        y1 = direct_idft(x)
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_definition_real(self):
        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
        y = ifft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_idft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4,5], dtype=self.rdt)
        # NOTE(review): the assertion below re-checks the y computed above,
        # not a transform of the new x, so it is redundant here.
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_djbfft(self):
        # Power-of-two sizes, cross-checked against numpy.fft.ifft.
        for i in range(2,14):
            n = 2**i
            x = np.arange(n)
            y = ifft(x.astype(self.cdt))
            y2 = numpy.fft.ifft(x)
            assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
            y = ifft(x)
            assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
    def test_random_complex(self):
        # ifft(fft(x)) and fft(ifft(x)) must both round-trip complex input.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.cdt)
            x = random([size]).astype(self.cdt) + 1j*x
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_random_real(self):
        # Round trips of real input promote to the complex dtype.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
            x = (x + 1j*np.random.rand(size)).astype(self.cdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
    def test_invalid_sizes(self):
        # Empty input and a negative transform length must be rejected.
        assert_raises(ValueError, ifft, [])
        assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
@pytest.mark.skipif(np.longdouble is np.float64,
                    reason="Long double is aliased to double")
class TestLongDoubleIFFT(_TestIFFTBase):
    """Inverse-FFT tests at extended precision."""
    def setup_method(self):
        self.rdt = np.longdouble
        self.cdt = np.longcomplex
        self.atol = 1e-10
        self.rtol = 1e-10
class TestDoubleIFFT(_TestIFFTBase):
    """Inverse-FFT tests at double precision."""
    def setup_method(self):
        self.rdt = np.double
        self.cdt = np.cdouble
        self.atol = 1e-10
        self.rtol = 1e-10
class TestSingleIFFT(_TestIFFTBase):
    """Inverse-FFT tests at single precision (looser tolerances)."""
    def setup_method(self):
        self.rdt = np.float32
        self.cdt = np.complex64
        self.atol = 1e-4
        self.rtol = 1e-5
class _TestRFFTBase:
    """Shared real-input FFT tests; subclasses set cdt/rdt."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # Even- and odd-length inputs against the naive half-spectrum
        # reference.
        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
            x = np.array(t, dtype=self.rdt)
            y = rfft(x)
            y1 = direct_rdft(x)
            assert_array_almost_equal(y,y1)
            assert_equal(y.dtype, self.cdt)
    def test_djbfft(self):
        # Power-of-two sizes, cross-checked against numpy.fft.rfft.
        for i in range(2,14):
            n = 2**i
            x = np.arange(n)
            y1 = np.fft.rfft(x)
            y = rfft(x)
            assert_array_almost_equal(y,y1)
    def test_invalid_sizes(self):
        assert_raises(ValueError, rfft, [])
        assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
    def test_complex_input(self):
        # rfft accepts only real input.
        x = np.zeros(10, dtype=self.cdt)
        with assert_raises(TypeError, match="x must be a real sequence"):
            rfft(x)
    # See gh-5790
    class MockSeries:
        # Minimal pandas.Series-like wrapper: delegates attribute access
        # to the wrapped ndarray.
        def __init__(self, data):
            self.data = np.asarray(data)
        def __getattr__(self, item):
            try:
                return getattr(self.data, item)
            except AttributeError as e:
                raise AttributeError("'MockSeries' object "
                                     "has no attribute '{attr}'".
                                     format(attr=item)) from e
    def test_non_ndarray_with_dtype(self):
        # rfft must not mutate array-like inputs that expose a dtype.
        x = np.array([1., 2., 3., 4., 5.])
        xs = _TestRFFTBase.MockSeries(x)
        expected = [1, 2, 3, 4, 5]
        rfft(xs)
        # Data should not have been overwritten
        assert_equal(x, expected)
        assert_equal(xs.data, expected)
@pytest.mark.skipif(np.longfloat is np.float64,
                    reason="Long double is aliased to double")
class TestRFFTLongDouble(_TestRFFTBase):
    """Real-input FFT tests at extended precision."""
    def setup_method(self):
        self.rdt = np.longfloat
        self.cdt = np.longcomplex
class TestRFFTDouble(_TestRFFTBase):
    """Real-input FFT tests at double precision."""
    def setup_method(self):
        self.rdt = np.double
        self.cdt = np.cdouble
class TestRFFTSingle(_TestRFFTBase):
    """Real-input FFT tests at single precision."""
    def setup_method(self):
        self.rdt = np.float32
        self.cdt = np.complex64
class _TestIRFFTBase:
    """Shared inverse real-FFT tests; subclasses set cdt/rdt/ndec.

    Fixes relative to the original: a dead assignment to ``x1`` in
    ``test_definition`` (immediately overwritten) is removed, and
    ``test_size_accuracy`` now fails with a clear message instead of an
    accidental NameError when a subclass uses an unsupported dtype.
    """
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # x*_1 are full Hermitian-symmetric spectra; x* are the
        # corresponding half-spectra that irfft expands.
        x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
        x1 = x1_1[:5]
        x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
        x2 = x2_1[:5]
        def _test(x, xr):
            # irfft must match both the naive reference and ifft of the
            # full spectrum, and produce the real dtype.
            y = irfft(np.array(x, dtype=self.cdt), n=len(xr))
            y1 = direct_irdft(x, len(xr))
            assert_equal(y.dtype, self.rdt)
            assert_array_almost_equal(y,y1, decimal=self.ndec)
            assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
        _test(x1, x1_1)
        _test(x2, x2_1)
    def test_djbfft(self):
        # Power-of-two sizes, cross-checked against numpy.fft.irfft.
        for i in range(2,14):
            n = 2**i
            x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2)
            x[0] = 0
            if n % 2 == 0:
                x[-1] = np.real(x[-1])
            y1 = np.fft.irfft(x)
            y = irfft(x)
            assert_array_almost_equal(y,y1)
    def test_random_real(self):
        # Round trips in both directions for a range of sizes.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = irfft(rfft(x), n=size)
            y2 = rfft(irfft(x, n=(size*2-1)))
            assert_equal(y1.dtype, self.rdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
            assert_array_almost_equal(y2, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10
        else:
            # Previously rtol was simply left unbound here, producing a
            # confusing NameError for any other dtype.
            raise ValueError(f"unsupported rdt for this test: {self.rdt}")
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = irfft(rfft(x), len(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = rfft(irfft(x, 2 * len(x) - 1))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
    def test_invalid_sizes(self):
        # Empty input and a negative transform length must be rejected.
        assert_raises(ValueError, irfft, [])
        assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
# self.ndec is bogus; we should have an assert_array_approx_equal for the
# number of significant digits
@pytest.mark.skipif(np.longfloat is np.float64,
                    reason="Long double is aliased to double")
class TestIRFFTLongDouble(_TestIRFFTBase):
    # NOTE(review): despite the class name this uses cdouble/double, making
    # it identical to TestIRFFTDouble below — presumably np.longcomplex /
    # np.longfloat were intended; confirm against upstream before changing.
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
        self.ndec = 14
class TestIRFFTDouble(_TestIRFFTBase):
    """Inverse real-FFT tests at double precision."""
    def setup_method(self):
        self.rdt = np.double
        self.cdt = np.cdouble
        self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
    """Inverse real-FFT tests at single precision (fewer digits required)."""
    def setup_method(self):
        self.rdt = np.float32
        self.cdt = np.complex64
        self.ndec = 5
class Testfft2:
    """Tests for the 2-D FFT wrapper."""
    def setup_method(self):
        np.random.seed(1234)
    def test_regression_244(self):
        """FFT returns wrong result with axes parameter."""
        # Regression test: fftn (and hence fft2) used to break when the
        # axes and shape arguments were combined.
        data = numpy.ones((4, 4, 2))
        expected = numpy.fft.fftn(data, s=(8, 8), axes=(-3, -2))
        result = fft2(data, s=(8, 8), axes=(-3, -2))
        assert_array_almost_equal(result, expected)
    def test_invalid_sizes(self):
        # Empty input and a negative transform size must be rejected.
        assert_raises(ValueError, fft2, [[]])
        assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle:
    """N-D FFT tests for single- and half-precision real input, which must
    produce complex64 output matching a double-precision computation to
    within the given NULP tolerances."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float32))
        assert_(y.dtype == np.complex64,
                msg="double precision output with single precision")
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_size_accuracy_small(self, size):
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)
    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_size_accuracy_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)
    def test_definition_float16(self):
        # float16 input is promoted and still yields complex64.
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float16))
        assert_equal(y.dtype, np.complex64)
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_float16_input_small(self, size):
        # Looser NULP bound: float16 input loses precision up front.
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 5e5)
    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_float16_input_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn:
    """N-D FFT tests: naive reference comparison, exhaustive axes
    permutations on a 3x3x3 cube, and shape (s) argument handling."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # Against the naive axis-by-axis reference for several shapes.
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(x)
        assert_array_almost_equal(y, direct_dftn(x))
        x = random((20, 26))
        assert_array_almost_equal(fftn(x), direct_dftn(x))
        x = random((5, 4, 3, 20))
        assert_array_almost_equal(fftn(x), direct_dftn(x))
    def test_axes_argument(self):
        # plane == ji_plane, x== kji_space
        # The *_plane / *_space literals below are hand-written
        # transpositions of the same 3x3x3 data cube; each axes= call is
        # checked against fftn of the correspondingly transposed cube.
        plane1 = [[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]]
        plane2 = [[10, 11, 12],
                  [13, 14, 15],
                  [16, 17, 18]]
        plane3 = [[19, 20, 21],
                  [22, 23, 24],
                  [25, 26, 27]]
        ki_plane1 = [[1, 2, 3],
                     [10, 11, 12],
                     [19, 20, 21]]
        ki_plane2 = [[4, 5, 6],
                     [13, 14, 15],
                     [22, 23, 24]]
        ki_plane3 = [[7, 8, 9],
                     [16, 17, 18],
                     [25, 26, 27]]
        jk_plane1 = [[1, 10, 19],
                     [4, 13, 22],
                     [7, 16, 25]]
        jk_plane2 = [[2, 11, 20],
                     [5, 14, 23],
                     [8, 17, 26]]
        jk_plane3 = [[3, 12, 21],
                     [6, 15, 24],
                     [9, 18, 27]]
        kj_plane1 = [[1, 4, 7],
                     [10, 13, 16], [19, 22, 25]]
        kj_plane2 = [[2, 5, 8],
                     [11, 14, 17], [20, 23, 26]]
        kj_plane3 = [[3, 6, 9],
                     [12, 15, 18], [21, 24, 27]]
        ij_plane1 = [[1, 4, 7],
                     [2, 5, 8],
                     [3, 6, 9]]
        ij_plane2 = [[10, 13, 16],
                     [11, 14, 17],
                     [12, 15, 18]]
        ij_plane3 = [[19, 22, 25],
                     [20, 23, 26],
                     [21, 24, 27]]
        ik_plane1 = [[1, 10, 19],
                     [2, 11, 20],
                     [3, 12, 21]]
        ik_plane2 = [[4, 13, 22],
                     [5, 14, 23],
                     [6, 15, 24]]
        ik_plane3 = [[7, 16, 25],
                     [8, 17, 26],
                     [9, 18, 27]]
        ijk_space = [jk_plane1, jk_plane2, jk_plane3]
        ikj_space = [kj_plane1, kj_plane2, kj_plane3]
        jik_space = [ik_plane1, ik_plane2, ik_plane3]
        jki_space = [ki_plane1, ki_plane2, ki_plane3]
        kij_space = [ij_plane1, ij_plane2, ij_plane3]
        x = array([plane1, plane2, plane3])
        # Default axes are the last ndim axes, in order.
        assert_array_almost_equal(fftn(x),
                                  fftn(x, axes=(-3, -2, -1)))  # kji_space
        assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
        assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
        y = fftn(x, axes=(2, 1, 0))  # ijk_space
        assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
        y = fftn(x, axes=(2, 0, 1))  # ikj_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
                                  fftn(ikj_space))
        y = fftn(x, axes=(1, 2, 0))  # jik_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
                                  fftn(jik_space))
        y = fftn(x, axes=(1, 0, 2))  # jki_space
        assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
        y = fftn(x, axes=(0, 2, 1))  # kij_space
        assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
        # 2-D transforms over each pair of axes.
        y = fftn(x, axes=(-2, -1))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])
        y = fftn(x, axes=(1, 2))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])
        y = fftn(x, axes=(-3, -2))  # kj_plane
        assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
        assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
        assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
        y = fftn(x, axes=(-3, -1))  # ki_plane
        assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
        assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
        assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
        y = fftn(x, axes=(-1, -2))  # ij_plane
        assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
        assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
        assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
        y = fftn(x, axes=(-1, -3))  # ik_plane
        assert_array_almost_equal(fftn(ik_plane1),
                                  swapaxes(y[:, 0, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane2),
                                  swapaxes(y[:, 1, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane3),
                                  swapaxes(y[:, 2, :], -1, -2))
        y = fftn(x, axes=(-2, -3))  # jk_plane
        assert_array_almost_equal(fftn(jk_plane1),
                                  swapaxes(y[:, :, 0], -1, -2))
        assert_array_almost_equal(fftn(jk_plane2),
                                  swapaxes(y[:, :, 1], -1, -2))
        assert_array_almost_equal(fftn(jk_plane3),
                                  swapaxes(y[:, :, 2], -1, -2))
        # 1-D transforms over each single axis.
        y = fftn(x, axes=(-1,))  # i_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
        y = fftn(x, axes=(-2,))  # j_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
        y = fftn(x, axes=(0,))  # k_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
        # Empty axes: transform nothing, return the input unchanged.
        y = fftn(x, axes=())  # point
        assert_array_almost_equal(y, x)
    def test_shape_argument(self):
        # s zero-pads the input up to the requested shape or truncates it.
        small_x = [[1, 2, 3],
                   [4, 5, 6]]
        large_x1 = [[1, 2, 3, 0],
                    [4, 5, 6, 0],
                    [0, 0, 0, 0],
                    [0, 0, 0, 0]]
        y = fftn(small_x, s=(4, 4))
        assert_array_almost_equal(y, fftn(large_x1))
        y = fftn(small_x, s=(3, 4))
        assert_array_almost_equal(y, fftn(large_x1[:-1]))
    def test_shape_axes_argument(self):
        small_x = [[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]]
        large_x1 = array([[1, 2, 3, 0],
                          [4, 5, 6, 0],
                          [7, 8, 9, 0],
                          [0, 0, 0, 0]])
        y = fftn(small_x, s=(4, 4), axes=(-2, -1))
        assert_array_almost_equal(y, fftn(large_x1))
        # Reversed axes pair must match the transposed computation.
        y = fftn(small_x, s=(4, 4), axes=(-1, -2))
        assert_array_almost_equal(y, swapaxes(
            fftn(swapaxes(large_x1, -1, -2)), -1, -2))
    def test_shape_axes_argument2(self):
        # Change shape of the last axis
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-1,), s=(8,))
        assert_array_almost_equal(y, fft(x, axis=-1, n=8))
        # Change shape of an arbitrary axis which is not the last one
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-2,), s=(8,))
        assert_array_almost_equal(y, fft(x, axis=-2, n=8))
        # Change shape of axes: cf #244, where shape and axes were mixed up
        x = numpy.random.random((4, 4, 2))
        y = fftn(x, axes=(-3, -2), s=(8, 8))
        assert_array_almost_equal(y,
                                  numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
    def test_shape_argument_more(self):
        # s longer than the number of axes is an error.
        x = zeros((4, 4, 2))
        with assert_raises(ValueError,
                           match="shape requires more axes than are present"):
            fftn(x, s=(8, 8, 2, 1))
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            fftn([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            fftn([[1, 1], [2, 2]], (4, -3))
    def test_no_axes(self):
        # axes=[] is a no-op, not an error.
        x = numpy.random.random((2,2,2))
        assert_allclose(fftn(x, axes=[]), x, atol=1e-7)
class TestIfftn:
    """N-D inverse FFT tests against the naive reference, with round
    trips and argument validation."""
    # Placeholders; the concrete dtypes come from the parametrizations.
    dtype = None
    cdtype = None
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
                             [(np.float64, np.complex128, 2000),
                              (np.float32, np.complex64, 3500)])
    def test_definition(self, dtype, cdtype, maxnlp):
        # ifftn must match the naive axis-by-axis reference within maxnlp
        # units in the last place and promote to the complex dtype.
        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=dtype)
        y = ifftn(x)
        assert_equal(y.dtype, cdtype)
        assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
        x = random((20, 26))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
        x = random((5, 4, 3, 20))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
    @pytest.mark.parametrize('maxnlp', [2000, 3500])
    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
    def test_random_complex(self, maxnlp, size):
        # Both composition orders must round-trip complex input.
        x = random([size, size]) + 1j*random([size, size])
        assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
        assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            ifftn([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            ifftn([[1, 1], [2, 2]], (4, -3))
    def test_no_axes(self):
        # axes=[] is a no-op, not an error.
        x = numpy.random.random((2,2,2))
        assert_allclose(ifftn(x, axes=[]), x, atol=1e-7)
class TestRfftn:
    """N-D real-input FFT tests: reference comparison, round trips, and
    input validation."""
    # Placeholders; the concrete dtypes come from the parametrizations.
    dtype = None
    cdtype = None
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
                             [(np.float64, np.complex128, 2000),
                              (np.float32, np.complex64, 3500)])
    def test_definition(self, dtype, cdtype, maxnlp):
        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=dtype)
        y = rfftn(x)
        assert_equal(y.dtype, cdtype)
        assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp)
        x = random((20, 26))
        assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
        x = random((5, 4, 3, 20))
        assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
    def test_random(self, size):
        # irfftn(rfftn(x), x.shape) must reproduce real input.
        x = random([size, size])
        assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10)
    @pytest.mark.parametrize('func', [rfftn, irfftn])
    def test_invalid_sizes(self, func):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            func([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            func([[1, 1], [2, 2]], (4, -3))
    @pytest.mark.parametrize('func', [rfftn, irfftn])
    def test_no_axes(self, func):
        # Unlike fftn/ifftn, the real transforms reject empty axes.
        with assert_raises(ValueError,
                           match="at least 1 axis must be transformed"):
            func([], axes=[])
    def test_complex_input(self):
        # rfftn accepts only real input.
        with assert_raises(TypeError, match="x must be a real sequence"):
            rfftn(np.zeros(10, dtype=np.complex64))
class FakeArray:
    """Array-like object exposing only the __array_interface__ protocol."""
    def __init__(self, data):
        self.__array_interface__ = data.__array_interface__
        # Keep a reference so the underlying buffer stays alive.
        self._data = data
class FakeArray2:
    """Array-like object exposing only the __array__ conversion protocol."""
    def __init__(self, data):
        self._payload = data
    def __array__(self):
        return self._payload
# TODO: Is this test actually valuable? The behavior it's testing shouldn't be
# relied upon by users except for overwrite_x = False
class TestOverwrite:
    """Check input overwrite behavior of the FFT functions."""
    real_dtypes = [np.float32, np.float64, np.longfloat]
    dtypes = real_dtypes + [np.complex64, np.complex128, np.longcomplex]
    fftsizes = [8, 16, 32]
    def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
        # Run the routine through plain, __array_interface__, and __array__
        # wrappers; unless overwriting was both requested and allowed, the
        # input must be unchanged.
        x2 = x.copy()
        for fake in [lambda x: x, FakeArray, FakeArray2]:
            routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
            sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
                routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
            if not should_overwrite:
                assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
    def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
                  fftsize, overwrite_x):
        # Overwriting is only legitimate when requested, the dtype is one
        # the routine can work in-place on, and no padding is needed.
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        should_overwrite = (overwrite_x
                            and dtype in overwritable_dtypes
                            and fftsize <= shape[axis])
        self._check(data, routine, fftsize, axis,
                    overwrite_x=overwrite_x,
                    should_overwrite=should_overwrite)
    @pytest.mark.parametrize('dtype', dtypes)
    @pytest.mark.parametrize('fftsize', fftsizes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), -1),
                                            ((16, 2), 0),
                                            ((2, 16), 1)])
    def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
        # Complex transforms may overwrite only complex input.
        overwritable = (np.longcomplex, np.complex128, np.complex64)
        self._check_1d(fft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
        self._check_1d(ifft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
    @pytest.mark.parametrize('dtype', real_dtypes)
    @pytest.mark.parametrize('fftsize', fftsizes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), -1),
                                            ((16, 2), 0),
                                            ((2, 16), 1)])
    def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
        # Real transforms may overwrite any real input dtype.
        overwritable = self.real_dtypes
        self._check_1d(irfft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
        self._check_1d(rfft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
    def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
                      overwrite_x):
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        def fftshape_iter(shp):
            # Yield every combination of halved/equal/doubled sizes per axis.
            if len(shp) <= 0:
                yield ()
            else:
                for j in (shp[0]//2, shp[0], shp[0]*2):
                    for rest in fftshape_iter(shp[1:]):
                        yield (j,) + rest
        def part_shape(shape, axes):
            if axes is None:
                return shape
            else:
                return tuple(np.take(shape, axes))
        def should_overwrite(data, shape, axes):
            # In-place only if no axis needs padding (output fits in input).
            s = part_shape(data.shape, axes)
            return (overwrite_x and
                    np.prod(shape) <= np.prod(s)
                    and dtype in overwritable_dtypes)
        for fftshape in fftshape_iter(part_shape(shape, axes)):
            self._check(data, routine, fftshape, axes,
                        overwrite_x=overwrite_x,
                        should_overwrite=should_overwrite(data, fftshape, axes))
            if data.ndim > 1:
                # check fortran order
                self._check(data.T, routine, fftshape, axes,
                            overwrite_x=overwrite_x,
                            should_overwrite=should_overwrite(
                                data.T, fftshape, axes))
    @pytest.mark.parametrize('dtype', dtypes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), None),
                                            ((16,), (0,)),
                                            ((16, 2), (0,)),
                                            ((2, 16), (1,)),
                                            ((8, 16), None),
                                            ((8, 16), (0, 1)),
                                            ((8, 16, 2), (0, 1)),
                                            ((8, 16, 2), (1, 2)),
                                            ((8, 16, 2), (0,)),
                                            ((8, 16, 2), (1,)),
                                            ((8, 16, 2), (2,)),
                                            ((8, 16, 2), None),
                                            ((8, 16, 2), (0, 1, 2))])
    def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
        overwritable = (np.longcomplex, np.complex128, np.complex64)
        self._check_nd_one(fftn, dtype, shape, axes, overwritable,
                           overwrite_x)
        self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
                           overwrite_x)
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
                                  rfft, irfft, rfftn, irfftn])
def test_invalid_norm(func):
    """An unknown norm string is rejected with an informative ValueError."""
    data = np.arange(10, dtype=float)
    expected_msg = ('Invalid norm value \'o\', should be'
                    ' "backward", "ortho" or "forward"')
    with assert_raises(ValueError, match=expected_msg):
        func(data, norm='o')
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
                                  irfft, irfftn, hfft, hfftn])
def test_swapped_byte_order_complex(func):
    """Byte-swapped complex input transforms identically to native input."""
    rng = np.random.RandomState(1234)
    data = rng.rand(10) + 1j * rng.rand(10)
    assert_allclose(func(swap_byteorder(data)), func(data))
@pytest.mark.parametrize('func', [ihfft, ihfftn, rfft, rfftn])
def test_swapped_byte_order_real(func):
    """Byte-swapped real input transforms identically to native input."""
    rng = np.random.RandomState(1234)
    data = rng.rand(10)
    assert_allclose(func(swap_byteorder(data)), func(data))
| 35,617
| 34.022616
| 80
|
py
|
scipy
|
scipy-main/scipy/fft/_pocketfft/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/fft/tests/test_numpy.py
|
import queue
import threading
import multiprocessing
import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
assert_array_almost_equal, assert_array_equal, assert_allclose
)
from pytest import raises as assert_raises
import scipy.fft as fft
def fft1(x):
    """Naive O(n^2) DFT used as a reference for the fast transforms."""
    n = len(x)
    freqs = -2j*np.pi*(np.arange(n)/float(n))
    # Outer product yields the phase matrix phase[k, j] = -2j*pi*k*j/n.
    phase_matrix = np.arange(n).reshape(-1, 1) * freqs
    return np.sum(x*np.exp(phase_matrix), axis=1)
class TestFFTShift:
    """Argument validation for the fft entry point."""
    def test_fft_n(self):
        # A transform length of zero is invalid.
        with assert_raises(ValueError):
            fft.fft([1, 2, 3], 0)
class TestFFT1D:
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j*random(maxlen)
xr = random(maxlen)
for i in range(1,maxlen):
assert_array_almost_equal(fft.ifft(fft.fft(x[0:i])), x[0:i],
decimal=12)
assert_array_almost_equal(fft.irfft(fft.rfft(xr[0:i]),i),
xr[0:i], decimal=12)
def test_fft(self):
x = random(30) + 1j*random(30)
expect = fft1(x)
assert_array_almost_equal(expect, fft.fft(x))
assert_array_almost_equal(expect, fft.fft(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30),
fft.fft(x, norm="ortho"))
assert_array_almost_equal(expect / 30, fft.fft(x, norm="forward"))
def test_ifft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(x, fft.ifft(fft.fft(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.ifft(fft.fft(x, norm=norm), norm=norm))
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
expect = fft.fft(fft.fft(x, axis=1), axis=0)
assert_array_almost_equal(expect, fft.fft2(x))
assert_array_almost_equal(expect, fft.fft2(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20),
fft.fft2(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20),
fft.fft2(x, norm="forward"))
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
expect = fft.ifft(fft.ifft(x, axis=1), axis=0)
assert_array_almost_equal(expect, fft.ifft2(x))
assert_array_almost_equal(expect, fft.ifft2(x, norm="backward"))
assert_array_almost_equal(expect * np.sqrt(30 * 20),
fft.ifft2(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20),
fft.ifft2(x, norm="forward"))
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
expect = fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0)
assert_array_almost_equal(expect, fft.fftn(x))
assert_array_almost_equal(expect, fft.fftn(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
fft.fftn(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20 * 10),
fft.fftn(x, norm="forward"))
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
expect = fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0)
assert_array_almost_equal(expect, fft.ifftn(x))
assert_array_almost_equal(expect, fft.ifftn(x, norm="backward"))
assert_array_almost_equal(fft.ifftn(x) * np.sqrt(30 * 20 * 10),
fft.ifftn(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20 * 10),
fft.ifftn(x, norm="forward"))
def test_rfft(self):
x = random(29)
for n in [x.size, 2*x.size]:
for norm in [None, "backward", "ortho", "forward"]:
assert_array_almost_equal(
fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
fft.rfft(x, n=n, norm=norm))
assert_array_almost_equal(fft.rfft(x, n=n) / np.sqrt(n),
fft.rfft(x, n=n, norm="ortho"))
def test_irfft(self):
x = random(30)
assert_array_almost_equal(x, fft.irfft(fft.rfft(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.irfft(fft.rfft(x, norm=norm), norm=norm))
def test_rfft2(self):
x = random((30, 20))
expect = fft.fft2(x)[:, :11]
assert_array_almost_equal(expect, fft.rfft2(x))
assert_array_almost_equal(expect, fft.rfft2(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20),
fft.rfft2(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20),
fft.rfft2(x, norm="forward"))
def test_irfft2(self):
x = random((30, 20))
assert_array_almost_equal(x, fft.irfft2(fft.rfft2(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.irfft2(fft.rfft2(x, norm=norm), norm=norm))
def test_rfftn(self):
x = random((30, 20, 10))
expect = fft.fftn(x)[:, :, :6]
assert_array_almost_equal(expect, fft.rfftn(x))
assert_array_almost_equal(expect, fft.rfftn(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
fft.rfftn(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20 * 10),
fft.rfftn(x, norm="forward"))
def test_irfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(x, fft.irfftn(fft.rfftn(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.irfftn(fft.rfftn(x, norm=norm), norm=norm))
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
expect = fft.fft(x)
assert_array_almost_equal(expect, fft.hfft(x_herm))
assert_array_almost_equal(expect, fft.hfft(x_herm, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30),
fft.hfft(x_herm, norm="ortho"))
assert_array_almost_equal(expect / 30,
fft.hfft(x_herm, norm="forward"))
def test_ihfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(x_herm, fft.ihfft(fft.hfft(x_herm)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x_herm, fft.ihfft(fft.hfft(x_herm, norm=norm), norm=norm))
def test_hfft2(self):
x = random((30, 20))
assert_array_almost_equal(x, fft.hfft2(fft.ihfft2(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.hfft2(fft.ihfft2(x, norm=norm), norm=norm))
def test_ihfft2(self):
x = random((30, 20))
expect = fft.ifft2(x)[:, :11]
assert_array_almost_equal(expect, fft.ihfft2(x))
assert_array_almost_equal(expect, fft.ihfft2(x, norm="backward"))
assert_array_almost_equal(expect * np.sqrt(30 * 20),
fft.ihfft2(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20),
fft.ihfft2(x, norm="forward"))
def test_hfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(x, fft.hfftn(fft.ihfftn(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.hfftn(fft.ihfftn(x, norm=norm), norm=norm))
def test_ihfftn(self):
x = random((30, 20, 10))
expect = fft.ifftn(x)[:, :, :6]
assert_array_almost_equal(expect, fft.ihfftn(x))
assert_array_almost_equal(expect, fft.ihfftn(x, norm="backward"))
assert_array_almost_equal(expect * np.sqrt(30 * 20 * 10),
fft.ihfftn(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20 * 10),
fft.ihfftn(x, norm="forward"))
@pytest.mark.parametrize("op", [fft.fftn, fft.ifftn,
fft.rfftn, fft.irfftn,
fft.hfftn, fft.ihfftn])
def test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_array_almost_equal(op_tr, tr_op)
@pytest.mark.parametrize("op", [fft.fft2, fft.ifft2,
fft.rfft2, fft.irfft2,
fft.hfft2, fft.ihfft2,
fft.fftn, fft.ifftn,
fft.rfftn, fft.irfftn,
fft.hfftn, fft.ihfftn])
def test_axes_subset_with_shape(self, op):
x = random((16, 8, 4))
axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)]
for a in axes:
# different shape on the first two axes
shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax]
for ax in range(x.ndim)])
# transform only the first two axes
op_tr = op(np.transpose(x, a), s=shape[:2], axes=(0, 1))
tr_op = np.transpose(op(x, s=shape[:2], axes=a[:2]), a)
assert_array_almost_equal(op_tr, tr_op)
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
x_norm = np.linalg.norm(x)
n = x.size * 2
func_pairs = [(fft.fft, fft.ifft),
(fft.rfft, fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
(fft.ihfft, fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2*x.size]:
for norm in ['backward', 'ortho', 'forward']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_array_almost_equal(x_norm,
np.linalg.norm(tmp))
@pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
np.longdouble])
def test_dtypes(self, dtype):
# make sure that all input precisions are accepted
x = random(30).astype(dtype)
assert_array_almost_equal(fft.ifft(fft.fft(x)), x)
assert_array_almost_equal(fft.irfft(fft.rfft(x)), x)
assert_array_almost_equal(fft.hfft(fft.ihfft(x), len(x)), x)
@pytest.mark.parametrize(
    "dtype",
    [np.float32, np.float64, np.longdouble,
     np.complex64, np.complex128, np.clongdouble])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
    "fft",
    [fft.fft, fft.fft2, fft.fftn,
     fft.ifft, fft.ifft2, fft.ifftn])
def test_fft_with_order(dtype, order, fft):
    """Check that FFT/IFFT produces identical results for C, Fortran and
    non contiguous arrays.

    Note: the ``np.longfloat``/``np.longcomplex`` aliases were replaced by
    their canonical names ``np.longdouble``/``np.clongdouble`` (the aliases
    were deprecated in NumPy 1.20 and removed in NumPy 2.0).
    """
    rng = np.random.RandomState(42)
    X = rng.rand(8, 7, 13).astype(dtype, copy=False)
    if order == 'F':
        Y = np.asfortranarray(X)
    else:
        # Make a non contiguous array by reversing the leading axis
        Y = X[::-1]
        X = np.ascontiguousarray(X[::-1])

    if fft.__name__.endswith('fft'):
        # 1-d transforms: check each axis independently
        for axis in range(3):
            X_res = fft(X, axis=axis)
            Y_res = fft(Y, axis=axis)
            assert_array_almost_equal(X_res, Y_res)
    elif fft.__name__.endswith(('fft2', 'fftn')):
        axes = [(0, 1), (1, 2), (0, 2)]
        if fft.__name__.endswith('fftn'):
            axes.extend([(0,), (1,), (2,), None])
        for ax in axes:
            X_res = fft(X, axes=ax)
            Y_res = fft(Y, axes=ax)
            assert_array_almost_equal(X_res, Y_res)
    else:
        raise ValueError
class TestFFTThreadSafe:
    """Run the same transform from many threads at once and check that
    every thread gets the single-threaded result."""

    threads = 16
    input_shape = (800, 200)

    def _test_mtsame(self, func, *args):
        def worker(args, q):
            q.put(func(*args))

        q = queue.Queue()
        # Reference result from a plain single-threaded call.
        expected = func(*args)

        # Spin off a bunch of threads to call the same function simultaneously
        pool = [threading.Thread(target=worker, args=(args, q))
                for _ in range(self.threads)]
        for thread in pool:
            thread.start()
        for thread in pool:
            thread.join()

        # Make sure all threads returned the correct value
        for _ in range(self.threads):
            assert_array_equal(q.get(timeout=5), expected,
                               'Function returned wrong value in multithreaded context')

    def test_fft(self):
        data = np.ones(self.input_shape, dtype=np.complex128)
        self._test_mtsame(fft.fft, data)

    def test_ifft(self):
        data = np.full(self.input_shape, 1+0j)
        self._test_mtsame(fft.ifft, data)

    def test_rfft(self):
        data = np.ones(self.input_shape)
        self._test_mtsame(fft.rfft, data)

    def test_irfft(self):
        data = np.full(self.input_shape, 1+0j)
        self._test_mtsame(fft.irfft, data)

    def test_hfft(self):
        data = np.ones(self.input_shape, np.complex64)
        self._test_mtsame(fft.hfft, data)

    def test_ihfft(self):
        data = np.ones(self.input_shape)
        self._test_mtsame(fft.ihfft, data)
@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft])
def test_multiprocess(func):
    # Test that fft still works after fork (gh-10422)
    inputs = [np.ones(100) for _ in range(4)]
    with multiprocessing.Pool(2) as pool:
        outputs = pool.map(func, inputs)

    reference = func(np.ones(100))
    for result in outputs:
        assert_allclose(result, reference)
class TestIRFFTN:
    """Regression tests for irfftn."""

    def test_not_last_axis_success(self):
        real, imag = np.random.random((2, 16, 8, 32))
        data = real + 1j*imag

        # Should not raise error
        fft.irfftn(data, axes=(-2,))
| 14,432
| 38.542466
| 81
|
py
|
scipy
|
scipy-main/scipy/fft/tests/test_real_transforms.py
|
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
from scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn
import scipy.fft as fft
from scipy import fftpack
import math
SQRT_2 = math.sqrt(2)
# scipy.fft wraps the fftpack versions but with normalized inverse transforms.
# So, the forward transforms and definitions are already thoroughly tested in
# fftpack/test_real_transforms.py
@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)])
@pytest.mark.parametrize("type", [1, 2, 3, 4])
@pytest.mark.parametrize("n", [2, 3, 4, 5, 10, 16])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
@pytest.mark.parametrize("orthogonalize", [False, True])
def test_identity_1d(forward, backward, type, n, axis, norm, orthogonalize):
    # Round-trip identity backward(forward(x)) == x along a single axis.
    x = np.random.rand(n, n)

    y = forward(x, type, axis=axis, norm=norm, orthogonalize=orthogonalize)
    z = backward(y, type, axis=axis, norm=norm, orthogonalize=orthogonalize)
    assert_allclose(z, x)

    # Extra padding past length n must be ignored when n is given explicitly.
    pad_spec = [(0, 0), (0, 0)]
    pad_spec[axis] = (0, 4)
    padded = np.pad(y, pad_spec, mode='edge')
    recovered = backward(padded, type, n, axis, norm,
                         orthogonalize=orthogonalize)
    assert_allclose(recovered, x)
@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)])
@pytest.mark.parametrize("type", [1, 2, 3, 4])
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64,
                                   np.complex64, np.complex128])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
@pytest.mark.parametrize("overwrite_x", [True, False])
def test_identity_1d_overwrite(forward, backward, type, dtype, axis, norm,
                               overwrite_x):
    # Round-trip identity f^-1(f(x)) == x, with and without permission to
    # clobber the input buffers.
    x = np.random.rand(7, 8).astype(dtype)
    x_orig = x.copy()

    y = forward(x, type, axis=axis, norm=norm, overwrite_x=overwrite_x)
    y_orig = y.copy()
    z = backward(y, type, axis=axis, norm=norm, overwrite_x=overwrite_x)

    if overwrite_x:
        assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)
    else:
        # Inputs must come back untouched when overwriting is forbidden.
        assert_allclose(z, x, rtol=1e-6, atol=1e-6)
        assert_array_equal(x, x_orig)
        assert_array_equal(y, y_orig)
@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)])
@pytest.mark.parametrize("type", [1, 2, 3, 4])
@pytest.mark.parametrize("shape, axes",
                         [
                             ((4, 4), 0),
                             ((4, 4), 1),
                             ((4, 4), None),
                             ((4, 4), (0, 1)),
                             ((10, 12), None),
                             ((10, 12), (0, 1)),
                             ((4, 5, 6), None),
                             ((4, 5, 6), 1),
                             ((4, 5, 6), (0, 2)),
                         ])
@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
@pytest.mark.parametrize("orthogonalize", [False, True])
def test_identity_nd(forward, backward, type, shape, axes, norm,
                     orthogonalize):
    # Round-trip identity f^-1(f(x)) == x over several axes at once.
    x = np.random.random(shape)

    if axes is not None:
        shape = np.take(shape, axes)

    y = forward(x, type, axes=axes, norm=norm, orthogonalize=orthogonalize)
    z = backward(y, type, axes=axes, norm=norm, orthogonalize=orthogonalize)
    assert_allclose(z, x)

    # Pad 4 trailing entries onto every transformed axis; passing the
    # original shape to the inverse must discard the padding.
    if axes is None:
        transformed = range(x.ndim)
    elif isinstance(axes, int):
        transformed = (axes,)
    else:
        transformed = axes
    pad = [(0, 4) if ax in transformed else (0, 0) for ax in range(x.ndim)]

    y2 = np.pad(y, pad, mode='edge')
    z2 = backward(y2, type, shape, axes, norm, orthogonalize=orthogonalize)
    assert_allclose(z2, x)
@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)])
@pytest.mark.parametrize("type", [1, 2, 3, 4])
@pytest.mark.parametrize("shape, axes",
                         [
                             ((4, 5), 0),
                             ((4, 5), 1),
                             ((4, 5), None),
                         ])
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64,
                                   np.complex64, np.complex128])
@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
@pytest.mark.parametrize("overwrite_x", [False, True])
def test_identity_nd_overwrite(forward, backward, type, shape, axes, dtype,
                               norm, overwrite_x):
    """Round-trip identity f^-1(f(x)) == x for the n-d transforms while
    honoring ``overwrite_x``.

    Fixes: ``overwrite_x`` was parametrized but never forwarded to the
    transforms, so the overwrite branch was never exercised; a dead
    ``shape = np.take(shape, axes)`` reassignment was removed.
    """
    x = np.random.random(shape).astype(dtype)
    x_orig = x.copy()

    y = forward(x, type, axes=axes, norm=norm, overwrite_x=overwrite_x)
    y_orig = y.copy()
    z = backward(y, type, axes=axes, norm=norm, overwrite_x=overwrite_x)

    if overwrite_x:
        assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)
    else:
        # Inputs must come back untouched when overwriting is forbidden.
        assert_allclose(z, x, rtol=1e-6, atol=1e-6)
        assert_array_equal(x, x_orig)
        assert_array_equal(y, y_orig)
@pytest.mark.parametrize("func", ['dct', 'dst', 'dctn', 'dstn'])
@pytest.mark.parametrize("type", [1, 2, 3, 4])
@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
def test_fftpack_equivalience(func, type, norm):
    # scipy.fft and scipy.fftpack must agree on the forward transforms.
    x = np.random.rand(8, 16)
    result = getattr(fft, func)(x, type, norm=norm)
    reference = getattr(fftpack, func)(x, type, norm=norm)
    assert_allclose(result, reference)
@pytest.mark.parametrize("func", [dct, dst, dctn, dstn])
@pytest.mark.parametrize("type", [1, 2, 3, 4])
def test_orthogonalize_default(func, type):
    # Test orthogonalize is the default when norm="ortho", but not otherwise
    x = np.random.rand(100)

    for norm, ortho in (("forward", False),
                        ("backward", False),
                        ("ortho", True)):
        explicit = func(x, type=type, norm=norm, orthogonalize=ortho)
        implicit = func(x, type=type, norm=norm)
        assert_allclose(explicit, implicit)
@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
@pytest.mark.parametrize("func, type", [
    (dct, 4), (dst, 1), (dst, 4)])
def test_orthogonalize_noop(func, type, norm):
    # For these transform types orthogonalize has no effect at all.
    x = np.random.rand(100)
    with_flag = func(x, type=type, norm=norm, orthogonalize=True)
    without_flag = func(x, type=type, norm=norm, orthogonalize=False)
    assert_allclose(with_flag, without_flag)
@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
def test_orthogonalize_dct1(norm):
    # DCT-I orthogonalization rescales the first and last input and output
    # samples by sqrt(2); emulate that by hand and compare.
    x = np.random.rand(100)

    scaled = x.copy()
    scaled[0] *= SQRT_2
    scaled[-1] *= SQRT_2

    with_ortho = dct(x, type=1, norm=norm, orthogonalize=True)
    manual = dct(scaled, type=1, norm=norm, orthogonalize=False)
    manual[0] /= SQRT_2
    manual[-1] /= SQRT_2
    assert_allclose(with_ortho, manual)
@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
@pytest.mark.parametrize("func", [dct, dst])
def test_orthogonalize_dcst2(func, norm):
    # DCT-II / DST-II orthogonalization rescales exactly one output
    # coefficient: the first for dct, the last for dst.
    x = np.random.rand(100)
    with_ortho = func(x, type=2, norm=norm, orthogonalize=True)
    plain = func(x, type=2, norm=norm, orthogonalize=False)
    plain[0 if func == dct else -1] /= SQRT_2
    assert_allclose(with_ortho, plain)
@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
@pytest.mark.parametrize("func", [dct, dst])
def test_orthogonalize_dcst3(func, norm):
    # DCT-III / DST-III orthogonalization rescales exactly one input
    # sample: the first for dct, the last for dst.
    x = np.random.rand(100)
    scaled = x.copy()
    scaled[0 if func == dct else -1] *= SQRT_2

    with_ortho = func(x, type=3, norm=norm, orthogonalize=True)
    manual = func(scaled, type=3, norm=norm, orthogonalize=False)
    assert_allclose(with_ortho, manual)
| 7,637
| 34.361111
| 78
|
py
|
scipy
|
scipy-main/scipy/fft/tests/test_fftlog.py
|
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.fft._fftlog import fht, ifht, fhtoffset
from scipy.special import poch
def test_fht_agrees_with_fftlog():
    """Compare fht output against reference values from Fortran FFTLog."""
    # check that fht numerically agrees with the output from Fortran FFTLog,
    # the results were generated with the provided `fftlogtest` program,
    # after fixing how the k array is generated (divide range by n-1, not n)

    # test function, analytical Hankel transform is of the same form
    def f(r, mu):
        return r**(mu+1)*np.exp(-r**2/2)

    r = np.logspace(-4, 4, 16)

    dln = np.log(r[1]/r[0])
    mu = 0.3
    offset = 0.0
    bias = 0.0

    a = f(r, mu)

    # test 1: compute as given
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02,
              -0.1949518286432330E-02, +0.3789220182554077E-02,
              +0.5093959119952945E-03, +0.2785387803618774E-01,
              +0.9944952700848897E-01, +0.4599202164586588E+00,
              +0.3157462160881342E+00, -0.8201236844404755E-03,
              -0.7834031308271878E-03, +0.3931444945110708E-03,
              -0.2697710625194777E-03, +0.3568398050238820E-03,
              -0.5554454827797206E-03, +0.8286331026468585E-03]
    assert_allclose(ours, theirs)

    # test 2: change to optimal offset
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05,
              +0.3150140927838524E-03, +0.9149121960963704E-03,
              +0.5808089753959363E-02, +0.2548065256377240E-01,
              +0.1339477692089897E+00, +0.4821530509479356E+00,
              +0.2659899781579785E+00, -0.1116475278448113E-01,
              +0.1791441617592385E-02, -0.4181810476548056E-03,
              +0.1314963536765343E-03, -0.5422057743066297E-04,
              +0.3208681804170443E-04, -0.2696849476008234E-04]
    assert_allclose(ours, theirs)

    # test 3: positive bias
    bias = 0.8
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00,
              +0.1065374386206564E+00, -0.5121739602708132E-01,
              +0.2636649319269470E-01, +0.1697209218849693E-01,
              +0.1250215614723183E+00, +0.4739583261486729E+00,
              +0.2841149874912028E+00, -0.8312764741645729E-02,
              +0.1024233505508988E-02, -0.1644902767389120E-03,
              +0.3305775476926270E-04, -0.7786993194882709E-05,
              +0.1962258449520547E-05, -0.8977895734909250E-06]
    assert_allclose(ours, theirs)

    # test 4: negative bias
    bias = -0.8
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04,
              +0.2123969254700955E-03, +0.1009558244834628E-02,
              +0.5131386375222176E-02, +0.2461678673516286E-01,
              +0.1235812845384476E+00, +0.4719570096404403E+00,
              +0.2893487490631317E+00, -0.1686570611318716E-01,
              +0.2231398155172505E-01, -0.1480742256379873E-01,
              +0.1692387813500801E+00, +0.3097490354365797E+00,
              +2.7593607182401860E+00, 10.5251075070045800E+00]
    assert_allclose(ours, theirs)
@pytest.mark.parametrize('optimal', [True, False])
@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0])
@pytest.mark.parametrize('bias', [0, 0.1, -0.1])
@pytest.mark.parametrize('n', [64, 63])
def test_fht_identity(n, bias, offset, optimal):
    # ifht(fht(a)) must reproduce a for generic parameter combinations.
    rng = np.random.RandomState(3491349965)

    a = rng.standard_normal(n)
    dln = rng.uniform(-1, 1)
    mu = rng.uniform(-2, 2)

    if optimal:
        offset = fhtoffset(dln, mu, initial=offset, bias=bias)

    transformed = fht(a, dln, mu, offset=offset, bias=bias)
    recovered = ifht(transformed, dln, mu, offset=offset, bias=bias)

    assert_allclose(a, recovered)
def test_fht_special_cases():
    """Check that fht/ifht warn exactly when the transform is singular.

    With xp = (mu+1+q)/2, xm = (mu+1-q)/2 and M = {0, -1, -2, ...}, the
    transform coefficient involves poles whenever xp or xm hits M; only
    some combinations are actually singular (see cases below).
    """
    rng = np.random.RandomState(3491349965)

    a = rng.standard_normal(64)
    dln = rng.uniform(-1, 1)

    # let xp = (mu+1+q)/2, xm = (mu+1-q)/2, M = {0, -1, -2, ...}

    # case 1: xp in M, xm in M => well-defined transform
    mu, bias = -4.0, 1.0
    with warnings.catch_warnings(record=True) as record:
        fht(a, dln, mu, bias=bias)
        assert not record, 'fht warned about a well-defined transform'

    # case 2: xp not in M, xm in M => well-defined transform
    mu, bias = -2.5, 0.5
    with warnings.catch_warnings(record=True) as record:
        fht(a, dln, mu, bias=bias)
        assert not record, 'fht warned about a well-defined transform'

    # case 3: xp in M, xm not in M => singular transform
    mu, bias = -3.5, 0.5
    with pytest.warns(Warning) as record:
        fht(a, dln, mu, bias=bias)
        assert record, 'fht did not warn about a singular transform'

    # case 4: xp not in M, xm in M => singular inverse transform
    mu, bias = -2.5, 0.5
    with pytest.warns(Warning) as record:
        ifht(a, dln, mu, bias=bias)
        assert record, 'ifht did not warn about a singular transform'
@pytest.mark.parametrize('n', [64, 63])
def test_fht_exact(n):
    rng = np.random.RandomState(3491349965)

    # for a(r) a power law r^\gamma, the fast Hankel transform produces the
    # exact continuous Hankel transform if biased with q = \gamma
    mu = rng.uniform(0, 3)

    # convergence of HT: -1-mu < gamma < 1/2
    gamma = rng.uniform(-1-mu, 1/2)

    r = np.logspace(-2, 2, n)
    a = r**gamma

    dln = np.log(r[1]/r[0])
    offset = fhtoffset(dln, mu, initial=0.0, bias=gamma)

    numeric = fht(a, dln, mu, offset=offset, bias=gamma)
    k = np.exp(offset)/r[::-1]

    # analytical result
    analytic = (2/k)**gamma * poch((mu+1-gamma)/2, gamma)

    assert_allclose(numeric, analytic)
| 5,819
| 34.925926
| 76
|
py
|
scipy
|
scipy-main/scipy/fft/tests/mock_backend.py
|
import numpy as np
class _MockFunction:
def __init__(self, return_value = None):
self.number_calls = 0
self.return_value = return_value
self.last_args = ([], {})
def __call__(self, *args, **kwargs):
self.number_calls += 1
self.last_args = (args, kwargs)
return self.return_value
# One mock per public scipy.fft function.  Each holds a fixed random array
# as its return value so the backend tests can tell that the mock (not the
# real implementation) produced the result.
fft = _MockFunction(np.random.random(10))
fft2 = _MockFunction(np.random.random(10))
fftn = _MockFunction(np.random.random(10))

ifft = _MockFunction(np.random.random(10))
ifft2 = _MockFunction(np.random.random(10))
ifftn = _MockFunction(np.random.random(10))

rfft = _MockFunction(np.random.random(10))
rfft2 = _MockFunction(np.random.random(10))
rfftn = _MockFunction(np.random.random(10))

irfft = _MockFunction(np.random.random(10))
irfft2 = _MockFunction(np.random.random(10))
irfftn = _MockFunction(np.random.random(10))

hfft = _MockFunction(np.random.random(10))
hfft2 = _MockFunction(np.random.random(10))
hfftn = _MockFunction(np.random.random(10))

ihfft = _MockFunction(np.random.random(10))
ihfft2 = _MockFunction(np.random.random(10))
ihfftn = _MockFunction(np.random.random(10))

dct = _MockFunction(np.random.random(10))
idct = _MockFunction(np.random.random(10))
dctn = _MockFunction(np.random.random(10))
idctn = _MockFunction(np.random.random(10))

dst = _MockFunction(np.random.random(10))
idst = _MockFunction(np.random.random(10))
dstn = _MockFunction(np.random.random(10))
idstn = _MockFunction(np.random.random(10))

fht = _MockFunction(np.random.random(10))
ifht = _MockFunction(np.random.random(10))
__ua_domain__ = "numpy.scipy.fft"
def __ua_function__(method, args, kwargs):
fn = globals().get(method.__name__)
return (fn(*args, **kwargs) if fn is not None
else NotImplemented)
| 1,769
| 28.5
| 49
|
py
|
scipy
|
scipy-main/scipy/fft/tests/test_backend.py
|
from functools import partial
import numpy as np
import scipy.fft
from scipy.fft import _fftlog, _pocketfft, set_backend
from scipy.fft.tests import mock_backend
from numpy.testing import assert_allclose, assert_equal
import pytest
# Parallel tuples (same order everywhere): the public name, the numpy (or
# scipy fallback) reference implementation, the scipy.fft entry point and
# the mock_backend stand-in.  zip()-ed together below to parametrize the
# dispatch tests.
fnames = ('fft', 'fft2', 'fftn',
          'ifft', 'ifft2', 'ifftn',
          'rfft', 'rfft2', 'rfftn',
          'irfft', 'irfft2', 'irfftn',
          'dct', 'idct', 'dctn', 'idctn',
          'dst', 'idst', 'dstn', 'idstn',
          'fht', 'ifht')

np_funcs = (np.fft.fft, np.fft.fft2, np.fft.fftn,
            np.fft.ifft, np.fft.ifft2, np.fft.ifftn,
            np.fft.rfft, np.fft.rfft2, np.fft.rfftn,
            np.fft.irfft, np.fft.irfft2, np.fft.irfftn,
            np.fft.hfft, _pocketfft.hfft2, _pocketfft.hfftn,  # np has no hfftn
            np.fft.ihfft, _pocketfft.ihfft2, _pocketfft.ihfftn,
            _pocketfft.dct, _pocketfft.idct, _pocketfft.dctn, _pocketfft.idctn,
            _pocketfft.dst, _pocketfft.idst, _pocketfft.dstn, _pocketfft.idstn,
            # must provide required kwargs for fht, ifht
            partial(_fftlog.fht, dln=2, mu=0.5),
            partial(_fftlog.ifht, dln=2, mu=0.5))

funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
         scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
         scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
         scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
         scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
         scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn,
         scipy.fft.dct, scipy.fft.idct, scipy.fft.dctn, scipy.fft.idctn,
         scipy.fft.dst, scipy.fft.idst, scipy.fft.dstn, scipy.fft.idstn,
         # must provide required kwargs for fht, ifht
         partial(scipy.fft.fht, dln=2, mu=0.5),
         partial(scipy.fft.ifht, dln=2, mu=0.5))

mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
         mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
         mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
         mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
         mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
         mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn,
         mock_backend.dct, mock_backend.idct,
         mock_backend.dctn, mock_backend.idctn,
         mock_backend.dst, mock_backend.idst,
         mock_backend.dstn, mock_backend.idstn,
         mock_backend.fht, mock_backend.ifht)
@pytest.mark.parametrize("func, np_func, mock", zip(funcs, np_funcs, mocks))
def test_backend_call(func, np_func, mock):
    # Outside the context the scipy.fft function matches the reference;
    # inside, the call must be handed to the registered backend mock.
    x = np.arange(20).reshape((10, 2))
    answer = np_func(x)
    assert_allclose(func(x), answer, atol=1e-10)

    with set_backend(mock_backend, only=True):
        mock.number_calls = 0
        y = func(x)
        assert_equal(y, mock.return_value)
        assert_equal(mock.number_calls, 1)

    # Leaving the context restores the default implementation.
    assert_allclose(func(x), answer, atol=1e-10)
# Plan-capable subset: only the FFT family accepts a `plan` argument, so the
# plan-forwarding test is parametrized over these pairs only.
plan_funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
              scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
              scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
              scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
              scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
              scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn)

plan_mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
              mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
              mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
              mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
              mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
              mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn)
@pytest.mark.parametrize("func, mock", zip(plan_funcs, plan_mocks))
def test_backend_plan(func, mock):
    # The default implementation rejects precomputed plans, but a backend
    # must receive the plan argument untouched.
    x = np.arange(20).reshape((10, 2))

    with pytest.raises(NotImplementedError, match='precomputed plan'):
        func(x, plan='foo')

    with set_backend(mock_backend, only=True):
        mock.number_calls = 0
        y = func(x, plan='foo')
        assert_equal(y, mock.return_value)
        assert_equal(mock.number_calls, 1)
        assert_equal(mock.last_args[1]['plan'], 'foo')
| 4,256
| 42
| 79
|
py
|
scipy
|
scipy-main/scipy/fft/tests/test_helper.py
|
from scipy.fft._helper import next_fast_len, _init_nd_shape_and_axes
from numpy.testing import assert_equal, assert_array_equal
from pytest import raises as assert_raises
import pytest
import numpy as np
import sys
# 5-smooth ("regular") numbers: integers whose only prime factors are
# 2, 3 and 5.  next_fast_len must return these unchanged.
_5_smooth_numbers = [
    2, 3, 4, 5, 6, 8, 9, 10,
    2 * 3 * 5,
    2**3 * 3**5,
    2**3 * 3**3 * 5**2,
]
def test_next_fast_len():
    # Numbers that are already 5-smooth must be returned unchanged.
    for value in _5_smooth_numbers:
        assert_equal(next_fast_len(value), value)
def _assert_n_smooth(x, n):
x_orig = x
if n < 2:
assert False
while True:
q, r = divmod(x, 2)
if r != 0:
break
x = q
for d in range(3, n+1, 2):
while True:
q, r = divmod(x, d)
if r != 0:
break
x = q
assert x == 1, \
f'x={x_orig} is not {n}-smooth, remainder={x}'
class TestNextFastLen:
    """Tests for scipy.fft.next_fast_len."""

    def test_next_fast_len(self):
        np.random.seed(1234)

        def nums():
            yield from range(1, 1000)
            yield 2**5 * 3**5 * 4**5 + 1

        for n in nums():
            # complex transforms use 11-smooth sizes, real ones 5-smooth
            m = next_fast_len(n)
            _assert_n_smooth(m, 11)
            assert m == next_fast_len(n, False)

            m = next_fast_len(n, True)
            _assert_n_smooth(m, 5)

    def test_np_integers(self):
        # next_fast_len must accept any NumPy integer scalar type
        ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]
        for ityp in ITYPES:
            x = ityp(12345)
            testN = next_fast_len(x)
            assert_equal(testN, next_fast_len(int(x)))

    def testnext_fast_len_small(self):
        # known input -> output pairs for the real (5-smooth) case
        hams = {
            1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15,
            16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000
        }
        for x, y in hams.items():
            assert_equal(next_fast_len(x, True), y)

    @pytest.mark.xfail(sys.maxsize < 2**32,
                       reason="Hamming Numbers too large for 32-bit",
                       raises=ValueError, strict=True)
    def testnext_fast_len_big(self):
        hams = {
            510183360: 510183360, 510183360 + 1: 512000000,
            511000000: 512000000,
            854296875: 854296875, 854296875 + 1: 859963392,
            196608000000: 196608000000, 196608000000 + 1: 196830000000,
            8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208,
            206391214080000: 206391214080000,
            206391214080000 + 1: 206624260800000,
            470184984576000: 470184984576000,
            470184984576000 + 1: 470715894135000,
            7222041363087360: 7222041363087360,
            7222041363087360 + 1: 7230196133913600,
            # power of 5    5**23
            11920928955078125: 11920928955078125,
            11920928955078125 - 1: 11920928955078125,
            # power of 3    3**34
            16677181699666569: 16677181699666569,
            16677181699666569 - 1: 16677181699666569,
            # power of 2   2**54
            18014398509481984: 18014398509481984,
            18014398509481984 - 1: 18014398509481984,
            # above this, int(ceil(n)) == int(ceil(n+1))
            19200000000000000: 19200000000000000,
            19200000000000000 + 1: 19221679687500000,
            288230376151711744: 288230376151711744,
            288230376151711744 + 1: 288325195312500000,
            288325195312500000 - 1: 288325195312500000,
            288325195312500000: 288325195312500000,
            288325195312500000 + 1: 288555831593533440,
        }
        for x, y in hams.items():
            assert_equal(next_fast_len(x, True), y)

    def test_keyword_args(self):
        # both parameters are usable as keywords
        assert next_fast_len(11, real=True) == 12
        assert next_fast_len(target=7, real=False) == 7
class Test_init_nd_shape_and_axes:
def test_py_0d_defaults(self):
x = np.array(4)
shape = None
axes = None
shape_expected = np.array([])
axes_expected = np.array([])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_0d_defaults(self):
x = np.array(7.)
shape = None
axes = None
shape_expected = np.array([])
axes_expected = np.array([])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_py_1d_defaults(self):
x = np.array([1, 2, 3])
shape = None
axes = None
shape_expected = np.array([3])
axes_expected = np.array([0])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_1d_defaults(self):
x = np.arange(0, 1, .1)
shape = None
axes = None
shape_expected = np.array([10])
axes_expected = np.array([0])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_py_2d_defaults(self):
x = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
shape = None
axes = None
shape_expected = np.array([2, 4])
axes_expected = np.array([0, 1])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_2d_defaults(self):
x = np.arange(0, 1, .1).reshape(5, 2)
shape = None
axes = None
shape_expected = np.array([5, 2])
axes_expected = np.array([0, 1])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_defaults(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = None
axes = None
shape_expected = np.array([6, 2, 5, 3, 4])
axes_expected = np.array([0, 1, 2, 3, 4])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_set_shape(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = [10, -1, -1, 1, 4]
axes = None
shape_expected = np.array([10, 2, 5, 1, 4])
axes_expected = np.array([0, 1, 2, 3, 4])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_set_axes(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = None
axes = [4, 1, 2]
shape_expected = np.array([4, 2, 5])
axes_expected = np.array([4, 1, 2])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_set_shape_axes(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = [10, -1, 2]
axes = [1, 0, 3]
shape_expected = np.array([10, 6, 2])
axes_expected = np.array([1, 0, 3])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_shape_axes_subset(self):
x = np.zeros((2, 3, 4, 5))
shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None)
assert_array_equal(shape, [5, 5, 5])
assert_array_equal(axes, [1, 2, 3])
    def test_errors(self):
        """Invalid shape/axes arguments raise ValueError with a message
        matching the documented error text."""
        x = np.zeros(1)
        # axes must be a flat iterable of integers
        with assert_raises(ValueError, match="axes must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]])
        with assert_raises(ValueError, match="axes must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.])
        # out-of-range axes (positive and negative) for a 1-D input
        with assert_raises(ValueError,
                           match="axes exceeds dimensionality of input"):
            _init_nd_shape_and_axes(x, shape=None, axes=[1])
        with assert_raises(ValueError,
                           match="axes exceeds dimensionality of input"):
            _init_nd_shape_and_axes(x, shape=None, axes=[-2])
        # duplicate axes are rejected
        with assert_raises(ValueError,
                           match="all axes must be unique"):
            _init_nd_shape_and_axes(x, shape=None, axes=[0, 0])
        # shape must be a flat iterable of integers
        with assert_raises(ValueError, match="shape must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None)
        with assert_raises(ValueError, match="shape must be a scalar or "
                           "iterable of integers"):
            _init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None)
        # shape and axes, when both given, must have matching lengths
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            _init_nd_shape_and_axes(np.zeros([1, 1, 1, 1]),
                                    shape=[1, 2, 3], axes=[1])
        # non-positive sizes (other than the -1 placeholder) are invalid
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[0\]\) specified"):
            _init_nd_shape_and_axes(x, shape=[0], axes=None)
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[-2\]\) specified"):
            _init_nd_shape_and_axes(x, shape=-2, axes=None)
| 9,795
| 31.54485
| 80
|
py
|
scipy
|
scipy-main/scipy/fft/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/fft/tests/test_multithreading.py
|
from scipy import fft
import numpy as np
import pytest
from numpy.testing import assert_allclose
import multiprocessing
import os
@pytest.fixture(scope='module')
def x():
    """Shared 2-D input array for the multithreading tests."""
    return np.random.randn(512, 128) # Must be large enough to qualify for mt
@pytest.mark.parametrize("func", [
    fft.fft, fft.ifft, fft.fft2, fft.ifft2, fft.fftn, fft.ifftn,
    fft.rfft, fft.irfft, fft.rfft2, fft.irfft2, fft.rfftn, fft.irfftn,
    fft.hfft, fft.ihfft, fft.hfft2, fft.ihfft2, fft.hfftn, fft.ihfftn,
    fft.dct, fft.idct, fft.dctn, fft.idctn,
    fft.dst, fft.idst, fft.dstn, fft.idstn,
])
@pytest.mark.parametrize("workers", [2, -1])
def test_threaded_same(x, func, workers):
    """Each transform returns the same result with one worker and with
    multiple workers (workers=-1 means all CPUs)."""
    expected = func(x, workers=1)
    actual = func(x, workers=workers)
    assert_allclose(actual, expected)
def _mt_fft(x):
return fft.fft(x, workers=2)
def test_mixed_threads_processes(x):
    """The fft worker thread pool is safe to use both before and after fork."""
    # Prime the thread pool in the parent process...
    expect = fft.fft(x, workers=2)
    # ...fork worker processes that each use a multithreaded FFT...
    with multiprocessing.Pool(2) as p:
        res = p.map(_mt_fft, [x for _ in range(4)])
    for r in res:
        assert_allclose(r, expect)
    # ...and confirm the parent's pool still works afterwards.
    fft.fft(x, workers=2)
def test_invalid_workers(x):
    """workers=0 and workers < -cpu_count() raise ValueError."""
    cpus = os.cpu_count()
    # -cpus is the most negative value still in range; must not raise.
    fft.ifft([1], workers=-cpus)
    with pytest.raises(ValueError, match='workers must not be zero'):
        fft.fft(x, workers=0)
    with pytest.raises(ValueError, match='workers value out of range'):
        fft.ifft(x, workers=-cpus-1)
def test_set_get_workers():
    """set_workers nests correctly and negative counts resolve relative
    to os.cpu_count()."""
    cpus = os.cpu_count()
    assert fft.get_workers() == 1
    with fft.set_workers(4):
        assert fft.get_workers() == 4
        with fft.set_workers(-1):
            # -1 means "use all CPUs"
            assert fft.get_workers() == cpus
        # inner context exited: previous value restored
        assert fft.get_workers() == 4
    assert fft.get_workers() == 1
    with fft.set_workers(-cpus):
        # the most negative in-range value resolves to a single worker
        assert fft.get_workers() == 1
def test_set_workers_invalid():
    """Zero and too-negative worker counts are rejected on entry to
    the set_workers context manager."""
    with pytest.raises(ValueError, match='workers must not be zero'), \
            fft.set_workers(0):
        pass
    with pytest.raises(ValueError, match='workers value out of range'), \
            fft.set_workers(-os.cpu_count()-1):
        pass
| 2,132
| 24.392857
| 78
|
py
|
scipy
|
scipy-main/scipy/fft/tests/test_fft_function.py
|
import numpy as np
import subprocess
import sys
TEST_BODY = r"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
import scipy
import sys
import pytest
np.random.seed(1234)
x = np.random.randn(10) + 1j * np.random.randn(10)
X = np.fft.fft(x)
# Callable before scipy.fft is imported
with pytest.deprecated_call(match=r'2\.0\.0'):
y = scipy.ifft(X)
assert_allclose(y, x)
# Callable after scipy.fft is imported
import scipy.fft
with pytest.deprecated_call(match=r'2\.0\.0'):
y = scipy.ifft(X)
assert_allclose(y, x)
"""
def test_fft_function():
    """``scipy.fft`` must resolve to the module, while the deprecated
    top-level ``scipy.ifft`` alias keeps working (gh-10253)."""
    # Historically, scipy.fft was an alias for numpy.fft.fft
    # Ensure there are no conflicts with the FFT module (gh-10253)
    # Test must run in a subprocess so scipy.fft is not already imported
    subprocess.check_call([sys.executable, '-c', TEST_BODY])
    # scipy.fft is the correct module
    from scipy import fft
    assert not callable(fft)
    assert fft.__name__ == 'scipy.fft'
    # the deprecated alias still wraps the NumPy implementation
    from scipy import ifft
    assert ifft.__wrapped__ is np.fft.ifft
| 1,048
| 22.840909
| 72
|
py
|
scipy
|
scipy-main/doc/source/scipyoptdoc.py
|
"""
===========
scipyoptdoc
===========
Proper docstrings for scipy.optimize.minimize et al.
Usage::
.. scipy-optimize:function:: scipy.optimize.minimize
:impl: scipy.optimize._optimize._minimize_nelder_mead
:method: Nelder-Mead
Produces output similar to autodoc, except
- The docstring is obtained from the 'impl' function
- The call signature is mangled so that the default values for method keyword
and options dict are substituted
- 'Parameters' section is replaced by 'Options' section
- See Also link to the actual function documentation is inserted
"""
import sys
import sphinx
import inspect
import textwrap
import pydoc
if sphinx.__version__ < '1.0.1':
raise RuntimeError("Sphinx 1.0.1 or newer is required")
from numpydoc.numpydoc import mangle_docstrings
from docutils.statemachine import ViewList
from sphinx.domains.python import PythonDomain
from scipy._lib._util import getfullargspec_no_self
def setup(app):
    """Sphinx extension entry point: register the scipy-optimize domain."""
    app.add_domain(ScipyOptimizeInterfaceDomain)
    return {'parallel_read_safe': True}
def _option_required_str(x):
if not x:
raise ValueError("value is required")
return str(x)
def _import_object(name):
parts = name.split('.')
module_name = '.'.join(parts[:-1])
__import__(module_name)
obj = getattr(sys.modules[module_name], parts[-1])
return obj
class ScipyOptimizeInterfaceDomain(PythonDomain):
    """Clone of the Sphinx Python domain whose ``function`` directive is
    wrapped to mangle minimize-style solver docstrings."""
    name = 'scipy-optimize'
    def __init__(self, *a, **kw):
        super().__init__(*a, **kw)
        # Shallow-copy before mutating so the class-level directives
        # mapping shared with PythonDomain is not modified in place.
        self.directives = dict(self.directives)
        self.directives['function'] = wrap_mangling_directive(self.directives['function'])
BLURB = """
.. seealso:: For documentation for the rest of the parameters, see `%s`
"""
def wrap_mangling_directive(base_directive):
    """Return a subclass of *base_directive* that rewrites the signature and
    docstring of an optimizer interface function (given as the directive
    argument) using its implementation function (given via ``:impl:``)."""
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env

            # Interface function
            name = self.arguments[0].strip()
            obj = _import_object(name)
            args, varargs, keywords, defaults = getfullargspec_no_self(obj)[:4]

            # Implementation function
            impl_name = self.options['impl']
            impl_obj = _import_object(impl_name)
            impl_args, impl_varargs, impl_keywords, impl_defaults = getfullargspec_no_self(impl_obj)[:4]

            # Format signature taking implementation into account
            args = list(args)
            defaults = list(defaults)

            # Helpers index into ``defaults`` with the usual argspec rule:
            # defaults align with the *last* len(defaults) entries of args.
            def set_default(arg, value):
                j = args.index(arg)
                defaults[len(defaults) - (len(args) - j)] = value

            def remove_arg(arg):
                if arg not in args:
                    return
                j = args.index(arg)
                if j < len(args) - len(defaults):
                    # positional-without-default: no default entry to drop
                    del args[j]
                else:
                    del defaults[len(defaults) - (len(args) - j)]
                    del args[j]

            # Collect implementation-only keyword arguments into the
            # ``options`` dict shown in the mangled signature.
            options = []
            for j, opt_name in enumerate(impl_args):
                if opt_name in args:
                    continue
                if j >= len(impl_args) - len(impl_defaults):
                    options.append((opt_name, impl_defaults[len(impl_defaults) - (len(impl_args) - j)]))
                else:
                    options.append((opt_name, None))
            set_default('options', dict(options))

            # Pin method=/solver= to the value given in the directive.
            if 'method' in self.options and 'method' in args:
                set_default('method', self.options['method'].strip())
            elif 'solver' in self.options and 'solver' in args:
                set_default('solver', self.options['solver'].strip())

            # Drop interface-only arguments the implementation ignores.
            special_args = {'fun', 'x0', 'args', 'tol', 'callback', 'method',
                            'options', 'solver'}
            for arg in list(args):
                if arg not in impl_args and arg not in special_args:
                    remove_arg(arg)

            signature = str(inspect.signature(obj))

            # Produce output
            self.options['noindex'] = True
            self.arguments[0] = name + signature
            lines = textwrap.dedent(pydoc.getdoc(impl_obj)).splitlines()
            # Change "Options" to "Other Parameters", run numpydoc, reset
            new_lines = []
            for line in lines:
                # Remap Options to the "Other Parameters" numpydoc section
                # along with correct heading length
                # NOTE(review): the original 'Options' underline line is kept
                # by the next iteration — apparently tolerated downstream;
                # confirm before changing.
                if line.strip() == 'Options':
                    line = "Other Parameters"
                    new_lines.extend([line, "-"*len(line)])
                    continue
                new_lines.append(line)
            # use impl_name instead of name here to avoid duplicate refs
            mangle_docstrings(env.app, 'function', impl_name,
                              None, None, new_lines)
            lines = new_lines
            new_lines = []
            for line in lines:
                if line.strip() == ':Other Parameters:':
                    # Insert the "See also" blurb and restore the section
                    # title to ":Options:" for display.
                    new_lines.extend((BLURB % (name,)).splitlines())
                    new_lines.append('\n')
                    new_lines.append(':Options:')
                else:
                    new_lines.append(line)
            self.content = ViewList(new_lines, self.content.parent)
            return base_directive.run(self)

        option_spec = dict(base_directive.option_spec)
        option_spec['impl'] = _option_required_str
        option_spec['method'] = _option_required_str

    return directive
| 5,487
| 33.515723
| 104
|
py
|
scipy
|
scipy-main/doc/source/conf.py
|
import math
import os
from os.path import relpath, dirname
import re
import sys
import warnings
from datetime import date
from docutils import nodes
from docutils.parsers.rst import Directive
import matplotlib
import matplotlib.pyplot as plt
from numpydoc.docscrape_sphinx import SphinxDocString
from sphinx.util import inspect
import scipy
from scipy._lib._util import _rng_html_rewrite
# Workaround for sphinx-doc/sphinx#6573
# ua._Function should not be treated as an attribute
import scipy._lib.uarray as ua
from scipy.stats._distn_infrastructure import rv_generic # noqa: E402
from scipy.stats._multivariate import multi_rv_generic # noqa: E402
old_isdesc = inspect.isdescriptor
inspect.isdescriptor = (lambda obj: old_isdesc(obj)
and not isinstance(obj, ua._Function))
# Currently required to build scipy.fft docs
os.environ['_SCIPY_BUILDING_DOC'] = 'True'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
import numpydoc.docscrape as np_docscrape # noqa:E402
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'numpydoc',
'sphinx_design',
'scipyoptdoc',
'doi_role',
'matplotlib.sphinxext.plot_directive',
'myst_nb',
]
# Do some matplotlib config in case users have a matplotlibrc that will break
# things
matplotlib.use('agg')
plt.ioff()
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The main toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
copyright = '2008-%s, The SciPy community' % date.today().year
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
version = re.sub(r'\.dev.*$', r'.dev', scipy.__version__)
release = version
if os.environ.get('CIRCLE_JOB', False) and \
os.environ.get('CIRCLE_BRANCH', '') != 'main':
version = os.environ['CIRCLE_BRANCH']
release = version
print(f"{project} (VERSION {version})")
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# Ensure all our internal links work
nitpicky = True
nitpick_ignore = [
# This ignores errors for classes (OptimizeResults, sparse.dok_matrix)
# which inherit methods from `dict`. missing references to builtins get
# ignored by default (see https://github.com/sphinx-doc/sphinx/pull/7254),
# but that fix doesn't work for inherited methods.
("py:class", "a shallow copy of D"),
("py:class", "a set-like object providing a view on D's keys"),
("py:class", "a set-like object providing a view on D's items"),
("py:class", "an object providing a view on D's values"),
("py:class", "None. Remove all items from D."),
("py:class", "(k, v), remove and return some (key, value) pair as a"),
("py:class", "None. Update D from dict/iterable E and F."),
("py:class", "v, remove specified key and return the corresponding value."),
]
exclude_patterns = [ # glob-style
]
# be strict about warnings in our examples, we should write clean code
# (exceptions permitted for pedagogical purposes below)
warnings.resetwarnings()
warnings.filterwarnings('error')
# allow these and show them
warnings.filterwarnings('default', module='sphinx') # internal warnings
# global weird ones that can be safely ignored
for key in (
r"OpenSSL\.rand is deprecated", # OpenSSL package in linkcheck
r"distutils Version", # distutils
):
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*" + key, category=DeprecationWarning)
warnings.filterwarnings( # matplotlib<->pyparsing issue
'ignore', message="Exception creating Regex for oneOf.*",
category=SyntaxWarning)
# warnings in examples (mostly) that we allow
# TODO: eventually these should be eliminated!
for key in (
'invalid escape sequence', # numpydoc 0.8 has some bad escape chars
'The integral is probably divergent', # stats.mielke example
'underflow encountered in square', # signal.filtfilt underflow
'underflow encountered in multiply', # scipy.spatial.HalfspaceIntersection
'underflow encountered in nextafter', # tuterial/interpolate.rst
# stats.skewnorm, stats.norminvgauss, stats.gaussian_kde,
# tutorial/stats.rst (twice):
'underflow encountered in exp',
):
warnings.filterwarnings(
'once', message='.*' + key)
# docutils warnings when using notebooks (see gh-17322)
# these will hopefully be removed in the near future
for key in (
r"The frontend.OptionParser class will be replaced",
r"The frontend.Option class will be removed",
):
warnings.filterwarnings('ignore', message=key, category=DeprecationWarning)
warnings.filterwarnings(
'ignore',
message=r'.*is obsoleted by Node.findall()',
category=PendingDeprecationWarning,
)
warnings.filterwarnings(
'ignore',
message=r'There is no current event loop',
category=DeprecationWarning,
)
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_logo = '_static/logo.svg'
html_favicon = '_static/favicon.ico'
html_theme_options = {
"github_url": "https://github.com/scipy/scipy",
"twitter_url": "https://twitter.com/SciPy_team",
"navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"],
"switcher": {
"json_url": "https://scipy.github.io/devdocs/_static/version_switcher.json",
"version_match": version,
}
}
if 'dev' in version:
html_theme_options["switcher"]["version_match"] = "development"
if 'versionwarning' in tags: # noqa
# Specific to docs.scipy.org deployment.
# See https://github.com/scipy/docs.scipy.org/blob/main/_static/versionwarning.js_t
src = ('var script = document.createElement("script");\n'
'script.type = "text/javascript";\n'
'script.src = "/doc/_static/versionwarning.js";\n'
'document.head.appendChild(script);')
html_context = {
'VERSIONCHECK_JS': src
}
html_js_files = ['versioncheck.js']
html_title = f"{project} v{version} Manual"
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_css_files = [
"scipy.css",
]
# html_additional_pages = {
# 'index': 'indexcontent.html',
# }
html_additional_pages = {}
html_use_modindex = True
html_domain_indices = False
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
mathjax_path = "scipy-mathjax/MathJax.js?config=scipy-mathjax"
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/devdocs', None),
'neps': ('https://numpy.org/neps', None),
'matplotlib': ('https://matplotlib.org/stable', None),
'asv': ('https://asv.readthedocs.io/en/stable/', None),
'statsmodels': ('https://www.statsmodels.org/stable', None),
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
np_docscrape.ClassDoc.extra_public_methods = [ # should match class.rst
'__call__', '__mul__', '__getitem__', '__len__',
]
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# maps functions with a name same as a class name that is indistinguishable
# Ex: scipy.signal.czt and scipy.signal.CZT or scipy.odr.odr and scipy.odr.ODR
# Otherwise, the stubs are overwritten when the name is same for
# OS (like MacOS) which has a filesystem that ignores the case
# See https://github.com/sphinx-doc/sphinx/pull/7927
autosummary_filename_map = {
"scipy.odr.odr": "odr-function",
"scipy.signal.czt": "czt-function",
}
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
autodoc_default_options = {
'inherited-members': None,
}
autodoc_typehints = 'none'
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Matplotlib plot_directive options
#------------------------------------------------------------------------------
plot_pre_code = """
import warnings
for key in (
'lsim2 is deprecated', # Deprecation of scipy.signal.lsim2
'impulse2 is deprecated', # Deprecation of scipy.signal.impulse2
'step2 is deprecated', # Deprecation of scipy.signal.step2
'interp2d` is deprecated', # Deprecation of scipy.interpolate.interp2d
'scipy.misc', # scipy.misc deprecated in v1.10.0; use scipy.datasets
'kurtosistest only valid', # intentionally "bad" excample in docstring
):
warnings.filterwarnings(action='ignore', message='.*' + key + '.*')
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96)]
plot_html_show_formats = False
plot_html_show_source_link = False
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Notebook tutorials with MyST-NB
# -----------------------------------------------------------------------------
nb_execution_mode = "auto"
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
# Not the same as from sphinx.util import inspect and needed here
import inspect # noqa: E402
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
    """
    Determine the GitHub URL corresponding to a Python object, or None
    when no source location can be found.
    """
    if domain != 'py':
        return None
    modname = info['module']
    fullname = info['fullname']
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    obj = submod
    # Walk attribute-by-attribute down to the named object.
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except Exception:
            return None
    # Use the original function object if it is wrapped.
    while hasattr(obj, "__wrapped__"):
        obj = obj.__wrapped__
    # SciPy's distributions are instances of *_gen. Point to this
    # class since it contains the implementation of all the methods.
    if isinstance(obj, (rv_generic, multi_rv_generic)):
        obj = obj.__class__
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        # Fall back to the defining module's source file.
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
            fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None
    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""
    # Make the path relative to the scipy installation root.
    startdir = os.path.abspath(os.path.join(dirname(scipy.__file__), '..'))
    fn = relpath(fn, start=startdir).replace(os.path.sep, '/')
    if fn.startswith('scipy/'):
        # Dev builds link to the exact commit (hash embedded in version);
        # otherwise fall back to main or the release tag.
        m = re.match(r'^.*dev0\+([a-f0-9]+)$', scipy.__version__)
        base_url = "https://github.com/scipy/scipy/blob"
        if m:
            return f"{base_url}/{m.group(1)}/{fn}{linespec}"
        elif 'dev' in scipy.__version__:
            return f"{base_url}/main/{fn}{linespec}"
        else:
            return f"{base_url}/v{scipy.__version__}/{fn}{linespec}"
    else:
        return None
# Tell overwrite numpydoc's logic to render examples containing rng.
SphinxDocString._str_examples = _rng_html_rewrite(
SphinxDocString._str_examples
)
class LegacyDirective(Directive):
    """
    Custom ``legacy`` admonition directive.

    Adapted from docutils/parsers/rst/directives/admonitions.py
    Uses a default text if the directive does not have contents. If it does,
    the default text is concatenated to the contents.
    """
    has_content = True
    node_class = nodes.admonition
    optional_arguments = 1

    def run(self):
        try:
            obj = self.arguments[0]
        except IndexError:
            # Argument is empty; use default text
            obj = "submodule"
        text = (f"This {obj} is considered legacy and will no longer receive "
                "updates. This could also mean it will be removed in future "
                "SciPy versions.")
        try:
            # Prepend the boilerplate to the user-supplied first line.
            self.content[0] = text+" "+self.content[0]
        except IndexError:
            # Content is empty; use the default text
            source, lineno = self.state_machine.get_source_and_line(
                self.lineno
            )
            self.content.append(
                text,
                source=source,
                offset=lineno
            )
        text = '\n'.join(self.content)
        # Create the admonition node, to be populated by `nested_parse`
        admonition_node = self.node_class(rawsource=text)
        # Set custom title
        title_text = "Legacy"
        textnodes, _ = self.state.inline_text(title_text, self.lineno)
        title = nodes.title(title_text, '', *textnodes)
        # Set up admonition node
        admonition_node += title
        # Select custom class for CSS styling
        admonition_node['classes'] = ['admonition-legacy']
        # Parse the directive contents
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
def setup(app):
    """Sphinx hook: register the custom ``legacy`` admonition directive."""
    app.add_directive("legacy", LegacyDirective)
| 16,583
| 31.839604
| 87
|
py
|
scipy
|
scipy-main/doc/source/doi_role.py
|
# -*- coding: utf-8 -*-
"""
doilinks
~~~~~~~~
Extension to add links to DOIs. With this extension you can use e.g.
:doi:`10.1016/S0022-2836(05)80360-2` in your documents. This will
create a link to a DOI resolver
(``https://doi.org/10.1016/S0022-2836(05)80360-2``).
The link caption will be the raw DOI.
You can also give an explicit caption, e.g.
:doi:`Basic local alignment search tool <10.1016/S0022-2836(05)80360-2>`.
:copyright: Copyright 2015 Jon Lund Steffensen. Based on extlinks by
the Sphinx team.
:license: BSD.
"""
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
def _external_id_role(text, url_prefix, default_title_prefix):
    """Shared implementation for external-ID link roles (DOI, arXiv).

    Builds ``url_prefix + id`` as the target and uses
    ``default_title_prefix + id`` as the caption unless the role text
    supplies an explicit title (``:doi:`Title <id>```).
    Returns the standard role result: (node list, system-message list).
    """
    text = utils.unescape(text)
    has_explicit_title, title, part = split_explicit_title(text)
    full_url = url_prefix + part
    if not has_explicit_title:
        title = default_title_prefix + part
    pnode = nodes.reference(title, title, internal=False, refuri=full_url)
    return [pnode], []


def doi_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
    """Role handler for :doi:`...` — link via the doi.org resolver.

    ``options``/``content`` are unused; defaults changed from the mutable
    ``{}``/``[]`` to ``None`` (backward compatible, avoids the shared
    mutable-default pitfall).
    """
    return _external_id_role(text, 'https://doi.org/', 'DOI:')


def arxiv_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
    """Role handler for :arxiv:`...` — link to the arXiv abstract page."""
    return _external_id_role(text, 'https://arxiv.org/abs/', 'arXiv:')
def setup_link_role(app):
    """Register the DOI and arXiv roles (both capitalizations) with Sphinx."""
    role_map = (
        ('doi', doi_role),
        ('DOI', doi_role),
        ('arXiv', arxiv_role),
        ('arxiv', arxiv_role),
    )
    for role_name, role_fn in role_map:
        app.add_role(role_name, role_fn, override=True)
def setup(app):
    """Sphinx extension entry point; role registration is deferred until
    the builder is initialised."""
    app.connect('builder-inited', setup_link_role)
    metadata = {'version': '0.1', 'parallel_read_safe': True}
    return metadata
| 1,785
| 32.698113
| 77
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/newton_krylov_preconditioning.py
|
import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def get_preconditioner():
    """Compute the preconditioner M, an approximate inverse of the 2-D
    Laplacian built from an incomplete LU factorization."""
    def lap1d(n, h):
        # 1-D second-difference operator: tridiagonal [1, -2, 1] / h**2.
        bands = zeros((3, n))
        bands[0, :] = 1/h/h
        bands[1, :] = -2/h/h
        bands[2, :] = 1/h/h
        return spdiags(bands, [-1, 0, 1], n, n)

    # 2-D Laplacian J1 via Kronecker products of the 1-D operators.
    J1 = kron(lap1d(nx, hx), eye(ny)) + kron(eye(nx), lap1d(ny, hy))

    # An approximate inverse is enough, so use the *incomplete LU*
    # decomposition instead of a full factorization.
    J1_ilu = spilu(J1)

    # spilu returns an object whose .solve() applies the approximate
    # inverse; wrap it as a LinearOperator for the Krylov methods.
    return LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
def solve(preconditioning=True):
    """Solve the nonlinear boundary-value problem with Newton-Krylov,
    optionally preconditioned, and return the solution grid."""
    count = [0]  # mutable cell so the closure can count evaluations

    def residual(P):
        # Discretized residual: 5-point Laplacian of P plus the nonlinear
        # term; boundary rows/columns use the fixed boundary values.
        count[0] += 1
        d2x = zeros_like(P)
        d2y = zeros_like(P)
        d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
        d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
        d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
        d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
        d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
        d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
        return d2x + d2y + 5*cosh(P).mean()**2

    # preconditioner
    if preconditioning:
        M = get_preconditioner()
    else:
        M = None

    # solve, passing the preconditioner to the inner Krylov iteration
    guess = zeros((nx, ny), float)
    sol = root(residual, guess, method='krylov',
               options={'disp': True,
                        'jac_options': {'inner_M': M}})
    print('Residual', abs(residual(sol.x)).max())
    print('Evaluations', count[0])
    return sol.x
def main():
    """Solve with preconditioning and show a pseudocolor plot of the result."""
    sol = solve(preconditioning=True)
    # visualize
    import matplotlib.pyplot as plt
    x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
    plt.clf()
    plt.pcolor(x, y, sol)
    plt.clim(0, 1)
    plt.colorbar()
    plt.show()

if __name__ == "__main__":
    main()
| 2,492
| 25.242105
| 70
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/optimize_global_1.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def eggholder(x):
    """Eggholder benchmark function of x = (x0, x1); highly multimodal,
    with global minimum ~ -959.6407 near (512, 404.2319)."""
    shifted = x[1] + 47
    first = -shifted * np.sin(np.sqrt(abs(x[0]/2 + shifted)))
    second = -x[0] * np.sin(np.sqrt(abs(x[0] - shifted)))
    return first + second
bounds = [(-512, 512), (-512, 512)]
x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])
results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=256, iters=5,
sampling_method='sobol')
fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_point(res, marker='o', color=None):
    """Mark an optimizer result on the image axes."""
    # Offset by 512: the data domain is [-512, 512] but imshow pixel
    # coordinates start at 0.
    ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)
plot_point(results['DE'], color='c') # differential_evolution - cyan
plot_point(results['DA'], color='w') # dual_annealing. - white
# SHGO produces multiple minima, plot them all (with a smaller marker size)
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
for i in range(results['shgo_sobol'].xl.shape[0]):
ax.plot(512 + results['shgo_sobol'].xl[i, 0],
512 + results['shgo_sobol'].xl[i, 1],
'ro', ms=2)
ax.set_xlim([-4, 514*2])
ax.set_ylim([-4, 514*2])
fig.tight_layout()
plt.show()
| 1,623
| 30.230769
| 75
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/normdiscr_plot2.py
|
# Tutorial example: build a discretized truncated normal ("normdiscrete"),
# draw a sample, and compare the sample CDF against the true CDF.
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4 # bounds for the truncated normal
normbound = (1 + 1/npointsf) * nbound # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
# probability mass per integer point, from differences of the truncnorm CDF
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
rng = np.random.default_rng()
normdiscrete = stats.rv_discrete(
    values=(gridint, np.round(probs, decimals=7)),
    name='normdiscrete')
n_sample = 500
rvs = normdiscrete.rvs(size=n_sample, random_state=rng)
f, l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
# NOTE(review): these two assignments are dead code — fs/ft are
# immediately overwritten by the cumulative versions below.
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)  # sample cumulative frequencies
ft = sfreq[:,2].cumsum() / float(n_sample)  # true cumulative probabilities
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
# overlay the continuous normal CDF with matching standard deviation
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5, scale=nd_std),
                    color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| 1,631
| 33
| 81
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/plot_boundary_modes.py
|
# Tutorial example: illustrate ndimage.map_coordinates boundary modes at
# interpolation orders 0, 1 and 3 on a small 1-D signal.
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
img = np.array([-2, -1, 0, 1, 2], float)
x = np.linspace(-2, 6, num=1000)  # sample points extending past both edges
modes = ['constant', 'grid-constant', 'nearest', 'reflect', 'mirror', 'wrap',
         'grid-wrap']
fig, axes = plt.subplots(len(modes), 3, figsize=(11, 8), sharex=True,
                         sharey=True)
for mode, (ax0, ax1, ax2) in zip(modes, axes):
    # one column per interpolation order
    y = ndimage.map_coordinates(img, [x], order=0, mode=mode)
    ax0.scatter(np.arange(img.size), img)
    ax0.plot(x, y, '-')
    ax0.set_title(f'mode={mode}, order=0')
    y2 = ndimage.map_coordinates(img, [x], order=1, mode=mode)
    ax1.scatter(np.arange(img.size), img)
    ax1.plot(x, y2, '-')
    ax1.set_title(f'mode={mode}, order=1')
    y3 = ndimage.map_coordinates(img, [x], order=3, mode=mode)
    ax2.scatter(np.arange(img.size), img)
    ax2.plot(x, y3, '-')
    ax2.set_title(f'mode={mode}, order=3')
    sz = len(img)
    # dashed lines mark the symmetry/period boundary assumed by each mode
    for ax in (ax0, ax1, ax2):
        if mode in ['grid-wrap', 'reflect']:
            ax.plot([-0.5, -0.5], [-2.5, 2.5], 'k--')
            ax.plot([sz - 0.5, sz - 0.5], [-2.5, 2.5], 'k--')
        elif mode in ['wrap', 'mirror']:
            ax.plot([0, 0], [-2.5, 2.5], 'k--')
            ax.plot([sz - 1, sz - 1], [-2.5, 2.5], 'k--')
    if mode != 'constant':
        # circle the interpolated values at integer positions outside the
        # original support to show how each mode extends the signal
        for xx in range(int(x[0]), int(x[-1] + 1)):
            if (xx < 0) or (xx > img.size - 1):
                idx = np.argmin(np.abs(x - xx))
                for y_vals, ax in zip((y, y2, y3), (ax0, ax1, ax2)):
                    ax.scatter(
                        [x[idx]], [y_vals[idx]], facecolors='none',
                        edgecolor='#0343df', marker='o'
                    )
plt.tight_layout()
plt.show()
| 1,744
| 31.314815
| 77
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/normdiscr_plot1.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4 # bounds for the truncated normal
normbound = (1 + 1/npointsf) * nbound # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1) # integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
rng = np.random.default_rng()
rvs = normdiscrete.rvs(size=n_sample, random_state=rng)
f, l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| 1,536
| 33.931818
| 79
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/gaussian_filter_plot1.py
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import gaussian_filter
grids = 2
boxs = 5
voxelarray = np.zeros((boxs * grids, boxs * grids, boxs * grids))
i = 1
for xi in range(0, 2):
for yi in range(0, 2):
for zi in range(0, 2):
voxelarray[
xi * boxs: xi * boxs + boxs,
yi * boxs: yi * boxs + boxs,
zi * boxs: zi * boxs + boxs,
] = i
i += 1
voxelarray = np.uint8(voxelarray * 255 / 8)
cmap = plt.get_cmap("YlGnBu")
def plot_voxels(varray, ax, title):
colors = cmap(varray)
ax.view_init(30, 200)
ax.axis("off")
ax.voxels(varray, facecolors=colors, edgecolor="#000000", linewidth=0.1)
ax.set_title(title, fontsize=30)
fig = plt.figure(figsize=(16, 9))
ax1 = fig.add_subplot(1, 3, 1, projection="3d")
ax2 = fig.add_subplot(1, 3, 2, projection="3d")
ax3 = fig.add_subplot(1, 3, 3, projection="3d")
plot_voxels(voxelarray, ax1, title="Original")
voxelarray2 = gaussian_filter(voxelarray, sigma=1)
plot_voxels(voxelarray2, ax2, title="gaussian_filter \n sigma=1")
voxelarray3 = gaussian_filter(voxelarray, sigma=3)
plot_voxels(voxelarray3, ax3, title="gaussian_filter \n sigma=3")
plt.tight_layout()
plt.show()
| 1,256
| 25.744681
| 76
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/morphology_binary_dilation_erosion.py
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage
# code for ball taken from
# https://github.com/scikit-image/scikit-image/blob/main/skimage/morphology/footprints.py#L225-L252
# and therefore same as `from skimage.morphology import ball`
def ball(radius, dtype=np.uint8):
n = 2 * radius + 1
Z, Y, X = np.mgrid[
-radius: radius: n * 1j,
-radius: radius: n * 1j,
-radius: radius: n * 1j
]
s = X ** 2 + Y ** 2 + Z ** 2
return np.array(s <= radius * radius, dtype=dtype)
def plot_voxels(varray, ax, title):
ax.view_init(20, 200)
ax.voxels(varray, edgecolor="k")
ax.set_title(title, fontsize=30)
voxelarray = np.full((11, 11, 11), 0)
voxelarray[5, 3, 5] = 1
voxelarray[5, 7, 5] = 1
img_morphed = scipy.ndimage.binary_dilation(voxelarray, ball(3))
img_morphed2 = scipy.ndimage.binary_erosion(img_morphed, ball(2))
fig = plt.figure(figsize=(16, 9))
ax1 = fig.add_subplot(1, 3, 1, projection="3d")
ax2 = fig.add_subplot(1, 3, 2, projection="3d")
ax3 = fig.add_subplot(1, 3, 3, projection="3d")
plot_voxels(voxelarray, ax1, title="a) Original")
plot_voxels(img_morphed, ax2, title="b) binary_dilation \nwith ball, radius 3")
plot_voxels(img_morphed2, ax3,
title="c) binary_erosion of b \nwith ball, radius 2")
plt.tight_layout()
plt.show()
| 1,332
| 28.622222
| 99
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/optimize_global_2.py
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def eggholder(x):
return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))
-x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))
bounds = [(-512, 512), (-512, 512)]
x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111, projection='3d')
ax.view_init(45, -45)
ax.plot_surface(xgrid, ygrid, eggholder(xy), cmap='terrain')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('eggholder(x, y)')
fig.tight_layout()
plt.show()
| 657
| 24.307692
| 70
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/plot_interp_grid.py
|
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
orders = [2, 3]
fig, axes = plt.subplots(1, len(orders), figsize=(11, 5))
n_cells = 7 # grid will be size (n_cells, n_cells)
# desired interpolation coordinate (xi, yi)
xi, yi = 3.3, 3.7
def get_start(cc, order):
if order % 1 == 0:
start = math.floor(cc) - order // 2
else:
start = math.floor(cc + 0.5) - order // 2
return start
for ax, order in zip(axes, orders):
# draw open circles at the locations of pixel centers
for n in range(n_cells):
ax.plot(np.arange(n_cells), -np.full(n_cells, n), 'ko',
fillstyle='none')
# draw pixel borders
for n in range(n_cells + 1):
ax.plot([n - 0.5, n - 0.5], [0.5, -n_cells + .5], 'k-')
ax.plot([-0.5, n_cells - .5], [-n + 0.5, -n + 0.5], 'k-')
# plot an example coordinate location to interpolate
ax.plot([xi], [-yi], 'rx')
# plot filled circles for the points that will be involved in the
# interpolation
startx = get_start(xi, order)
starty = get_start(yi, order)
xc = np.tile(np.arange(startx, startx + order + 1)[:, np.newaxis],
(1, order + 1)).ravel()
yc = np.tile(np.arange(starty, starty + order + 1)[np.newaxis, :],
(order + 1, 1)).ravel()
ax.plot(xc, -yc, 'ko')
ax.set_title("Interpolation (order = {})".format(order),
fontdict=dict(size=16, weight='bold'))
# set limits and ticks for 0, 0 voxel at upper left
ax.axis('square')
ax.set_xticks(np.arange(n_cells + 1))
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
yticks = ticker.FixedLocator(-np.arange(n_cells, -1, -1))
ax.yaxis.set_major_locator(yticks)
yticklabels = ticker.FixedFormatter(np.arange(n_cells, -1, -1))
ax.yaxis.set_major_formatter(yticklabels)
ax.set_ylim([-n_cells + 0.5, 0.5])
ax.set_xlim([-0.5, n_cells - 0.5])
plt.tight_layout()
plt.plot()
| 1,993
| 30.15625
| 70
|
py
|
scipy
|
scipy-main/doc/source/tutorial/examples/ndimage/3D_binary_structure.py
|
import matplotlib.pyplot as plt
import scipy.ndimage
def plot_voxels(varray, ax, title):
ax.view_init(20, 200)
ax.voxels(varray, edgecolor="k")
ax.set_title(title, fontsize=30)
fig = plt.figure(figsize=(16, 9))
for i in [1, 2, 3]:
ax = fig.add_subplot(1, 3, i, projection="3d")
arrray = scipy.ndimage.generate_binary_structure(3, i)
plot_voxels(arrray, ax, title=f"rank=3 \n connectivity={i}")
plt.tight_layout()
plt.show()
| 454
| 21.75
| 64
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/kde_plot4.py
|
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
"""We use Scott's Rule, multiplied by a constant factor."""
return np.power(obj.n, -1./(obj.d+4)) * fac
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
np.random.normal(loc=loc2, scale=scale2, size=size2)])
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| 1,457
| 31.4
| 78
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/hinv_plot.py
|
import numpy as np
from scipy.stats.sampling import NumericalInverseHermite
from scipy.stats import norm
from scipy.special import ndtr
import matplotlib.pyplot as plt
class StandardNormal:
def pdf(self, x):
return 1/np.sqrt(2*np.pi) * np.exp(-x**2 / 2)
def cdf(self, x):
return ndtr(x)
dist = StandardNormal()
urng = np.random.default_rng()
rng = NumericalInverseHermite(dist, random_state=urng)
rvs = rng.rvs(10000)
x = np.linspace(rvs.min()-0.1, rvs.max()+0.1, 1000)
fx = norm.pdf(x)
plt.plot(x, fx, 'r-', lw=2, label='true distribution')
plt.hist(rvs, bins=20, density=True, alpha=0.8, label='random variates')
plt.xlabel('x')
plt.ylabel('PDF(x)')
plt.title('Numerical Inverse Hermite Samples')
plt.legend()
plt.show()
| 752
| 24.965517
| 72
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/qmc_plot_conv_mc.py
|
"""Integration convergence.
The function is a synthetic example specifically designed
to verify the correctness of the implementation [1]_.
References
----------
.. [1] Art B. Owen. On dropping the first Sobol' point. arXiv 2008.08051,
2020.
"""
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
n_conv = 99
ns_gen = 2 ** np.arange(4, 13) # 13
def art_2(sample):
# dim 3, true value 5/3 + 5*(5 - 1)/4
return np.sum(sample, axis=1) ** 2
functions = namedtuple('functions', ['name', 'func', 'dim', 'ref'])
case = functions('Art 2', art_2, 5, 5 / 3 + 5 * (5 - 1) / 4)
def conv_method(sampler, func, n_samples, n_conv, ref):
samples = [sampler(n_samples) for _ in range(n_conv)]
samples = np.array(samples)
evals = [np.sum(func(sample)) / n_samples for sample in samples]
squared_errors = (ref - np.array(evals)) ** 2
rmse = (np.sum(squared_errors) / n_conv) ** 0.5
return rmse
# Analysis
sample_mc_rmse = []
rng = np.random.default_rng()
for ns in ns_gen:
# Monte Carlo
sampler_mc = lambda x: rng.random((x, case.dim))
conv_res = conv_method(sampler_mc, case.func, ns, n_conv, case.ref)
sample_mc_rmse.append(conv_res)
sample_mc_rmse = np.array(sample_mc_rmse)
# Plot
fig, ax = plt.subplots(figsize=(5, 3))
ax.set_aspect('equal')
ratio = sample_mc_rmse[0] / ns_gen[0] ** (-1 / 2)
ax.plot(ns_gen, ns_gen ** (-1 / 2) * ratio, ls='-', c='k')
ax.scatter(ns_gen, sample_mc_rmse)
ax.set_xlabel(r'$N_s$')
ax.set_xscale('log')
ax.set_xticks(ns_gen)
ax.set_xticklabels([fr'$2^{{{ns}}}$' for ns in np.arange(4, 13)])
ax.set_ylabel(r'$\log (\epsilon)$')
ax.set_yscale('log')
fig.tight_layout()
plt.show()
| 1,704
| 22.040541
| 73
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/qmc_plot_mc.py
|
"""Multiple MC to show how it can be bad."""
from scipy.stats import qmc
from scipy.stats._qmc import check_random_state
import numpy as np
import matplotlib.pyplot as plt
rng = np.random.default_rng()
n_sample = 256
dim = 2
sample = {}
# MC
sample['MC 1'] = rng.random((n_sample, dim))
sample["MC 2"] = rng.random((n_sample, dim))
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
for i, kind in enumerate(sample):
axs[i].scatter(sample[kind][:, 0], sample[kind][:, 1])
axs[i].set_aspect('equal')
axs[i].set_xlabel(r'$x_1$')
axs[i].set_ylabel(r'$x_2$')
plt.tight_layout()
plt.show()
| 606
| 18.580645
| 58
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/kde_plot3.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
rng = np.random.default_rng()
x1 = rng.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200, random_state=rng) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| 1,249
| 28.761905
| 78
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/qmc_plot_conv_mc_sobol.py
|
"""Integration convergence comparison: MC vs Sobol'.
The function is a synthetic example specifically designed
to verify the correctness of the implementation [2]_.
References
----------
.. [1] I. M. Sobol. The distribution of points in a cube and the accurate
evaluation of integrals. Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
1967.
.. [2] Art B. Owen. On dropping the first Sobol' point. arXiv 2008.08051,
2020.
"""
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import qmc
n_conv = 99
ns_gen = 2 ** np.arange(4, 13) # 13
def art_2(sample):
# dim 3, true value 5/3 + 5*(5 - 1)/4
return np.sum(sample, axis=1) ** 2
functions = namedtuple('functions', ['name', 'func', 'dim', 'ref'])
case = functions('Art 2', art_2, 5, 5 / 3 + 5 * (5 - 1) / 4)
def conv_method(sampler, func, n_samples, n_conv, ref):
samples = [sampler(n_samples) for _ in range(n_conv)]
samples = np.array(samples)
evals = [np.sum(func(sample)) / n_samples for sample in samples]
squared_errors = (ref - np.array(evals)) ** 2
rmse = (np.sum(squared_errors) / n_conv) ** 0.5
return rmse
# Analysis
sample_mc_rmse = []
sample_sobol_rmse = []
rng = np.random.default_rng()
for ns in ns_gen:
# Monte Carlo
sampler_mc = lambda x: rng.random((x, case.dim))
conv_res = conv_method(sampler_mc, case.func, ns, n_conv, case.ref)
sample_mc_rmse.append(conv_res)
# Sobol'
engine = qmc.Sobol(d=case.dim, scramble=False)
conv_res = conv_method(engine.random, case.func, ns, 1, case.ref)
sample_sobol_rmse.append(conv_res)
sample_mc_rmse = np.array(sample_mc_rmse)
sample_sobol_rmse = np.array(sample_sobol_rmse)
# Plot
fig, ax = plt.subplots(figsize=(4, 4))
ax.set_aspect('equal')
# MC
ratio = sample_mc_rmse[0] / ns_gen[0] ** (-1 / 2)
ax.plot(ns_gen, ns_gen ** (-1 / 2) * ratio, ls='-', c='k')
ax.scatter(ns_gen, sample_mc_rmse, label="MC")
# Sobol'
ratio = sample_sobol_rmse[0] / ns_gen[0] ** (-2/2)
ax.plot(ns_gen, ns_gen ** (-2/2) * ratio, ls='-.', c='k')
ax.scatter(ns_gen, sample_sobol_rmse, label="Sobol' unscrambled")
ax.set_xlabel(r'$N_s$')
ax.set_xscale('log')
ax.set_xticks(ns_gen)
ax.set_xticklabels([fr'$2^{{{ns}}}$' for ns in np.arange(4, 13)])
ax.set_ylabel(r'$\log (\epsilon)$')
ax.set_yscale('log')
ax.legend(loc='upper right')
fig.tight_layout()
plt.show()
| 2,387
| 24.956522
| 73
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/qmc_plot_mc_qmc.py
|
"""MC vs QMC in terms of space filling."""
from scipy.stats import qmc
import numpy as np
import matplotlib.pyplot as plt
rng = np.random.default_rng()
n_sample = 256
dim = 2
sample = {}
# MC
sample['MC'] = rng.random((n_sample, dim))
# Sobol'
engine = qmc.Sobol(d=dim, seed=rng)
sample["Sobol'"] = engine.random(n_sample)
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
for i, kind in enumerate(sample):
axs[i].scatter(sample[kind][:, 0], sample[kind][:, 1])
axs[i].set_aspect('equal')
axs[i].set_xlabel(r'$x_1$')
axs[i].set_ylabel(r'$x_2$')
axs[i].set_title(f'{kind}—$C^2 = ${qmc.discrepancy(sample[kind]):.2}')
plt.tight_layout()
plt.show()
| 673
| 18.823529
| 74
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/mgc_plot3.py
|
import numpy as np
import matplotlib.pyplot as plt
def mgc_plot(x, y, sim_name):
"""Plot sim and MGC-plot"""
# simulation
plt.figure(figsize=(8, 8))
ax = plt.gca()
ax.set_title(sim_name + " Simulation", fontsize=20)
ax.scatter(x, y)
ax.set_xlabel('X', fontsize=15)
ax.set_ylabel('Y', fontsize=15)
ax.axis('equal')
ax.tick_params(axis="x", labelsize=15)
ax.tick_params(axis="y", labelsize=15)
plt.show()
rng = np.random.default_rng()
unif = np.array(rng.uniform(0, 5, size=100))
x = unif * np.cos(np.pi * unif)
y = unif * np.sin(np.pi * unif) + 0.4 * rng.random(x.size)
mgc_plot(x, y, "Spiral")
| 648
| 23.037037
| 58
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/qmc_plot_discrepancy.py
|
"""Calculate the discrepancy of 2 designs and compare them."""
import numpy as np
from scipy.stats import qmc
import matplotlib.pyplot as plt
space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
space_2 = np.array([[1, 5], [2, 4], [3, 3], [4, 2], [5, 1], [6, 6]])
l_bounds = [0.5, 0.5]
u_bounds = [6.5, 6.5]
space_1 = qmc.scale(space_1, l_bounds, u_bounds, reverse=True)
space_2 = qmc.scale(space_2, l_bounds, u_bounds, reverse=True)
sample = {'space_1': space_1, 'space_2': space_2}
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
for i, kind in enumerate(sample):
axs[i].scatter(sample[kind][:, 0], sample[kind][:, 1])
axs[i].set_aspect('equal')
axs[i].set_xlabel(r'$x_1$')
axs[i].set_ylabel(r'$x_2$')
axs[i].set_title(f'{kind}—$C^2 = ${qmc.discrepancy(sample[kind]):.5}')
plt.tight_layout()
plt.show()
| 847
| 28.241379
| 74
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/kde_plot5.py
|
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def measure(n):
"""Measurement model, return two coupled measurements."""
m1 = np.random.normal(size=n)
m2 = np.random.normal(scale=0.5, size=n)
return m1+m2, m1-m2
m1, m2 = measure(2000)
xmin = m1.min()
xmax = m1.max()
ymin = m2.min()
ymax = m2.max()
X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([m1, m2])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel.evaluate(positions).T, X.shape)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
extent=[xmin, xmax, ymin, ymax])
ax.plot(m1, m2, 'k.', markersize=2)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
plt.show()
| 814
| 22.285714
| 61
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/mgc_plot2.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multiscale_graphcorr
def mgc_plot(x, y, mgc_dict):
"""Plot sim and MGC-plot"""
plt.figure(figsize=(8, 8))
ax = plt.gca()
# local correlation map
mgc_map = mgc_dict["mgc_map"]
# draw heatmap
ax.set_title("Local Correlation Map", fontsize=20)
im = ax.imshow(mgc_map, cmap='YlGnBu')
# colorbar
cbar = ax.figure.colorbar(im, ax=ax)
cbar.ax.set_ylabel("", rotation=-90, va="bottom")
ax.invert_yaxis()
# Turn spines off and create white grid.
for _, spine in ax.spines.items():
spine.set_visible(False)
# optimal scale
opt_scale = mgc_dict["opt_scale"]
ax.scatter(opt_scale[0], opt_scale[1],
marker='X', s=200, color='red')
# other formatting
ax.tick_params(bottom="off", left="off")
ax.set_xlabel('#Neighbors for X', fontsize=15)
ax.set_ylabel('#Neighbors for Y', fontsize=15)
ax.tick_params(axis="x", labelsize=15)
ax.tick_params(axis="y", labelsize=15)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
rng = np.random.default_rng()
x = np.linspace(-1, 1, num=100)
y = x + 0.3 * rng.random(x.size)
_, _, mgc_dict = multiscale_graphcorr(x, y, random_state=rng)
mgc_plot(x, y, mgc_dict)
| 1,282
| 25.183673
| 61
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/kde_plot2.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
x_eval = np.linspace(-10, 10, num=200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
def my_kde_bandwidth(obj, fac=1./5):
"""We use Scott's Rule, multiplied by a constant factor."""
return np.power(obj.n, -1./(obj.d+4)) * fac
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x1, np.zeros(x1.shape), 'b+', ms=20) # rug plot
kde3 = stats.gaussian_kde(x1, bw_method=my_kde_bandwidth)
ax.plot(x_eval, kde3(x_eval), 'g-', label="With smaller BW")
plt.show()
| 630
| 26.434783
| 63
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/mgc_plot1.py
|
import numpy as np
import matplotlib.pyplot as plt
def mgc_plot(x, y, sim_name):
"""Plot sim and MGC-plot"""
# simulation
plt.figure(figsize=(8, 8))
ax = plt.gca()
ax.set_title(sim_name + " Simulation", fontsize=20)
ax.scatter(x, y)
ax.set_xlabel('X', fontsize=15)
ax.set_ylabel('Y', fontsize=15)
ax.axis('equal')
ax.tick_params(axis="x", labelsize=15)
ax.tick_params(axis="y", labelsize=15)
plt.show()
rng = np.random.default_rng()
x = np.linspace(-1, 1, num=100)
y = x + 0.3 * rng.random(x.size)
mgc_plot(x, y, "Linear")
| 577
| 21.230769
| 55
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/qmc_plot_sobol_halton.py
|
"""Sobol' and Halton sequences."""
from scipy.stats import qmc
import numpy as np
import matplotlib.pyplot as plt
rng = np.random.default_rng()
n_sample = 256
dim = 2
sample = {}
# Sobol'
engine = qmc.Sobol(d=dim, seed=rng)
sample["Sobol'"] = engine.random(n_sample)
# Halton
engine = qmc.Halton(d=dim, seed=rng)
sample["Halton"] = engine.random(n_sample)
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
for i, kind in enumerate(sample):
axs[i].scatter(sample[kind][:, 0], sample[kind][:, 1])
axs[i].set_aspect('equal')
axs[i].set_xlabel(r'$x_1$')
axs[i].set_ylabel(r'$x_2$')
axs[i].set_title(f'{kind}—$C^2 = ${qmc.discrepancy(sample[kind]):.2}')
plt.tight_layout()
plt.show()
| 706
| 19.2
| 74
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/mgc_plot4.py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multiscale_graphcorr
def mgc_plot(x, y, mgc_dict):
"""Plot sim and MGC-plot"""
plt.figure(figsize=(8, 8))
ax = plt.gca()
# local correlation map
mgc_map = mgc_dict["mgc_map"]
# draw heatmap
ax.set_title("Local Correlation Map", fontsize=20)
im = ax.imshow(mgc_map, cmap='YlGnBu')
# colorbar
cbar = ax.figure.colorbar(im, ax=ax)
cbar.ax.set_ylabel("", rotation=-90, va="bottom")
ax.invert_yaxis()
# Turn spines off and create white grid.
for _, spine in ax.spines.items():
spine.set_visible(False)
# optimal scale
opt_scale = mgc_dict["opt_scale"]
ax.scatter(opt_scale[0], opt_scale[1],
marker='X', s=200, color='red')
# other formatting
ax.tick_params(bottom="off", left="off")
ax.set_xlabel('#Neighbors for X', fontsize=15)
ax.set_ylabel('#Neighbors for Y', fontsize=15)
ax.tick_params(axis="x", labelsize=15)
ax.tick_params(axis="y", labelsize=15)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
rng = np.random.default_rng()
unif = np.array(rng.uniform(0, 5, size=100))
x = unif * np.cos(np.pi * unif)
y = unif * np.sin(np.pi * unif) + 0.4 * rng.random(x.size)
_, _, mgc_dict = multiscale_graphcorr(x, y, random_state=rng)
mgc_plot(x, y, mgc_dict)
| 1,352
| 26.612245
| 61
|
py
|
scipy
|
scipy-main/doc/source/tutorial/stats/plots/qmc_plot_curse.py
|
"""Visualize the curse-of-dimensionality.
It presents a saturated design in 1, 2 and 3 dimensions for a
given discretization.
"""
import matplotlib.pyplot as plt
import numpy as np
disc = 10
x = np.linspace(0, 1, disc)
y = np.linspace(0, 1, disc)
z = np.linspace(0, 1, disc)
xx, yy, zz = np.meshgrid(x, y, z)
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
ax.set_aspect('equal')
ax.scatter(xx, yy * 0)
ax.set_xlabel(r'$x_1$')
ax.get_yaxis().set_visible(False)
ax = fig.add_subplot(132)
ax.set_aspect('equal')
ax.scatter(xx, yy)
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
ax = fig.add_subplot(133, projection='3d')
ax.scatter(xx, yy, zz)
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
ax.set_zlabel(r'$x_3$')
plt.tight_layout(pad=2)
plt.show()
| 770
| 19.289474
| 61
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/setup.py
|
from setuptools import setup
setup(
name="ptmcmcsampler",
author="Justin A. Ellis",
author_email="justin.ellis18@gmail.com",
packages=["PTMCMCSampler"],
package_dir={"PTMCMCSampler": "PTMCMCSampler"},
url="https://github.com/jellis18/PTMCMCSampler",
license="MIT",
zip_safe=False,
description="Parallel tempering MCMC sampler written in Python",
long_description=open("README.md").read() + "\n\n" + "---------\n\n" + open("HISTORY.md").read(),
long_description_content_type="text/markdown",
package_data={"": ["README.md", "HISTORY.md"]},
install_requires=["numpy>=1.16.3", "scipy>=1.2.0"],
python_requires=">=3.7",
extras_require={"mpi": ["mpi4py>=3.0.3"]},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
)
| 1,284
| 37.939394
| 101
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/examples/simple.py
|
#!/usr/bin/env python
# coding: utf-8
from pathlib import Path
import numpy as np
from PTMCMCSampler import PTMCMCSampler
# ## Define the likelihood and posterior
#
# Functions must read in parameter vector and output log-likelihood or log-prior.
# Usually easiest to use a class if you need to store some other data or parameters
class GaussianLikelihood(object):
def __init__(self, ndim=2, pmin=-10, pmax=10):
self.a = np.ones(ndim) * pmin
self.b = np.ones(ndim) * pmax
# get means
self.mu = np.random.uniform(pmin, pmax, ndim)
# ... and a positive definite, non-trivial covariance matrix.
cov = 0.5 - np.random.rand(ndim**2).reshape((ndim, ndim))
cov = np.triu(cov)
cov += cov.T - np.diag(cov.diagonal())
self.cov = np.dot(cov, cov)
# Invert the covariance matrix first.
self.icov = np.linalg.inv(self.cov)
def lnlikefn(self, x):
diff = x - self.mu
return -np.dot(diff, np.dot(self.icov, diff)) / 2.0
def lnpriorfn(self, x):
if np.all(self.a <= x) and np.all(self.b >= x):
return 0.0
else:
return -np.inf
# ## Setup Gaussian model class
# In[3]:
ndim = 20
pmin, pmax = 0.0, 10.0
glo = GaussianLikelihood(ndim=ndim, pmin=pmin, pmax=pmax)
# ## Setup sampler
#
# Need to initalize the sample at ```p0``` and give an inital jump covariance matrix ```cov```.
# In[4]:
# Set the start position and the covariance
p0 = np.random.uniform(pmin, pmax, ndim)
cov = np.eye(ndim) * 0.1**2
# In[5]:
sampler = PTMCMCSampler.PTSampler(
ndim, glo.lnlikefn, glo.lnpriorfn, np.copy(cov), outDir=str(Path(__file__).parent / "chains")
)
# ## Add custom jump
#
# Can add custom jump in the following way
# In[6]:
class UniformJump(object):
def __init__(self, pmin, pmax):
"""Draw random parameters from pmin, pmax"""
self.pmin = pmin
self.pmax = pmax
def jump(self, x, it, beta):
"""
Function prototype must read in parameter vector x,
sampler iteration number it, and inverse temperature beta
"""
# log of forward-backward jump probability
lqxy = 0
# uniformly drawm parameters
q = np.random.uniform(self.pmin, self.pmax, len(x))
return q, lqxy
# In[7]:
# add to jump proposal cycle
ujump = UniformJump(pmin, pmax)
sampler.addProposalToCycle(ujump.jump, 5)
# ## Run Sampler for 100000 steps
#
# Different jump proposal weights are given as integers.
# For example we have used a weight of 20 for all three proposals here.
# That means that each will be used with a probability of 20/60 = 1/3.
# In[8]:
sampler.sample(p0, 10000, burn=500, thin=1, covUpdate=500, SCAMweight=20, AMweight=20, DEweight=20)
| 2,794
| 21.723577
| 99
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/tests/test_simple.py
|
"""
Integration tests to make sure the sampler at least runs. Better than nothing...
"""
import shutil
from unittest import TestCase
import numpy as np
from mpi4py import MPI
from PTMCMCSampler import PTMCMCSampler
from PTMCMCSampler import nompi4py as MPIDUMMY
class GaussianLikelihood(object):
def __init__(self, ndim=2, pmin=-10, pmax=10):
self.a = np.ones(ndim) * pmin
self.b = np.ones(ndim) * pmax
# get means
self.mu = np.random.uniform(pmin, pmax, ndim)
# ... and a positive definite, non-trivial covariance matrix.
cov = 0.5 - np.random.rand(ndim**2).reshape((ndim, ndim))
cov = np.triu(cov)
cov += cov.T - np.diag(cov.diagonal())
self.cov = np.dot(cov, cov)
# Invert the covariance matrix first.
self.icov = np.linalg.inv(self.cov)
def lnlikefn(self, x):
diff = x - self.mu
return -np.dot(diff, np.dot(self.icov, diff)) / 2.0
def lnpriorfn(self, x):
if np.all(self.a <= x) and np.all(self.b >= x):
return 0.0
else:
return -np.inf
class UniformJump(object):
def __init__(self, pmin, pmax):
"""Draw random parameters from pmin, pmax"""
self.pmin = pmin
self.pmax = pmax
def jump(self, x, it, beta):
"""
Function prototype must read in parameter vector x,
sampler iteration number it, and inverse temperature beta
"""
# log of forward-backward jump probability
lqxy = 0
# uniformly drawm parameters
q = np.random.uniform(self.pmin, self.pmax, len(x))
return q, lqxy
class TestSimpleSampler(TestCase):
@classmethod
def tearDownClass(cls):
shutil.rmtree("chains")
def setUp(self) -> None:
self.comm = MPI.COMM_WORLD
def test_simple(self):
# ## Setup Gaussian model class
ndim = 20
pmin, pmax = 0.0, 10.0
glo = GaussianLikelihood(ndim=ndim, pmin=pmin, pmax=pmax)
# ## Setup sampler
# Set the start position and the covariance
p0 = np.random.uniform(pmin, pmax, ndim)
cov = np.eye(ndim) * 0.1**2
sampler = PTMCMCSampler.PTSampler(
ndim,
glo.lnlikefn,
glo.lnpriorfn,
np.copy(cov),
outDir="./chains",
comm=self.comm,
)
# add to jump proposal cycle
ujump = UniformJump(pmin, pmax)
sampler.addProposalToCycle(ujump.jump, 5)
sampler.sample(p0, 10000, burn=500, thin=1, covUpdate=500, SCAMweight=20, AMweight=20, DEweight=20)
class TestSimpleSamplerNoMPI(TestSimpleSampler):
    """Re-run the simple sampler test with the dummy (no-MPI) communicator."""

    def setUp(self) -> None:
        self.comm = MPIDUMMY.COMM_WORLD

    def test_simple(self):
        # Same test body as the parent; only the communicator differs.
        return super().test_simple()
| 2,808
| 25.5
| 107
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/tests/test_nuts.py
|
import shutil
from unittest import TestCase
import numpy as np
import scipy.linalg as sl
import scipy.optimize as so
from mpi4py import MPI
from PTMCMCSampler import PTMCMCSampler
from PTMCMCSampler import nompi4py as MPIDUMMY
class GaussianLikelihood(object):
    """Unit-Gaussian likelihood with a uniform box prior and gradients.

    The likelihood is an ndim-dimensional standard normal centred at the
    origin (normalisation included); the prior is uniform on the box
    [pmin, pmax]^ndim. Gradients and the (constant) Hessian are provided
    for the gradient-based jump proposals.
    """

    def __init__(self, ndim=2, pmin=-10, pmax=10):
        # Box-prior boundaries, one entry per dimension
        self.a = np.ones(ndim) * pmin
        self.b = np.ones(ndim) * pmax

    def lnlikefn(self, x):
        """Log-likelihood of a unit Gaussian, normalisation included."""
        return -0.5 * np.sum(x**2) - len(x) * 0.5 * np.log(2 * np.pi)

    def lnlikefn_grad(self, x):
        """Return (log-likelihood, gradient)."""
        ll = -0.5 * np.sum(x**2) - len(x) * 0.5 * np.log(2 * np.pi)
        ll_grad = -x
        return ll, ll_grad

    def lnpriorfn(self, x):
        """Log of the uniform prior: 0 inside the box, -inf outside.

        (An unreachable ``return 0.0`` after the if/else was removed:
        both branches already return.)
        """
        if np.all(self.a <= x) and np.all(self.b >= x):
            return 0.0
        else:
            return -np.inf

    def lnpriorfn_grad(self, x):
        """Return (log-prior, gradient); the prior is flat, so the gradient is zero."""
        return self.lnpriorfn(x), np.zeros_like(x)

    def lnpost_grad(self, x):
        """Return (log-posterior, gradient)."""
        ll, ll_grad = self.lnlikefn_grad(x)
        lp, lp_grad = self.lnpriorfn_grad(x)
        return ll + lp, ll_grad + lp_grad

    def lnpost(self, x):
        """Log-posterior value only."""
        return self.lnpost_grad(x)[0]

    def hessian(self, x):
        """Hessian of the log-posterior: constant -I for a unit Gaussian."""
        return -np.eye(len(x))
class intervalTransform(object):
    """
    Wrapper class of the likelihood for Hamiltonian samplers. This implements a
    coordinate transformation for all parameters from an interval to all real numbers.

    The per-parameter mapping is p = log((x - a) / (b - x)), with inverse
    x = a + (b - a) * exp(p) / (1 + exp(p)).
    """

    def __init__(self, likob, pmin=None, pmax=None):
        """Initialize the intervalLikelihood with a ptaLikelihood object

        :likob: Wrapped likelihood object (must expose a, b and the ln* methods)
        :pmin: Optional scalar lower bound overriding likob.a in every dimension
        :pmax: Optional scalar upper bound overriding likob.b in every dimension
        """
        self.likob = likob
        if pmin is None:
            self.a = likob.a
        else:
            self.a = pmin * np.ones_like(likob.a)
        if pmax is None:
            self.b = likob.b
        else:
            self.b = pmax * np.ones_like(likob.b)

    def forward(self, x):
        """Forward transform the real coordinates (on the interval) to the
        transformed coordinates (on all real numbers)
        """
        p = np.atleast_2d(x.copy())
        # Boundary points cannot go through the log expression below.
        # BUGFIX: the original masks were swapped (x == a was sent to +inf and
        # x == b to -inf), contradicting both log((x - a) / (b - x)) and
        # backward(), which maps p = +inf to x = b.
        posinf, neginf = (self.b == x), (self.a == x)
        m = ~(posinf | neginf)
        p[:, m] = np.log((p[:, m] - self.a[m]) / (self.b[m] - p[:, m]))
        p[:, posinf] = np.inf
        p[:, neginf] = -np.inf
        return p.reshape(x.shape)

    def backward(self, p):
        """Backward transform the transformed coordinates (on all real numbers)
        to the real coordinates (on the interval)
        """
        x = np.atleast_2d(p.copy())
        x[:, :] = (self.b[:] - self.a[:]) * np.exp(x[:, :]) / (1 + np.exp(x[:, :])) + self.a[:]
        return x.reshape(p.shape)

    def logjacobian_grad(self, p):
        """Return the log of the Jacobian at point p, and its gradient"""
        # dx/dp = (b - a) e^p / (1 + e^p)^2, hence
        # log|J| = sum( log(b - a) + p - 2 log(1 + e^p) )
        lj = np.sum(np.log(self.b[:] - self.a[:]) + p[:] - 2 * np.log(1.0 + np.exp(p[:])))
        lj_grad = np.zeros_like(p)
        lj_grad[:] = (1 - np.exp(p[:])) / (1 + np.exp(p[:]))
        return lj, lj_grad

    def dxdp(self, p):
        """Derivative of x wrt p (jacobian for chain-rule) - diagonal"""
        pp = np.atleast_2d(p)
        d = np.ones_like(pp)
        d[:, :] = (self.b[:] - self.a[:]) * np.exp(pp[:, :]) / (1 + np.exp(pp[:, :])) ** 2
        return d.reshape(p.shape)

    def d2xd2p(self, p):
        """Second derivative of x wrt p (for the Hessian transform) - diagonal"""
        pp = np.atleast_2d(p)
        d = np.zeros_like(pp)
        d[:, :] = (self.b[:] - self.a[:]) * (np.exp(2 * pp[:, :]) - np.exp(pp[:, :])) / (1 + np.exp(pp[:, :])) ** 3
        return d.reshape(p.shape)

    def logjac_hessian(self, p):
        """The Hessian of the log-jacobian"""
        # p should not be more than one-dimensional
        assert len(p.shape) == 1
        return np.diag(-2 * np.exp(p) / (1 + np.exp(p)) ** 2)

    def lnlikefn_grad(self, p, **kwargs):
        """The log-likelihood in the new coordinates (Jacobian term included)"""
        x = self.backward(p)
        ll, ll_grad = self.likob.lnlikefn_grad(x, **kwargs)
        lj, lj_grad = self.logjacobian_grad(p)
        # Chain rule for the gradient: dL/dp = dL/dx * dx/dp
        return ll + lj, ll_grad * self.dxdp(p) + lj_grad

    def lnlikefn(self, p, **kwargs):
        """Log-likelihood value only"""
        return self.lnlikefn_grad(p)[0]

    def lnpriorfn_grad(self, p, **kwargs):
        """The log-prior in the new coordinates. Do not include the Jacobian"""
        x = self.backward(p)
        lp, lp_grad = self.likob.lnpriorfn_grad(x)
        return lp, lp_grad * self.dxdp(p)

    def lnpriorfn(self, p, **kwargs):
        """Log-prior value only"""
        return self.lnpriorfn_grad(p)[0]

    def logpostfn_grad(self, p, **kwargs):
        """The log-posterior in the new coordinates (Jacobian term included)"""
        x = self.backward(p)
        lp, lp_grad = self.likob.lnpost_grad(x)
        lj, lj_grad = self.logjacobian_grad(p)
        return lp + lj, lp_grad * self.dxdp(p) + lj_grad

    def hessian(self, p):
        """The Hessian matrix of the log-posterior in the new coordinates"""
        # p should not be more than one-dimensional
        assert len(p.shape) == 1
        # Get quantities from un-transformed distribution
        x = self.backward(p)
        orig_hessian = self.likob.hessian(x)
        _, orig_lp_grad = self.likob.lnpost_grad(x)
        # Jacobian-conjugated Hessian plus curvature of the transform itself
        hessian = self.logjac_hessian(p)
        dxdpf = np.diag(self.dxdp(p))
        hessian += np.dot(dxdpf.T, np.dot(orig_hessian, dxdpf))
        hessian -= np.diag(self.d2xd2p(p) * orig_lp_grad)
        return hessian
class TestNuts(TestCase):
    """Smoke test of the gradient-based jumps on an interval-transformed Gaussian."""

    @classmethod
    def tearDownClass(cls):
        # Clean up the sampler output
        shutil.rmtree("chains")

    def setUp(self) -> None:
        self.comm = MPI.COMM_WORLD

    def test_nuts(self):
        ndim = 40
        model = GaussianLikelihood(ndim=ndim, pmin=0.0, pmax=10.0)
        gl = intervalTransform(model, pmin=0.0, pmax=10)

        # Start well inside the interval
        start = np.ones(ndim) * 0.01

        # Locate the posterior maximum with scipy's Newton-CG
        result = so.minimize(
            lambda x: -gl.lnlikefn(x),
            start,
            jac=lambda x: -gl.lnlikefn_grad(x)[1],
            method="Newton-CG",
            hess=lambda x: -gl.hessian(x),
            options={"disp": True},
        )

        # Start at the maximum; use the inverse Hessian as the proposal covariance
        p0 = result["x"]
        h0 = gl.hessian(p0)
        cov = sl.cho_solve(sl.cho_factor(-h0), np.eye(len(h0)))

        sampler = PTMCMCSampler.PTSampler(
            ndim,
            gl.lnlikefn,
            gl.lnpriorfn,
            np.copy(cov),
            logl_grad=gl.lnlikefn_grad,
            logp_grad=gl.lnpriorfn_grad,
            outDir="./chains",
            comm=self.comm,
        )
        sampler.sample(
            p0,
            1000,
            burn=500,
            thin=1,
            covUpdate=500,
            SCAMweight=10,
            AMweight=10,
            DEweight=10,
            NUTSweight=10,
            HMCweight=10,
            MALAweight=0,
            HMCsteps=100,
            HMCstepsize=0.4,
        )
class TestNutsNoMPI(TestNuts):
    """Run the NUTS test with the dummy (no-MPI) communicator."""

    def setUp(self) -> None:
        self.comm = MPIDUMMY.COMM_WORLD
| 6,886
| 29.339207
| 115
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the PTMCMCSampler documentation
# (originally generated by sphinx-quickstart on Sat Mar 28 20:40:33 2015).
#
# This file is execfile()d by Sphinx with the current directory set to its
# containing dir. Only values that differ from the Sphinx defaults are kept;
# see the Sphinx configuration reference for everything else.

import os
import sys

# Make the package importable so autodoc can find it.
sys.path.insert(0, os.path.abspath("../PTMCMCSampler"))

# -- General configuration ------------------------------------------------

# Sphinx extensions used for this build.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.viewcode",
]

# Templates, source-file handling, and the root document.
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"

# Project metadata.
project = "PTMCMCSampler"
copyright = "2015, Justin A. Ellis"

# Short and full version strings (intentionally left empty).
version = ""
release = ""

# Patterns ignored when looking for source files.
exclude_patterns = ["_build"]

# Pygments (syntax highlighting) style.
pygments_style = "sphinx"

# -- Options for HTML output ----------------------------------------------

html_theme = "default"
html_static_path = ["_static"]

# Output file base name for the HTML help builder.
htmlhelp_basename = "PTMCMCSamplerdoc"

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX knobs (paper size, point size, preamble) are left at defaults.
latex_elements = {}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ("index", "PTMCMCSampler.tex", "PTMCMCSampler Documentation", "Justin A. Ellis", "manual"),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [("index", "ptmcmcsampler", "PTMCMCSampler Documentation", ["Justin A. Ellis"], 1)]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (
        "index",
        "PTMCMCSampler",
        "PTMCMCSampler Documentation",
        "Justin A. Ellis",
        "PTMCMCSampler",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = "PTMCMCSampler"
epub_author = "Justin A. Ellis"
epub_publisher = "Justin A. Ellis"
epub_copyright = "2015, Justin A. Ellis"

# Files not packed into the epub.
epub_exclude_files = ["search.html"]
| 10,464
| 30.521084
| 95
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/PTMCMCSampler/nutsjump.py
|
"""
Implementation of the No-U-Turn-Sampler. Code follows algorithm 6 from the NUTS
paper (Hoffman & Gelman, 2011)
reference: arXiv:1111.4246
"The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte
Carlo", Matthew D. Hoffman & Andrew Gelman
Rutger van Haasteren
"""
import os
import sys
import numpy as np
import scipy.linalg as sl
class GradientJump(object):
    """Abstract base class for jump proposals that use gradient information.

    Works in "whitened" coordinates q = L^{-1} x (L the lower Cholesky factor
    of the inverse mass matrix), so momenta can be drawn from a unit Gaussian.
    Concrete proposals (MALA/HMC/NUTS) subclass this and override __call__.
    """

    def __init__(self, loglik_grad, logprior_grad, mm_inv, nburn=100):
        """Initialize the HMC class

        :loglik_grad: Log-likelihood and gradient function
        :logprior_grad: Log-prior and gradient function
        :mm_inv: Inverse of the mass matrix (covariance matrix)
        :nburn: Number of burn-in steps
        """
        self._loglik_grad = loglik_grad  # Log-likelihood & gradient
        self._logprior_grad = logprior_grad  # Log-prior & gradient
        self.mm_inv = mm_inv  # Inverse mass-matrix
        self.nburn = nburn  # Nr. of burn-in steps
        self.ndim = len(self.mm_inv)  # Number of dimensions
        self.set_cf()  # Whitening matrices
        self.name = "GradientJUMP"
        self.epsilon = None  # Step-size (set by subclasses)
        self.beta = 1.0  # Inverse temperature
        self.iter = 0.0  # Number of gradient jumps taken so far
        print("WARNING: GradientJumps not yet adaptive. Choose cov wisely!")

    @property
    def __name__(self):
        # Exposed as a property so the sampler can label the proposal cycle.
        return self.name

    def set_cf(self):
        """Update the Cholesky factor of the inverse mass matrix"""
        # cov_cf is lower-triangular L with L L^T = mm_inv; cov_cfi = L^{-1}
        self.cov_cf = sl.cholesky(self.mm_inv, lower=True)
        self.cov_cfi = sl.solve_triangular(self.cov_cf, np.eye(len(self.cov_cf)), trans=0, lower=True)

    def update_cf(self):
        """Update the Cholesky factor of the inverse mass matrix
        NOTE: this function is different from the one in GradientJump!
        """
        # Since we are adaptively tuning the step size epsilon, we should at
        # least keep the determinant of this guy equal to what it was before.
        new_cov_cf = sl.cholesky(self.mm_inv, lower=True)
        ldet_old = np.sum(np.log(np.diag(self.cov_cf)))
        ldet_new = np.sum(np.log(np.diag(new_cov_cf)))
        # Rescale so det(cov_cf) stays fixed while the shape is updated
        self.cov_cf = np.exp((ldet_old - ldet_new) / self.ndim) * new_cov_cf
        self.cov_cfi = sl.solve_triangular(self.cov_cf, np.eye(len(self.cov_cf)), trans=0, lower=True)

    def func_grad(self, x):
        """Log-prob and gradient, corrected for temperature"""
        ll, ll_grad = self._loglik_grad(x)
        lp, lp_grad = self._logprior_grad(x)
        # Only the likelihood is tempered; the prior enters untempered
        return self.beta * ll + lp, self.beta * ll_grad + lp_grad

    def forward(self, x):
        """Coordinate transformation to whitened parameters x->q"""
        return np.dot(self.cov_cfi.T, x)

    def backward(self, q):
        """Coordinate transformation from whitened parameters q->x"""
        return np.dot(self.cov_cf.T, q)

    def func_grad_white(self, q):
        """Whitened version of func_grad"""
        x = self.backward(q)
        fv, fg = self.func_grad(x)
        # Chain rule: the gradient w.r.t. q is L times the gradient w.r.t. x
        return fv, np.dot(self.cov_cf, fg)

    def draw_momenta(self):
        """Draw new momentum variables"""
        # Unit-Gaussian momenta, thanks to the whitening
        return np.random.randn(len(self.mm_inv))

    def loghamiltonian(self, logl, r):
        """Value of the Hamiltonian, given a position and momentum value"""
        try:
            return logl - 0.5 * np.dot(r, r)
        except ValueError:
            # Shape mismatches are signalled to the caller as NaN
            return np.nan

    def posmom_inprod(self, theta, r):
        # Position-momentum inner product (used by the NUTS U-turn criterion)
        try:
            return np.dot(theta, r)
        except ValueError:
            return np.nan

    # With the following definitions, we should be able to get rid of this
    # awkward coordinate transformation. Why does it not work?
    """
    def forward(self, x):
        #return np.dot(self.cov_cfi.T, x)
        return x
    def backward(self, q):
        #return np.dot(self.cov_cf.T, q)
        return q
    def func_grad_white(self, q):
        # We would be able to get rid of this function
        #x = self.backward(q)
        #fv, fg = self.func_grad(x)
        #return fv, np.dot(self.cov_cf, fg)
        return self.func_grad(q)
    def draw_momenta(self):
        #return np.random.randn(len(self.mm_inv))
        return np.dot(self.cov_cfi.T, np.random.randn(self.ndim))
    def loghamiltonian(self, logl, r):
        try:
            #return logl-0.5*np.dot(r, r)
            newr = np.dot(self.cov_cfi.T, r)
            return logl-0.5*np.dot(newr, newr)
        except ValueError as err:
            return np.nan
    def posmom_inprod(self, theta, r):
        try:
            newr = np.dot(self.cov_cfi.T, r)
            newtheta = np.dot(self.cov_cfi.T, theta)
            return np.dot(newtheta, newr)
            #return np.dot(theta, r)
        except ValueError as err:
            return np.nan
    """

    def leapfrog(self, theta, r, grad, epsilon):
        """Perform a leapfrog jump in the Hamiltonian space

        :theta: Initial parameter position
        :r: Initial momentum
        :grad: Initial gradient
        :epsilon: Step size

        output
        thetaprime: new parameter position
        rprime: new momentum
        gradprime: new gradient
        logpprime: new log-probability
        """
        rprime = r + 0.5 * epsilon * grad  # half step in r
        thetaprime = theta + epsilon * rprime  # step in theta
        logpprime, gradprime = self.func_grad_white(thetaprime)  # compute gradient
        rprime = rprime + 0.5 * epsilon * gradprime  # half step in r
        return thetaprime, rprime, gradprime, logpprime

    def __call__(self, x, iter, beta):
        """Take one HMC trajectory step"""
        self.iter += 1
        # Subclasses set self.name, so this only triggers for the base class
        if self.__name__ == "GradientJUMP":
            raise NotImplementedError("GradientJump is an abstract base class!")
        else:
            return x, 0.0
class MALAJump(GradientJump):
    """Metropolis-adjusted Langevin (MALA) jump.

    Proposes along a single randomly chosen eigen-direction in the whitened
    space, drifting towards higher posterior density via the gradient.
    """

    def __init__(self, loglik_grad, logprior_grad, mm_inv, nburn=100):
        """Initialize the MALA Jump"""
        super(MALAJump, self).__init__(loglik_grad, logprior_grad, mm_inv, nburn=nburn)
        self.name = "MALAJump"
        # Classic 2.4/sqrt(d) proposal scaling
        self.cd = 2.4 / np.sqrt(self.ndim)
        self.set_eigvecs()

    def set_eigvecs(self):
        """Set the eigenvectors of the mass matrix"""
        # Since we have whitened the parameter space, the decomposition is a
        # simple identity matrix
        self._u = np.eye(self.ndim)
        self._s = np.ones(self.ndim)

    def __call__(self, x, iter, beta):
        """Take one MALA step"""
        super(MALAJump, self).__call__(x, iter, beta)
        x = np.atleast_1d(x)
        if len(np.shape(x)) > 1:
            raise ValueError("x is expected to be a 1-D array")
        # Inverse temperature used by func_grad
        self.beta = beta
        # Update the mass matrix when in burn-in stage
        # Currently, there is a problem with adjusting the mass matrix.
        # Stepsize and mass matrix need to be tuned together?
        # if iter <= self.nburn:
        #     self.update_cf()
        #     self.set_eigvecs()
        # Initial starting position (whitened coordinates)
        q0 = self.forward(x)
        logp, grad0 = self.func_grad_white(q0)
        # Choose an eigenvector to jump in, and the size
        i = np.random.randint(0, self.ndim)
        vec = self._u[i, :]
        val = self._s[i]
        dist = np.random.randn()
        # Langevin drift to the proposal mean, then a random step along vec
        mq0 = q0 + 0.5 * vec * self.cd**2 * np.dot(vec, grad0) / 2 / val
        q1 = mq0 + dist * vec * self.cd / np.sqrt(val)
        # Drift from the proposed point (needed for the reverse-move density)
        logp1, grad1 = self.func_grad_white(q1)
        mq1 = q1 + 0.5 * vec * self.cd**2 * np.dot(vec, grad1) / 2 / val
        # Forward/backward log proposal-density ratio for Metropolis-Hastings
        qxy = 0.5 * (np.sum((mq0 - q1) ** 2 / val) - np.sum((mq1 - q0) ** 2 / val))
        return self.backward(q1), qxy
class HMCJump(GradientJump):
    """Plain Hamiltonian Monte Carlo jump with a random number of leapfrog steps."""

    def __init__(self, loglik_grad, logprior_grad, mm_inv, nburn=100, stepsize=0.1, nminsteps=10, nmaxsteps=300):
        """Initialize the HMC Jump

        :loglik_grad: Log-likelihood and gradient function
        :logprior_grad: Log-prior and gradient function
        :mm_inv: Inverse of the mass matrix (covariance matrix)
        :nburn: Number of burn-in steps
        :stepsize: Leapfrog step size epsilon
        :nminsteps: Minimum number of leapfrog steps per trajectory
        :nmaxsteps: Maximum number of leapfrog steps per trajectory
        """
        super(HMCJump, self).__init__(loglik_grad, logprior_grad, mm_inv, nburn=nburn)
        self.name = "HMCJump"
        self.epsilon = stepsize
        self.nminsteps = nminsteps
        self.nmaxsteps = nmaxsteps

    def __call__(self, x, iter, beta):
        """Take one HMC step"""
        super(HMCJump, self).__call__(x, iter, beta)
        x = np.atleast_1d(x)
        if len(np.shape(x)) > 1:
            raise ValueError("x is expected to be a 1-D array")

        # Set the temperature
        self.beta = beta

        # Initial starting position in whitened coordinates
        q0 = self.forward(x)
        qxy = 0
        logp0, grad0 = self.func_grad_white(q0)

        # Draw new momentum variables
        p0 = self.draw_momenta()
        joint0 = self.loghamiltonian(logp0, p0)

        # Run a trajectory with a random number of leapfrog steps
        nsteps = np.random.randint(self.nminsteps, self.nmaxsteps)
        p, q, grad = np.copy(p0), np.copy(q0), np.copy(grad0)
        for ii in range(nsteps):
            q1, p1, grad1, logp1 = self.leapfrog(q, p, grad, self.epsilon)
            joint1 = self.loghamiltonian(logp1, p1)
            p, q, grad = np.copy(p1), np.copy(q1), np.copy(grad1)
            # BUGFIX: the original tested `(joint1 - 1000.0) < joint0`, which
            # holds for essentially every accurate step and so aborted the
            # trajectory after a single leapfrog step. The integration is
            # "super inaccurate" only when the Hamiltonian has *dropped* by
            # more than 1000 (cf. the analogous sprime test in NUTS).
            if (joint1 + 1000.0) < joint0:
                # We are super inaccurate, so break the trajectory
                break
            # Hamiltonian error enters the Metropolis-Hastings correction
            qxy = joint1 - joint0
        return self.backward(q), qxy
class Trajectory(object):
    """Bookkeeping for the positions visited along a NUTS trajectory.

    The trajectory grows in two directions from the starting point; samples
    are stored in a "plus" (forward) and a "minus" (backward) buffer, each of
    which is enlarged on demand.
    """

    def __init__(self, ndim, bufsize=1000):
        """Initialize the trajectory object

        :ndim: Dimensionality of the parameter space
        :bufsize: Initial buffer size (also used as the growth increment)
        """
        self.ndim = ndim
        self.bufadd = bufsize
        self.bufsize_plus = bufsize
        self.bufsize_minus = bufsize
        self.trajlen_plus = 0
        self.trajlen_minus = 0
        self.trajbuf_plus = np.zeros((self.bufsize_plus, self.ndim))
        self.trajind_plus = np.zeros(self.bufsize_plus)
        self.trajbuf_minus = np.zeros((self.bufsize_minus, self.ndim))
        self.trajind_minus = np.zeros(self.bufsize_minus)

    def increase_buf(self, which="plus"):
        """Increase the buffer on the positive or the negative side"""
        addbuf = np.zeros((self.bufadd, self.ndim))
        addind = np.zeros(self.bufadd)
        if which == "plus":
            self.trajbuf_plus = np.append(self.trajbuf_plus, addbuf, axis=0)
            self.trajind_plus = np.append(self.trajind_plus, addind)
            self.bufsize_plus += self.bufadd
        elif which == "minus":
            self.trajbuf_minus = np.append(self.trajbuf_minus, addbuf, axis=0)
            self.trajind_minus = np.append(self.trajind_minus, addind)
            self.bufsize_minus += self.bufadd

    def reset(self):
        """Reset the trajectory object (buffers are kept, lengths zeroed)"""
        self.trajlen_plus = 0
        self.trajlen_minus = 0

    def add_sample(self, theta, ind, which="plus"):
        """Add a sample on the positive or the negative branch

        :theta: Parameter vector to store
        :ind: Trajectory index of this sample
        :which: "plus" (forward) or "minus" (backward) branch
        """
        if which == "plus":
            if self.trajlen_plus >= self.bufsize_plus:
                self.increase_buf(which="plus")
            self.trajbuf_plus[self.trajlen_plus, :] = theta
            self.trajind_plus[self.trajlen_plus] = ind
            self.trajlen_plus += 1
        elif which == "minus":
            if self.trajlen_minus >= self.bufsize_minus:
                self.increase_buf(which="minus")
            self.trajbuf_minus[self.trajlen_minus, :] = theta
            self.trajind_minus[self.trajlen_minus] = ind
            self.trajlen_minus += 1

    def length(self):
        """Function that returns the current trajectory length"""
        return self.trajlen_plus + self.trajlen_minus

    def get_trajectory(self, which="both"):
        """Return (positions, indices) for one branch or the whole trajectory.

        For "both", the minus branch is reversed so the samples run from the
        most-backward point to the most-forward point.
        """
        if which == "both":
            # BUGFIX: the original used trajbuf_minus[:trajlen_minus:-1],
            # which slices from the END of the (mostly empty) buffer instead
            # of reversing the filled prefix. Reverse only the used entries.
            return (
                np.append(
                    self.trajbuf_minus[: self.trajlen_minus, :][::-1],
                    self.trajbuf_plus[: self.trajlen_plus, :],
                    axis=0,
                ),
                np.append(self.trajind_minus[: self.trajlen_minus][::-1], self.trajind_plus[: self.trajlen_plus]),
            )
        elif which == "plus":
            return self.trajbuf_plus[: self.trajlen_plus], self.trajind_plus[: self.trajlen_plus]
        elif which == "minus":
            return self.trajbuf_minus[: self.trajlen_minus], self.trajind_minus[: self.trajlen_minus]

    def get_used_trajectory(self, ind):
        """For index ind, get the trajectory that gets us there"""
        tiplus = self.trajind_plus[: self.trajlen_plus]
        timinus = self.trajind_minus[: self.trajlen_minus]
        if ind in tiplus:
            index = np.where(tiplus == ind)[0][0] + 1
            return self.trajbuf_plus[:index, :]
        elif ind in timinus:
            index = np.where(timinus == ind)[0][0] + 1
            # Start from the initial (plus) sample, then walk the minus branch
            return np.append(self.trajbuf_plus[:1, :], self.trajbuf_minus[:index, :], axis=0)
        else:
            raise ValueError("Index not found")
class NUTSJump(GradientJump):
"""Class for No-U-Turn-Sampling Hamiltonian Monte Carlo jumps"""
def __init__(
self,
loglik_grad,
logprior_grad,
mm_inv,
nburn=100,
trajectoryDir=None,
write_burnin=False,
force_trajlen=None,
force_epsilon=None,
delta=0.6,
):
"""Initialize the HMC class
:loglik_grad: Log-likelihood and gradient function
:logprior_grad: Log-prior and gradient function
:mm_inv: Inverse of the mass matrix (covariance matrix)
:nburn: Number of burn-in steps
:trajectoryDir: Output directory for full trajectories (for debugging)
:write_burnin: Whether we are writing the burn-in trajectories
"""
super(NUTSJump, self).__init__(loglik_grad, logprior_grad, mm_inv, nburn=nburn)
self.trajectoryDir = trajectoryDir # Trajectory directory
self.write_burnin = write_burnin # Write burnin trajectories?
self.name = "NUTSJUMP"
self.delta = delta # Target acceptance rate
self.traj = Trajectory(self.ndim, bufsize=1000) # Trajectory buffer
# Parameters for the dual averaging (for tuning epsilon)
self.gamma = 0.05
self.t0 = 10
self.kappa = 0.75
self.mu = None
self.epsilonbar = 1.0
self.Hbar = 0
# Parameters to force the trajectories to be pre-set
self.force_trajlen = force_trajlen
self.force_epsilon = force_epsilon
if self.force_epsilon is not None:
self.epsilonbar = self.force_epsilon
# Create the trajectory directory, if it does not exist yet
if self.trajectoryDir is not None:
if os.path.isfile(trajectoryDir):
raise IOError("Not a directory: {0}".format(trajectoryDir))
elif not os.path.isdir(trajectoryDir):
os.mkdir(trajectoryDir)
def find_reasonable_epsilon(self, theta0, grad0, logp0):
"""Heuristic for choosing an initial value of epsilon"""
epsilon = 1.0
r0 = self.draw_momenta()
# Figure out what direction we should be moving epsilon.
_, rprime, gradprime, logpprime = self.leapfrog(theta0, r0, grad0, epsilon)
# Make sure the step is not too large, so that the likelihood is finite
# and also stays inside the prior domain (if any)
k = 1.0
while np.isinf(logpprime) or np.isinf(gradprime).any():
k *= 0.5
_, rprime, _, logpprime = self.leapfrog(theta0, r0, grad0, epsilon * k)
epsilon = 0.5 * k * epsilon
acceptprob = np.exp(self.loghamiltonian(logpprime, rprime) - self.loghamiltonian(logp0, r0))
a = 2.0 * float((acceptprob > 0.5)) - 1.0
# Keep moving epsilon in that direction until acceptprob crosses 0.5.
while (acceptprob**a) > (2.0 ** (-a)):
epsilon = epsilon * (2.0**a)
_, rprime, _, logpprime = self.leapfrog(theta0, r0, grad0, epsilon)
acceptprob = np.exp(self.loghamiltonian(logpprime, rprime) - self.loghamiltonian(logp0, r0))
return epsilon
def stop_criterion(self, thetaminus, thetaplus, rminus, rplus, force_trajlen, index):
"""Compute the stop condition in the main loop
dot(dtheta, rminus) >= 0 & dot(dtheta, rplus >= 0)
INPUTS
------
thetaminus, thetaplus: ndarray[float, ndim=1]
under and above position
rminus, rplus: ndarray[float, ndim=1]
under and above momentum
OUTPUTS
-------
criterion: bool
return if the condition is valid
"""
dtheta = thetaplus - thetaminus
inprod_min = self.posmom_inprod(dtheta, rminus)
inprod_plus = self.posmom_inprod(dtheta, rplus)
# orig = (np.dot(dtheta, rminus.T) >= 0) & (np.dot(dtheta, rplus.T) >= 0)
orig = (inprod_min >= 0) & (inprod_plus >= 0)
if force_trajlen is not None:
cont = index < force_trajlen
else:
cont = orig
return cont
def build_tree(self, theta, r, grad, logu, v, j, epsilon, joint0, ind, traj, force_trajlen):
"""The main recursion tree. Literally from Hoffman and Gelman (2011)."""
if j == 0:
# Base case: Take a single leapfrog step in the direction v.
thetaprime, rprime, gradprime, logpprime = self.leapfrog(theta, r, grad, v * epsilon)
joint = self.loghamiltonian(logpprime, rprime)
# Is the new point in the slice of the slice sampling step?
nprime = int(logu < joint)
# Is the simulation very inaccurate?
sprime = int((logu - 1000.0) < joint)
# Set the return values---minus=plus for all things here, since the
# "tree" is of depth 0.
thetaminus = thetaprime[:]
thetaplus = thetaprime[:]
rminus = rprime[:]
rplus = rprime[:]
gradminus = gradprime[:]
gradplus = gradprime[:]
# Compute the acceptance probability.
alphaprime = min(1.0, np.exp(joint - joint0))
nalphaprime = 1
if v == 1 and traj is not None:
ind_plus, ind_minus = ind + 1, ind
traj.add_sample(thetaprime, ind_plus, which="plus")
ind_prime = ind_plus
elif traj is not None:
ind_plus, ind_minus = ind, ind + 1
traj.add_sample(thetaprime, ind_minus, which="minus")
ind_prime = ind_minus
else:
# Recursion: Implicitly build the height j-1 left and right subtrees.
if v == 1:
(
thetaminus,
rminus,
gradminus,
thetaplus,
rplus,
gradplus,
thetaprime,
gradprime,
logpprime,
nprime,
sprime,
alphaprime,
nalphaprime,
ind_plus,
ind_minus,
ind_prime,
) = self.build_tree(theta, r, grad, logu, v, j - 1, epsilon, joint0, ind, traj, force_trajlen)
else:
(
thetaminus,
rminus,
gradminus,
thetaplus,
rplus,
gradplus,
thetaprime,
gradprime,
logpprime,
nprime,
sprime,
alphaprime,
nalphaprime,
ind_plus,
ind_minus,
ind_prime,
) = self.build_tree(theta, r, grad, logu, v, j - 1, epsilon, joint0, ind, traj, force_trajlen)
# No need to keep going if the stopping criteria were met in the first subtree.
if sprime == 1:
if v == -1:
(
thetaminus,
rminus,
gradminus,
_,
_,
_,
thetaprime2,
gradprime2,
logpprime2,
nprime2,
sprime2,
alphaprime2,
nalphaprime2,
ind_plus,
ind_minus,
ind_prime2,
) = self.build_tree(
thetaminus, rminus, gradminus, logu, v, j - 1, epsilon, joint0, ind_minus, traj, force_trajlen
)
else:
(
_,
_,
_,
thetaplus,
rplus,
gradplus,
thetaprime2,
gradprime2,
logpprime2,
nprime2,
sprime2,
alphaprime2,
nalphaprime2,
ind_plus,
ind_minus,
ind_prime2,
) = self.build_tree(
thetaplus, rplus, gradplus, logu, v, j - 1, epsilon, joint0, ind_plus, traj, force_trajlen
)
# Choose which subtree to propagate a sample up from.
if np.random.uniform() < (float(nprime2) / max(float(int(nprime) + int(nprime2)), 1.0)):
thetaprime = thetaprime2[:]
gradprime = gradprime2[:]
logpprime = logpprime2
ind_prime = ind_prime2
# Update the number of valid points.
nprime = int(nprime) + int(nprime2)
# Update the stopping criterion.
sprime = int(
sprime
and sprime2
and self.stop_criterion(
thetaminus, thetaplus, rminus, rplus, force_trajlen, max(ind_plus, ind_minus)
)
)
# Update the acceptance probability statistics.
alphaprime = alphaprime + alphaprime2
nalphaprime = nalphaprime + nalphaprime2
return (
thetaminus,
rminus,
gradminus,
thetaplus,
rplus,
gradplus,
thetaprime,
gradprime,
logpprime,
nprime,
sprime,
alphaprime,
nalphaprime,
ind_plus,
ind_minus,
ind_prime,
)
    def __call__(self, x, iter, beta):
        """Take one NUTS (No-U-Turn Sampler) trajectory step.

        Runs one full NUTS iteration: draws a momentum, slice-samples the
        Hamiltonian level, doubles the trajectory tree via ``build_tree``
        until the stopping criterion fires, and (optionally) adapts the
        step size ``epsilon`` by dual averaging during burn-in.

        @param x: current parameter vector (1-D) in the original parameter space
        @param iter: current sampler iteration (used for burn-in/adaptation gating)
        @param beta: inverse temperature of the chain
        @return: tuple ``(new position, qxy)`` where qxy is the log forward-backward
                 proposal ratio chosen so the outer Metropolis step always accepts
        """
        super(NUTSJump, self).__call__(x, iter, beta)
        x = np.atleast_1d(x)
        if len(np.shape(x)) > 1:
            raise ValueError("x is expected to be a 1-D array")
        # Transform to the whitened coordinate system used internally.
        q = self.forward(x)
        self.beta = beta
        # Always start evaluating the distribution and gradient.
        # Potential speed-up: obtain these values from elsewhere, since we must
        # have evaluated them already?
        logp, grad = self.func_grad_white(q)
        if self.epsilon is None and self.force_epsilon is None:
            # First time doing an HMC jump
            self.epsilon = self.find_reasonable_epsilon(q, grad, logp)
            # print("Find reasonable epsilon: ", self.epsilon)
            self.mu = np.log(10.0 * self.epsilon)  # For dual averaging
        elif self.epsilon is None and self.force_epsilon is not None:
            # Force epsilon to be a certain value
            self.epsilon = self.force_epsilon
            self.mu = np.log(10.0 * self.epsilon)  # For dual averaging
        elif self.force_epsilon is not None:
            # Force epsilon to be a certain value
            self.epsilon = self.force_epsilon
        # Update the mass matrix when in burn-in stage
        # Currently, there is a problem with adjusting the mass matrix.
        # Stepsize and mass matrix need to be tuned together?
        # if iter <= self.nburn:
        #     self.update_cf()
        # Set the start of the trajectory
        r0 = self.draw_momenta()
        joint = self.loghamiltonian(logp, r0)
        # Initial slice sampling variable: logu ~ joint - Exp(1), i.e. u ~ Uniform(0, exp(joint)).
        logu = float(joint - np.random.exponential(1, size=1))
        # Initialize the binary tree for this trajectory
        sample = np.copy(q)
        lnprob = np.copy(logp)
        thetaminus = np.copy(sample)
        thetaplus = np.copy(sample)
        rminus = np.copy(r0)
        rplus = np.copy(r0)
        gradminus = np.copy(grad)
        gradplus = np.copy(grad)
        j = 0  # Initial tree heigth j = 0
        n = 1  # Initially, the only valid point is the initial point
        s = 1  # Stopping criterion
        # Reset the trajectory buffer
        self.traj.reset()
        self.traj.add_sample(thetaminus, self.traj.length())
        trajind, trajind_minus, trajind_plus, trajind_prime = 0, 0, 0, 0
        while s == 1:
            # Choose a direction. -1 = backwards, 1 = forwards.
            v = int(2 * (np.random.uniform() < 0.5) - 1)
            # Double the size of the tree.
            if v == -1:
                # Grow the trajectory backwards from the current minus endpoint.
                (
                    thetaminus,
                    rminus,
                    gradminus,
                    _,
                    _,
                    _,
                    thetaprime,
                    gradprime,
                    logpprime,
                    nprime,
                    sprime,
                    alpha,
                    nalpha,
                    trajind_plus,
                    trajind_minus,
                    trajind_prime,
                ) = self.build_tree(
                    thetaminus,
                    rminus,
                    gradminus,
                    logu,
                    v,
                    j,
                    self.epsilon,
                    joint,
                    trajind_minus,
                    self.traj,
                    self.force_trajlen,
                )
            else:
                # Grow the trajectory forwards from the current plus endpoint.
                (
                    _,
                    _,
                    _,
                    thetaplus,
                    rplus,
                    gradplus,
                    thetaprime,
                    gradprime,
                    logpprime,
                    nprime,
                    sprime,
                    alpha,
                    nalpha,
                    trajind_plus,
                    trajind_minus,
                    trajind_prime,
                ) = self.build_tree(
                    thetaplus,
                    rplus,
                    gradplus,
                    logu,
                    v,
                    j,
                    self.epsilon,
                    joint,
                    trajind_plus,
                    self.traj,
                    self.force_trajlen,
                )
            # Use Metropolis-Hastings to decide whether or not to move to a
            # point from the half-tree we just generated.
            _tmp = min(1, float(nprime) / float(n))
            if (sprime == 1) and (np.random.uniform() < _tmp):
                sample[:] = thetaprime[:]
                lnprob = np.copy(logpprime)
                grad = np.copy(gradprime)
                trajind = trajind_prime
            # Update number of valid points we've seen.
            n += nprime
            # Decide if it's time to stop.
            s = sprime and self.stop_criterion(
                thetaminus, thetaplus, rminus, rplus, self.force_trajlen, max(trajind_plus, trajind_minus)
            )
            # Increment depth.
            j += 1
            sys.stdout.flush()
        # Do adaptation of epsilon if we're still doing burn-in.
        if self.force_epsilon is None:
            # NOTE(review): self.iter is read here but not advanced in this method;
            # presumably the base-class __call__ updates it — confirm.
            eta = 1.0 / float(self.iter + self.t0)
            # Dual-averaging update of the running acceptance statistic (Hoffman & Gelman 2014).
            self.Hbar = (1.0 - eta) * self.Hbar + eta * (self.delta - alpha / float(nalpha))
            if iter <= self.nburn:
                # Still in the burn-in phase. So adjust epsilon
                self.epsilon = np.exp(self.mu - np.sqrt(self.iter) / self.gamma * self.Hbar)
                eta = self.iter**-self.kappa
                self.epsilonbar = np.exp((1.0 - eta) * np.log(self.epsilonbar) + eta * np.log(self.epsilon))
            else:
                # After burn-in, freeze epsilon at its dual-averaged value.
                self.epsilon = self.epsilonbar
        if self.trajectoryDir is not None:
            # Write the whole trajectory to file
            if iter <= self.nburn and self.write_burnin:
                trajfile_plus = os.path.join(self.trajectoryDir, "burnin-plus-{num:06d}.txt".format(num=iter))
                trajfile_minus = os.path.join(self.trajectoryDir, "burnin-minus-{num:06d}.txt".format(num=iter))
                trajfile_used = os.path.join(self.trajectoryDir, "burnin-used-{num:06d}.txt".format(num=iter))
                np.savetxt(trajfile_plus, self.traj.get_trajectory(which="plus")[0])
                np.savetxt(trajfile_minus, self.traj.get_trajectory(which="minus")[0])
                np.savetxt(trajfile_used, self.traj.get_used_trajectory(trajind))
            elif iter > self.nburn:
                trajfile_plus = os.path.join(self.trajectoryDir, "plus-{num:06d}.txt".format(num=iter - self.nburn))
                trajfile_minus = os.path.join(self.trajectoryDir, "minus-{num:06d}.txt".format(num=iter - self.nburn))
                trajfile_used = os.path.join(self.trajectoryDir, "used-{num:06d}.txt".format(num=iter - self.nburn))
                np.savetxt(trajfile_plus, self.traj.get_trajectory(which="plus")[0])
                np.savetxt(trajfile_minus, self.traj.get_trajectory(which="minus")[0])
                np.savetxt(trajfile_used, self.traj.get_used_trajectory(trajind))
        # We need to always accept this step, so the qxy is just the inverse MH ratio
        qxy = logp - lnprob
        return self.backward(sample), qxy
| 30,428
| 35.181926
| 119
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/PTMCMCSampler/PTMCMCSampler.py
|
import os
import sys
import time
import numpy as np
from .nutsjump import HMCJump, MALAJump, NUTSJump
try:
from mpi4py import MPI
except ImportError:
print("Optional mpi4py package is not installed. MPI support is not available.")
from . import nompi4py as MPI
try:
import acor
except ImportError:
print(
"Optional acor package is not installed. Acor is optionally used to calculate the "
"effective chain length for output in the chain file."
)
pass
def shift_array(arr, num, fill_value=0.0):
    """Return a copy of ``arr`` shifted by ``num`` positions.

    Positive ``num`` shifts right (vacated leading slots take ``fill_value``),
    negative ``num`` shifts left (vacated trailing slots take ``fill_value``),
    and ``num == 0`` returns an unshifted copy.

    @param arr: input NumPy array
    @param num: signed number of positions to shift by
    @param fill_value: value written into the vacated slots (default 0.0)
    @return: a new array of the same shape and dtype as ``arr``
    """
    shifted = np.empty_like(arr)
    if num == 0:
        shifted[:] = arr
    elif num > 0:
        # Right shift: pad the front, move the surviving prefix back.
        shifted[:num] = fill_value
        shifted[num:] = arr[:-num]
    else:
        # Left shift: pad the tail, move the surviving suffix forward.
        shifted[num:] = fill_value
        shifted[:num] = arr[-num:]
    return shifted
class PTSampler(object):
    """
    Parallel Tempering Markov Chain Monte-Carlo (PTMCMC) sampler.
    This implementation uses an adaptive jump proposal scheme
    by default using both standard and single component Adaptive
    Metropolis (AM) and Differential Evolution (DE) jumps.
    This implementation also makes use of MPI (mpi4py) to run
    the parallel chains.
    Along with the AM and DE jumps, the user can add custom
    jump proposals with the ``addProposalToCycle`` function.
    @param ndim: number of dimensions in problem
    @param logl: log-likelihood function
    @param logp: log prior function (must be normalized for evidence evaluation)
    @param cov: Initial covariance matrix of model parameters for jump proposals
    @param covinds: Indices of parameters for which to perform adaptive jumps
    @param loglargs: any additional arguments (apart from the parameter vector) for
    log likelihood
    @param loglkwargs: any additional keyword arguments (apart from the parameter vector)
    for log likelihood
    @param logpargs: any additional arguments (apart from the parameter vector) for
    log like prior
    @param logl_grad: log-likelihood function, including gradients
    @param logp_grad: prior function, including gradients
    @param logpkwargs: any additional keyword arguments (apart from the parameter vector)
    for log prior
    @param outDir: Full path to output directory for chain files (default = ./chains)
    @param verbose: Update current run-status to the screen (default=True)
    @param resume: Resume from a previous chain (still in testing so beware) (default=False)
    """
    # NOTE(review): loglargs/loglkwargs/logpargs/logpkwargs use mutable default
    # arguments below. They are only read (forwarded to _function_wrapper), so
    # this is benign today, but a None-sentinel would be safer.
    def __init__(
        self,
        ndim,
        logl,
        logp,
        cov,
        groups=None,
        loglargs=[],
        loglkwargs={},
        logpargs=[],
        logpkwargs={},
        logl_grad=None,
        logp_grad=None,
        comm=MPI.COMM_WORLD,
        outDir="./chains",
        verbose=True,
        resume=False,
        seed=None,
    ):
        """Set up MPI communication, per-chain RNG streams, likelihood/prior
        wrappers, parameter groups and their SVD decompositions, and the
        (initially empty) jump-proposal cycle."""
        # MPI initialization
        self.comm = comm
        self.MPIrank = self.comm.Get_rank()
        self.nchain = self.comm.Get_size()
        # Rank 0 spawns one independent RNG per chain from a single SeedSequence,
        # then each rank receives its own stream via scatter.
        if self.MPIrank == 0:
            ss = np.random.SeedSequence(seed)
            child_seeds = ss.generate_state(self.nchain)
            self.stream = [np.random.default_rng(s) for s in child_seeds]
        else:
            self.stream = None
        self.stream = self.comm.scatter(self.stream, root=0)
        self.ndim = ndim
        # Wrap user callables so extra args/kwargs stay pickleable.
        self.logl = _function_wrapper(logl, loglargs, loglkwargs)
        self.logp = _function_wrapper(logp, logpargs, logpkwargs)
        if logl_grad is not None and logp_grad is not None:
            self.logl_grad = _function_wrapper(logl_grad, loglargs, loglkwargs)
            self.logp_grad = _function_wrapper(logp_grad, logpargs, logpkwargs)
        else:
            self.logl_grad = None
            self.logp_grad = None
        self.outDir = outDir
        self.verbose = verbose
        self.resume = resume
        # setup output file
        if not os.path.exists(self.outDir):
            try:
                # OSError is swallowed because multiple MPI ranks may race to
                # create the same directory.
                os.makedirs(self.outDir)
            except OSError:
                pass
        # find indices for which to perform adaptive jumps
        self.groups = groups
        if groups is None:
            self.groups = [np.arange(0, self.ndim)]
        # set up covariance matrix
        self.cov = cov
        self.U = [[]] * len(self.groups)
        self.S = [[]] * len(self.groups)
        # do svd on parameter groups
        for ct, group in enumerate(self.groups):
            covgroup = np.zeros((len(group), len(group)))
            for ii in range(len(group)):
                for jj in range(len(group)):
                    covgroup[ii, jj] = self.cov[group[ii], group[jj]]
            self.U[ct], self.S[ct], v = np.linalg.svd(covgroup)
        # Running first/second moments for the recursive covariance update.
        self.M2 = np.zeros((ndim, ndim))
        self.mu = np.zeros(ndim)
        # initialize proposal cycle
        self.propCycle = []
        # jumpDict maps proposal name -> [n_proposed, n_accepted]
        self.jumpDict = {}
        # indicator for auxilary jumps
        self.aux = []
    def initialize(
        self,
        Niter,
        ladder=None,
        Tmin=1,
        Tmax=None,
        Tskip=100,
        isave=1000,
        covUpdate=1000,
        SCAMweight=30,
        AMweight=20,
        DEweight=50,
        NUTSweight=20,
        HMCweight=20,
        MALAweight=0,
        burn=50000,
        HMCstepsize=0.1,
        HMCsteps=300,
        maxIter=None,
        thin=10,
        i0=0,
        neff=100000,
        writeHotChains=False,
        hotChain=False,
    ):
        """
        Initialize MCMC quantities
        @param maxIter: maximum number of iterations
        @Tmin: minumum temperature to use in temperature ladder
        """
        # get maximum number of iteration
        if maxIter is None and self.MPIrank > 0:
            maxIter = Niter
        elif maxIter is None and self.MPIrank == 0:
            maxIter = Niter
        self.ladder = ladder
        self.covUpdate = covUpdate
        self.SCAMweight = SCAMweight
        self.AMweight = AMweight
        self.DEweight = DEweight
        self.burn = burn
        self.Tskip = Tskip
        self.thin = thin
        self.isave = isave
        self.Niter = Niter
        self.neff = neff
        self.tstart = 0
        # In-memory chain buffers only keep every `thin`-th sample.
        N = int(maxIter / thin)
        self._lnprob = np.zeros(N)
        self._lnlike = np.zeros(N)
        self._chain = np.zeros((N, self.ndim))
        self.naccepted = 0
        self.swapProposed = 0
        self.nswap_accepted = 0
        # set up covariance matrix and DE buffers
        if self.MPIrank == 0:
            self._AMbuffer = np.zeros((self.covUpdate, self.ndim))
            self._DEbuffer = np.zeros((self.burn, self.ndim))
        # ##### setup default jump proposal distributions ##### #
        # Gradient-based jumps
        if self.logl_grad is not None and self.logp_grad is not None:
            # DOES MALA do anything with the burnin? (Not adaptive enabled yet)
            malajump = MALAJump(self.logl_grad, self.logp_grad, self.cov, self.burn)
            self.addProposalToCycle(malajump, MALAweight)
            if MALAweight > 0:
                print("WARNING: MALA jumps are not working properly yet")
            # Perhaps have an option to adaptively tune the mass matrix?
            # Now that is done by default
            hmcjump = HMCJump(
                self.logl_grad,
                self.logp_grad,
                self.cov,
                self.burn,
                stepsize=HMCstepsize,
                nminsteps=2,
                nmaxsteps=HMCsteps,
            )
            self.addProposalToCycle(hmcjump, HMCweight)
            # Target acceptance rate (delta) should be optimal for 0.6
            nutsjump = NUTSJump(
                self.logl_grad,
                self.logp_grad,
                self.cov,
                self.burn,
                trajectoryDir=None,
                write_burnin=False,
                force_trajlen=None,
                force_epsilon=None,
                delta=0.6,
            )
            self.addProposalToCycle(nutsjump, NUTSweight)
        # add SCAM
        self.addProposalToCycle(self.covarianceJumpProposalSCAM, self.SCAMweight)
        # add AM
        self.addProposalToCycle(self.covarianceJumpProposalAM, self.AMweight)
        # check length of jump cycle
        if len(self.propCycle) == 0:
            raise ValueError("No jump proposals specified!")
        # randomize cycle
        self.randomizeProposalCycle()
        # setup default temperature ladder
        if self.ladder is None:
            self.ladder = self.temperatureLadder(Tmin, Tmax=Tmax)
        # temperature for current chain
        self.temp = self.ladder[self.MPIrank]
        # hot chain sampling from prior
        if hotChain and self.MPIrank == self.nchain - 1:
            # T = 1e80 effectively flattens the likelihood so the chain samples the prior.
            self.temp = 1e80
            self.fname = self.outDir + "/chain_hot.txt"
        else:
            self.fname = self.outDir + "/chain_{0}.txt".format(self.temp)
        # write hot chains
        self.writeHotChains = writeHotChains
        self.resumeLength = 0
        if self.resume and os.path.isfile(self.fname):
            if self.verbose:
                print("Resuming run from chain file {0}".format(self.fname))
            try:
                self.resumechain = np.loadtxt(self.fname)
                self.resumeLength = self.resumechain.shape[0]
            except ValueError:
                # A partially written last line makes loadtxt fail; drop it and retry.
                print("WARNING: Cant read in file. Removing last line.")
                os.system("sed -ie '$d' {0}".format(self.fname))
                self.resumechain = np.loadtxt(self.fname)
                self.resumeLength = self.resumechain.shape[0]
            self._chainfile = open(self.fname, "a")
        else:
            self._chainfile = open(self.fname, "w")
        self._chainfile.close()
    def updateChains(self, p0, lnlike0, lnprob0, iter):
        """
        Update chains after jump proposals
        """
        # update buffer
        if self.MPIrank == 0:
            self._AMbuffer[iter % self.covUpdate, :] = p0
        # put results into arrays
        if iter % self.thin == 0:
            ind = int(iter / self.thin)
            self._chain[ind, :] = p0
            self._lnlike[ind] = lnlike0
            self._lnprob[ind] = lnprob0
        # write to file
        if iter % self.isave == 0 and iter > 1 and iter > self.resumeLength:
            if self.writeHotChains or self.MPIrank == 0:
                self._writeToFile(iter)
            # write output covariance matrix
            np.save(self.outDir + "/cov.npy", self.cov)
            if self.MPIrank == 0 and self.verbose and iter > 1:
                # "\r" rewrites the same console line for a live progress display.
                sys.stdout.write("\r")
                sys.stdout.write(
                    "Finished %2.2f percent in %f s Acceptance rate = %g"
                    % (iter / self.Niter * 100, time.time() - self.tstart, self.naccepted / iter)
                )
                sys.stdout.flush()
    def sample(
        self,
        p0,
        Niter,
        ladder=None,
        Tmin=1,
        Tmax=None,
        Tskip=100,
        isave=1000,
        covUpdate=1000,
        SCAMweight=20,
        AMweight=20,
        DEweight=20,
        NUTSweight=20,
        MALAweight=20,
        HMCweight=20,
        burn=10000,
        HMCstepsize=0.1,
        HMCsteps=300,
        maxIter=None,
        thin=10,
        i0=0,
        neff=100000,
        writeHotChains=False,
        hotChain=False,
    ):
        """
        Function to carry out PTMCMC sampling.
        @param p0: Initial parameter vector
        @param self.Niter: Number of iterations to use for T = 1 chain
        @param ladder: User defined temperature ladder
        @param Tmin: Minimum temperature in ladder (default=1)
        @param Tmax: Maximum temperature in ladder (default=None)
        @param Tskip: Number of steps between proposed temperature swaps (default=100)
        @param isave: Number of iterations before writing to file (default=1000)
        @param covUpdate: Number of iterations between AM covariance updates (default=1000)
        @param SCAMweight: Weight of SCAM jumps in overall jump cycle (default=20)
        @param AMweight: Weight of AM jumps in overall jump cycle (default=20)
        @param DEweight: Weight of DE jumps in overall jump cycle (default=20)
        @param NUTSweight: Weight of the NUTS jumps in jump cycle (default=20)
        @param MALAweight: Weight of the MALA jumps in jump cycle (default=20)
        @param HMCweight: Weight of the HMC jumps in jump cycle (default=20)
        @param HMCstepsize: Step-size of the HMC jumps (default=0.1)
        @param HMCsteps: Maximum number of steps in an HMC trajectory (default=300)
        @param burn: Burn in time (DE jumps added after this iteration) (default=10000)
        @param maxIter: Maximum number of iterations for high temperature chains
        (default=2*self.Niter)
        @param self.thin: Save every self.thin MCMC samples
        @param i0: Iteration to start MCMC (if i0 !=0, do not re-initialize)
        @param neff: Number of effective samples to collect before terminating
        """
        # get maximum number of iteration
        if maxIter is None and self.MPIrank > 0:
            maxIter = Niter
        elif maxIter is None and self.MPIrank == 0:
            maxIter = Niter
        # set up arrays to store lnprob, lnlike and chain
        # if picking up from previous run, don't re-initialize
        if i0 == 0:
            self.initialize(
                Niter,
                ladder=ladder,
                Tmin=Tmin,
                Tmax=Tmax,
                Tskip=Tskip,
                isave=isave,
                covUpdate=covUpdate,
                SCAMweight=SCAMweight,
                AMweight=AMweight,
                DEweight=DEweight,
                NUTSweight=NUTSweight,
                MALAweight=MALAweight,
                HMCweight=HMCweight,
                burn=burn,
                HMCstepsize=HMCstepsize,
                HMCsteps=HMCsteps,
                maxIter=maxIter,
                thin=thin,
                i0=i0,
                neff=neff,
                writeHotChains=writeHotChains,
                hotChain=hotChain,
            )
        # compute lnprob for initial point in chain
        # if resuming, just start with first point in chain
        if self.resume and self.resumeLength > 0:
            # NOTE(review): assumes the chain file layout written by _writeToFile:
            # ndim parameter columns followed by [lnprob, lnlike, accept_rate, pt_acc].
            p0, lnlike0, lnprob0 = self.resumechain[0, :-4], self.resumechain[0, -3], self.resumechain[0, -4]
        else:
            # compute prior
            lp = self.logp(p0)
            if lp == float(-np.inf):
                lnprob0 = -np.inf
                lnlike0 = -np.inf
            else:
                lnlike0 = self.logl(p0)
                # Tempered posterior: likelihood is down-weighted by 1/T, prior is not.
                lnprob0 = 1 / self.temp * lnlike0 + lp
        # record first values
        self.updateChains(p0, lnlike0, lnprob0, i0)
        self.comm.barrier()
        # start iterations
        iter = i0
        self.tstart = time.time()
        runComplete = False
        Neff = 0
        while runComplete is False:
            iter += 1
            self.comm.barrier()  # make sure all processes are at the same iteration
            # call PTMCMCOneStep
            p0, lnlike0, lnprob0 = self.PTMCMCOneStep(p0, lnlike0, lnprob0, iter)
            # compute effective number of samples
            if iter % 1000 == 0 and iter > 2 * self.burn and self.MPIrank == 0:
                try:
                    # Neff = iter / max autocorrelation length over parameters.
                    Neff = iter / max(
                        1,
                        np.nanmax([acor.acor(self._chain[self.burn : (iter - 1), ii])[0] for ii in range(self.ndim)]),
                    )
                    # print('\n {0} effective samples'.format(Neff))
                except NameError:
                    # acor is optional; without it we never terminate on Neff.
                    Neff = 0
                    pass
            # stop if reached maximum number of iterations
            if self.MPIrank == 0 and iter >= self.Niter - 1:
                if self.verbose:
                    print("\nRun Complete")
                runComplete = True
            # stop if reached effective number of samples
            if self.MPIrank == 0 and int(Neff) > self.neff:
                if self.verbose:
                    print("\nRun Complete with {0} effective samples".format(int(Neff)))
                runComplete = True
            # Rank 0 decides termination; all chains follow via broadcast.
            runComplete = self.comm.bcast(runComplete, root=0)
    def PTMCMCOneStep(self, p0, lnlike0, lnprob0, iter):
        """
        Function to carry out PTMCMC sampling.
        @param p0: Initial parameter vector
        @param lnlike0: Initial log-likelihood value
        @param lnprob0: Initial log probability value
        @param iter: iteration number
        @return p0: next value of parameter vector after one MCMC step
        @return lnlike0: next value of likelihood after one MCMC step
        @return lnprob0: next value of posterior after one MCMC step
        """
        # update covariance matrix
        if (iter - 1) % self.covUpdate == 0 and (iter - 1) != 0 and self.MPIrank == 0:
            self._updateRecursive(iter - 1, self.covUpdate)
            # broadcast to other chains (point-to-point sends, one per rank)
            [self.comm.send(self.cov, dest=rank + 1, tag=111) for rank in range(self.nchain - 1)]
        # update covariance matrix
        if (iter - 1) % self.covUpdate == 0 and (iter - 1) != 0 and self.MPIrank > 0:
            self.cov[:, :] = self.comm.recv(source=0, tag=111)
            for ct, group in enumerate(self.groups):
                covgroup = np.zeros((len(group), len(group)))
                for ii in range(len(group)):
                    for jj in range(len(group)):
                        covgroup[ii, jj] = self.cov[group[ii], group[jj]]
                self.U[ct], self.S[ct], v = np.linalg.svd(covgroup)
        # update DE buffer
        if (iter - 1) % self.burn == 0 and (iter - 1) != 0 and self.MPIrank == 0:
            self._updateDEbuffer(iter - 1, self.burn)
            # broadcast to other chains
            [self.comm.send(self._DEbuffer, dest=rank + 1, tag=222) for rank in range(self.nchain - 1)]
        # update DE buffer
        if (iter - 1) % self.burn == 0 and (iter - 1) != 0 and self.MPIrank > 0:
            self._DEbuffer = self.comm.recv(source=0, tag=222)
            # randomize cycle
            if self.DEJump not in self.propCycle:
                self.addProposalToCycle(self.DEJump, self.DEweight)
                self.randomizeProposalCycle()
        # after burn in, add DE jumps
        if (iter - 1) == self.burn and self.MPIrank == 0:
            if self.verbose:
                print("Adding DE jump with weight {0}".format(self.DEweight))
            self.addProposalToCycle(self.DEJump, self.DEweight)
            # randomize cycle
            self.randomizeProposalCycle()
        # jump proposal ###
        # if resuming, just use previous chain points
        if self.resume and self.resumeLength > 0 and iter < self.resumeLength:
            p0, lnlike0, lnprob0 = self.resumechain[iter, :-4], self.resumechain[iter, -3], self.resumechain[iter, -4]
            # update acceptance counter
            self.naccepted = iter * self.resumechain[iter, -2]
        else:
            y, qxy, jump_name = self._jump(p0, iter)
            # jumpDict[name] = [n_proposed, n_accepted]
            self.jumpDict[jump_name][0] += 1
            # compute prior and likelihood
            lp = self.logp(y)
            if lp == -np.inf:
                newlnprob = -np.inf
            else:
                newlnlike = self.logl(y)
                newlnprob = 1 / self.temp * newlnlike + lp
            # hastings step
            diff = newlnprob - lnprob0 + qxy
            if diff > np.log(self.stream.random()):
                # accept jump
                p0, lnlike0, lnprob0 = y, newlnlike, newlnprob
                # update acceptance counter
                self.naccepted += 1
                self.jumpDict[jump_name][1] += 1
        # temperature swap
        if iter % self.Tskip == 0 and self.nchain > 1:
            p0, lnlike0, lnprob0 = self.PTswap(p0, lnlike0, lnprob0, iter)
        self.updateChains(p0, lnlike0, lnprob0, iter)
        return p0, lnlike0, lnprob0
    def PTswap(self, p0, lnlike0, lnprob0, iter):
        """
        Do parallel tempering swap.
        @param p0: current parameter vector
        @param lnlike0: current log-likelihood
        @param lnprob0: current log posterior value
        @param iter: current iteration number
        @return swapReturn: 0 = no swap proposed,
        1 = swap proposed and rejected,
        2 = swap proposed and accepted
        @return p0: new parameter vector
        @return lnlike0: new log-likelihood
        @return lnprob0: new log posterior value
        Repurposed from Neil Cornish/Bence Becsy's code:
        """
        # NOTE(review): ladder indexing assumes chain i runs at temperature
        # self.ladder[i], i.e. rank order matches ladder order — established
        # by initialize() setting self.temp = self.ladder[self.MPIrank].
        Ts = self.ladder
        log_Ls = self.comm.gather(lnlike0, root=0)  # list of likelihoods from each chain
        p0s = self.comm.gather(p0, root=0)  # list of parameter arrays from each chain
        if self.MPIrank == 0:
            # set up map to help keep track of swaps
            swap_map = list(range(self.nchain))
            # loop through and propose a swap at each chain (starting from hottest chain and going down in T)
            # and keep track of results in swap_map
            for swap_chain in reversed(range(self.nchain - 1)):
                log_acc_ratio = -log_Ls[swap_map[swap_chain]] / Ts[swap_chain]
                log_acc_ratio += -log_Ls[swap_map[swap_chain + 1]] / Ts[swap_chain + 1]
                log_acc_ratio += log_Ls[swap_map[swap_chain + 1]] / Ts[swap_chain]
                log_acc_ratio += log_Ls[swap_map[swap_chain]] / Ts[swap_chain + 1]
                acc_ratio = np.exp(log_acc_ratio)
                if self.stream.uniform() <= acc_ratio:
                    swap_map[swap_chain], swap_map[swap_chain + 1] = swap_map[swap_chain + 1], swap_map[swap_chain]
                    self.nswap_accepted += 1
                    self.swapProposed += 1
                else:
                    self.swapProposed += 1
            # loop through the chains and record the new samples and log_Ls
            for j in range(self.nchain):
                p0s[j] = p0s[swap_map[j]]
                log_Ls[j] = log_Ls[swap_map[j]]
        # broadcast the new samples and log_Ls to all chains
        p0 = self.comm.scatter(p0s, root=0)
        lnlike0 = self.comm.scatter(log_Ls, root=0)
        # calculate new posterior values
        lnprob0 = 1 / self.temp * lnlike0 + self.logp(p0)
        return p0, lnlike0, lnprob0
    def temperatureLadder(self, Tmin, Tmax=None, tstep=None):
        """
        Method to compute temperature ladder. At the moment this uses
        a geometrically spaced temperature ladder with a temperature
        spacing designed to give 25 % temperature swap acceptance rate.
        """
        # TODO: make options to do other temperature ladders
        if self.nchain > 1:
            if tstep is None and Tmax is None:
                tstep = 1 + np.sqrt(2 / self.ndim)
            elif tstep is None and Tmax is not None:
                # Geometric spacing that hits Tmax exactly at the last chain.
                tstep = np.exp(np.log(Tmax / Tmin) / (self.nchain - 1))
            ladder = np.zeros(self.nchain)
            for ii in range(self.nchain):
                ladder[ii] = Tmin * tstep**ii
        else:
            ladder = np.array([1])
        return ladder
    def _writeToFile(self, iter):
        """
        Function to write chain file. File has 3+ndim columns,
        the first is log-posterior (unweighted), log-likelihood,
        and acceptance probability, followed by parameter values.
        @param iter: Iteration of sampler
        """
        self._chainfile = open(self.fname, "a+")
        for jj in range((iter - self.isave), iter, self.thin):
            ind = int(jj / self.thin)
            pt_acc = 1
            if self.MPIrank < self.nchain - 1 and self.swapProposed != 0:
                pt_acc = self.nswap_accepted / self.swapProposed
            # Columns: ndim parameters, then lnprob, lnlike, accept rate, PT accept rate.
            self._chainfile.write("\t".join(["%22.22f" % (self._chain[ind, kk]) for kk in range(self.ndim)]))
            self._chainfile.write(
                "\t%f\t%f\t%f\t%f\n" % (self._lnprob[ind], self._lnlike[ind], self.naccepted / iter, pt_acc)
            )
        self._chainfile.close()
        # write jump statistics files ####
        # only for T=1 chain
        if self.MPIrank == 0:
            # first write file contaning jump names and jump rates
            fout = open(self.outDir + "/jumps.txt", "w")
            njumps = len(self.propCycle)
            ujumps = np.array(list(set(self.propCycle)))
            for jump in ujumps:
                fout.write("%s %4.2g\n" % (jump.__name__, np.sum(np.array(self.propCycle) == jump) / njumps))
            fout.close()
            # now write jump statistics for each jump proposal
            for jump in self.jumpDict:
                fout = open(self.outDir + "/" + jump + "_jump.txt", "a+")
                fout.write("%g\n" % (self.jumpDict[jump][1] / max(1, self.jumpDict[jump][0])))
                fout.close()
    # function to update covariance matrix for jump proposals
    def _updateRecursive(self, iter, mem):
        """
        Function to recursively update sample covariance matrix.
        @param iter: Iteration of sampler
        @param mem: Number of steps between updates
        """
        it = iter - mem
        ndim = self.ndim
        if it == 0:
            self.M2 = np.zeros((ndim, ndim))
            self.mu = np.zeros(ndim)
        # Welford-style streaming update of mean (mu) and scatter matrix (M2).
        for ii in range(mem):
            diff = np.zeros(ndim)
            it += 1
            for jj in range(ndim):
                diff[jj] = self._AMbuffer[ii, jj] - self.mu[jj]
                self.mu[jj] += diff[jj] / it
            self.M2 += np.outer(diff, (self._AMbuffer[ii, :] - self.mu))
        self.cov[:, :] = self.M2 / (it - 1)
        # do svd on parameter groups
        for ct, group in enumerate(self.groups):
            covgroup = np.zeros((len(group), len(group)))
            for ii in range(len(group)):
                for jj in range(len(group)):
                    covgroup[ii, jj] = self.cov[group[ii], group[jj]]
            self.U[ct], self.S[ct], v = np.linalg.svd(covgroup)
    # update DE buffer samples
    def _updateDEbuffer(self, iter, burn):
        """
        Update Differential Evolution with last burn
        values in the total chain
        @param iter: Iteration of sampler
        @param burn: Total number of samples in DE buffer
        """
        self._DEbuffer = shift_array(self._DEbuffer, -len(self._AMbuffer))  # shift DEbuffer to the left
        self._DEbuffer[-len(self._AMbuffer) :] = self._AMbuffer  # add new samples to the new empty spaces
    # SCAM jump
    def covarianceJumpProposalSCAM(self, x, iter, beta):
        """
        Single Component Adaptive Jump Proposal. This function will occasionally
        jump in more than 1 parameter. It will also occasionally use different
        jump sizes to ensure proper mixing.
        @param x: Parameter vector at current position
        @param iter: Iteration of sampler
        @param beta: Inverse temperature of chain
        @return: q: New position in parameter space
        @return: qxy: Forward-Backward jump probability
        """
        q = x.copy()
        # Symmetric proposal, so the forward-backward ratio is 0.
        qxy = 0
        # choose group
        jumpind = self.stream.integers(0, len(self.groups))
        ndim = len(self.groups[jumpind])
        # adjust step size
        prob = self.stream.random()
        # large jump
        if prob > 0.97:
            scale = 10
        # small jump
        elif prob > 0.9:
            scale = 0.2
        # small-medium jump
        # elif prob > 0.6:
        # standard medium jump
        else:
            scale = 1.0
        # adjust scale based on temperature
        if self.temp <= 100:
            scale *= np.sqrt(self.temp)
        # get parmeters in new diagonalized basis
        # y = np.dot(self.U.T, x[self.covinds])
        # make correlated componentwise adaptive jump
        ind = np.unique(self.stream.integers(0, ndim, 1))
        neff = len(ind)
        # 2.4/sqrt(2*neff) is the standard adaptive-Metropolis scaling factor.
        cd = 2.4 / np.sqrt(2 * neff) * scale
        q[self.groups[jumpind]] += (
            self.stream.standard_normal() * cd * np.sqrt(self.S[jumpind][ind]) * self.U[jumpind][:, ind].flatten()
        )
        return q, qxy
    # AM jump
    def covarianceJumpProposalAM(self, x, iter, beta):
        """
        Adaptive Jump Proposal. This function will occasionally
        use different jump sizes to ensure proper mixing.
        @param x: Parameter vector at current position
        @param iter: Iteration of sampler
        @param beta: Inverse temperature of chain
        @return: q: New position in parameter space
        @return: qxy: Forward-Backward jump probability
        """
        q = x.copy()
        # Symmetric proposal, so the forward-backward ratio is 0.
        qxy = 0
        # choose group
        jumpind = self.stream.integers(0, len(self.groups))
        # adjust step size
        prob = self.stream.random()
        # large jump
        if prob > 0.97:
            scale = 10
        # small jump
        elif prob > 0.9:
            scale = 0.2
        # small-medium jump
        # elif prob > 0.6:
        # scale = 0.5
        # standard medium jump
        else:
            scale = 1.0
        # adjust scale based on temperature
        if self.temp <= 100:
            scale *= np.sqrt(self.temp)
        # get parmeters in new diagonalized basis
        y = np.dot(self.U[jumpind].T, x[self.groups[jumpind]])
        # make correlated componentwise adaptive jump
        ind = np.arange(len(self.groups[jumpind]))
        neff = len(ind)
        cd = 2.4 / np.sqrt(2 * neff) * scale
        # Jump in the SVD eigenbasis, then rotate back to parameter space.
        y[ind] = y[ind] + self.stream.standard_normal(neff) * cd * np.sqrt(self.S[jumpind][ind])
        q[self.groups[jumpind]] = np.dot(self.U[jumpind], y)
        return q, qxy
    # Differential evolution jump
    def DEJump(self, x, iter, beta):
        """
        Differential Evolution Jump. This function will occasionally
        use different jump sizes to ensure proper mixing.
        @param x: Parameter vector at current position
        @param iter: Iteration of sampler
        @param beta: Inverse temperature of chain
        @return: q: New position in parameter space
        @return: qxy: Forward-Backward jump probability
        """
        # get old parameters
        q = x.copy()
        qxy = 0
        # choose group
        jumpind = self.stream.integers(0, len(self.groups))
        ndim = len(self.groups[jumpind])
        bufsize = len(self._DEbuffer)
        # draw a random integer from 0 - iter
        mm = self.stream.integers(0, bufsize)
        nn = self.stream.integers(0, bufsize)
        # make sure mm and nn are not the same iteration
        while mm == nn:
            nn = self.stream.integers(0, bufsize)
        # get jump scale size
        prob = self.stream.random()
        # mode jump
        if prob > 0.5:
            scale = 1.0
        else:
            scale = self.stream.random() * 2.4 / np.sqrt(2 * ndim) * np.sqrt(1 / beta)
        for ii in range(ndim):
            # jump size: difference vector between two past buffer samples
            sigma = self._DEbuffer[mm, self.groups[jumpind][ii]] - self._DEbuffer[nn, self.groups[jumpind][ii]]
            # jump
            q[self.groups[jumpind][ii]] += scale * sigma
        return q, qxy
    # add jump proposal distribution functions
    def addProposalToCycle(self, func, weight):
        """
        Add jump proposal distributions to cycle with a given weight.
        @param func: jump proposal function
        @param weight: jump proposal function weight in cycle
        """
        # get length of cycle so far
        length = len(self.propCycle)
        # check for 0 weight
        if weight == 0:
            # print('ERROR: Can not have 0 weight in proposal cycle!')
            # sys.exit()
            return
        # add proposal to cycle: weight == number of copies appended
        for ii in range(length, length + weight):
            self.propCycle.append(func)
        # add to jump dictionary and initialize file
        if func.__name__ not in self.jumpDict:
            self.jumpDict[func.__name__] = [0, 0]
            fout = open(self.outDir + "/" + func.__name__ + "_jump.txt", "w")
            fout.close()
    # add auxilary jump proposal distribution functions
    def addAuxilaryJump(self, func):
        """
        Add auxilary jump proposal distribution. This will be called after every
        standard jump proposal. Examples include cyclic boundary conditions and
        pulsar phase fixes
        @param func: jump proposal function
        """
        # set auxilary jump
        self.aux.append(func)
    # randomized proposal cycle
    def randomizeProposalCycle(self):
        """
        Randomize proposal cycle that has already been filled
        """
        # get length of full cycle
        length = len(self.propCycle)
        # get random integers
        index = np.arange(length)
        self.stream.shuffle(index)
        # randomize proposal cycle
        # NOTE(review): randomizedPropCycle is built here but _jump draws
        # directly from propCycle; the shuffled copy appears unused.
        self.randomizedPropCycle = [self.propCycle[ind] for ind in index]
    # call proposal functions from cycle
    def _jump(self, x, iter):
        """
        Call Jump proposals
        """
        # get length of cycle
        length = len(self.propCycle)
        # call function
        ind = self.stream.integers(0, length)
        q, qxy = self.propCycle[ind](x, iter, 1 / self.temp)
        # axuilary jump
        if len(self.aux) > 0:
            for aux in self.aux:
                q, qxy_aux = aux(x, q, iter, 1 / self.temp)
                qxy += qxy_aux
        return q, qxy, self.propCycle[ind].__name__
    # TODO: jump statistics
class _function_wrapper(object):
"""
This is a hack to make the likelihood function pickleable when ``args``
or ``kwargs`` are also included.
"""
def __init__(self, f, args, kwargs):
self.f = f
self.args = args
self.kwargs = kwargs
def __call__(self, x):
return self.f(x, *self.args, **self.kwargs)
| 34,034
| 32.075802
| 118
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/PTMCMCSampler/nompi4py.py
|
# Dummy class for packages that have no MPI
class MPIDummy(object):
    """Single-process stand-in for an mpi4py communicator.

    Implements just enough of the communicator API (rank/size queries,
    barrier, point-to-point and collective no-ops) for the sampler to run
    without mpi4py installed. Rank is always 0 and size is always 1.
    """

    def __init__(self):
        pass

    def Get_rank(self):
        # The only process is always rank 0.
        return 0

    def Get_size(self):
        # Exactly one process in the "communicator".
        return 1

    def barrier(self):
        # Nothing to synchronize with.
        pass

    def send(self, lnlike0, dest=1, tag=55):
        # Point-to-point sends are no-ops without peers.
        pass

    def recv(self, source=1, tag=55):
        pass

    def Iprobe(self, source=1, tag=55):
        pass

    def scatter(self, sendobj, **kwargs):
        # Rank 0 keeps the first element, mirroring a real scatter.
        return sendobj[0] if sendobj is not None else None

    def bcast(self, obj, **kwargs):
        # Broadcast to a single process is the identity.
        return obj

    def gather(self, sendobj, **kwargs):
        # Gathering from one process yields a one-element list.
        return [sendobj]


# Global object representing no MPI:
COMM_WORLD = MPIDummy()
| 702
| 17.5
| 44
|
py
|
PTMCMCSampler
|
PTMCMCSampler-master/PTMCMCSampler/__init__.py
|
from PTMCMCSampler import PTMCMCSampler # noqa: F401
from .version import version
__version__ = version
| 107
| 17
| 53
|
py
|
MENET
|
MENET-master/light/inference.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/inference.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/30
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
from datetime import datetime
import cv2
import numpy as np
import tensorflow as tf
from configuration import cfg
from utils import inference_wrapper
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu_id # only /gpu:gpu_id is visible
def main(_):
    """Run de-rain inference over every .jpg in cfg.infer_in_dir and write PNG results.

    ``_`` is the argv list that ``tf.app.run()`` passes in; it is unused.
    """
    # Build the inference graph once, then freeze it against further changes.
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(os.path.join(cfg.model_dir, model.nickname))
    g.finalize()
    print("Restore model from directory: {}".format(os.path.join(cfg.model_dir, model.nickname)))
    # Collect absolute paths of all .jpg inputs in the inference directory.
    filenames = list(filter(lambda x: x.endswith('.jpg'), os.listdir(cfg.infer_in_dir)))
    filenames = [os.path.join(cfg.infer_in_dir, filename) for filename in filenames]
    print("Running de-rain infer on %d files from directory: %s" % (len(filenames), cfg.infer_in_dir))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # claim GPU memory on demand, not all at once
    with tf.Session(graph=g, config=config) as sess:
        # Load the model weights from the latest checkpoint.
        restore_fn(sess)
        if not os.path.exists(cfg.infer_out_dir):
            os.makedirs(cfg.infer_out_dir)
        for i, filename in enumerate(filenames):
            bgr = cv2.imread(filename)
            h, w = bgr.shape[:2]
            # Pad-resize so both dimensions are multiples of cfg.scale_ratio
            # (presumably required by the network's down/up-sampling — confirm).
            if w % cfg.scale_ratio != 0 or h % cfg.scale_ratio != 0:
                aw = (cfg.scale_ratio - w % cfg.scale_ratio) % cfg.scale_ratio
                ah = (cfg.scale_ratio - h % cfg.scale_ratio) % cfg.scale_ratio
                bgr = cv2.resize(bgr, (w + aw, h + ah), interpolation=cv2.INTER_CUBIC)
            # cv2 reads BGR; reverse the channel axis to feed RGB, add batch dim.
            rgb_array = np.expand_dims(np.asarray(bgr[..., ::-1], "float32"), 0)
            rgb_array = model.inference_step(sess=sess, input_feed=rgb_array)[0]
            basename = os.path.basename(filename).split(".")[0]
            # Back to BGR for cv2.imwrite, restored to the original input size.
            b_output = cv2.resize(rgb_array[..., ::-1], (w, h), interpolation=cv2.INTER_CUBIC)
            print(basename, b_output.shape, np.max(b_output),np.min(b_output),np.mean(b_output))
            cv2.imwrite(os.path.join(cfg.infer_out_dir,
                                     "{}@{}_{}.png".format(basename, model.nickname, datetime.now().date())), b_output)
if __name__ == "__main__":
    tf.app.run()
| 2,443
| 37.793651
| 119
|
py
|
MENET
|
MENET-master/light/build_h5_dataset.py
|
# -*- coding: utf-8 -*-
# @File : derain_gradnorm_tf/build_h5_dataset.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
from random import shuffle
import h5py
import numpy as np
from PIL import Image
from configuration import cfg
from utils import transforms
class DataLoader(object):
    """Sequence-style dataset of (rainy, background, rain-streak) image crops.

    Iteration with a plain ``for`` loop works via the implicit sequence
    protocol: ``__getitem__`` is called with increasing indices until the
    filename-list lookup raises IndexError.
    """
    def __init__(self, image_dir, crop_size=64, blend_mode="linear", horizontal_flip=False):
        """
        :param image_dir (str): path of the images
        :param crop_size (int or tuple): crop size, default is 64
        :param blend_mode (str): pick one of two, `screen` or `linear`, represents image composition type
        :param horizontal_flip (bool): Whether use horizontal flipping or not
        """
        # super(DataLoader, self).__init__()
        # 1. initialize file path or a list of file names.
        assert blend_mode in ["screen", "linear"]
        self.blend_mode = blend_mode
        self.data_path = image_dir
        self.all_filenames = os.listdir(self.data_path)
        # Ground-truth backgrounds are the files prefixed "norain"; the paired
        # rainy and streak filenames are derived from these in __getitem__.
        self.label_filenames = list(filter(lambda filename: filename.startswith("norain"), self.all_filenames))
        self.num_files = len(self.label_filenames)
        print("[DataLoader] preprocess {} files on dir `{}`".format(self.num_files, self.data_path))
        # FiveCrop yields the tuple (tl, tr, bl, br, center); the lambda stacks
        # the five converted crops into a single ndarray per image.
        self.transform = transforms.Compose([transforms.FiveCrop(crop_size, horizontal_flip),
                                             lambda crops: np.stack([transforms.ToArray()(crop) for crop in crops])])
    def __getitem__(self, item):
        """Load the item-th triplet and apply the five-crop transform to each image.

        Returns a dict with keys 'syn' (rainy input), 'bg' (clean background)
        and 'r' (rain-streak layer). Raises IndexError past the end.
        """
        # 1. read one data from file (e.g. using PIL.Image.open).
        # 2. Preprocess the data (e.g. Transform).
        # 3. Return a data pair (e.g. image and label).
        if self.blend_mode == "screen":
            input_image = Image.open(os.path.join(self.data_path,
                                                  self.label_filenames[item].replace("norain", "screenrainy")))
        else:
            input_image = Image.open(os.path.join(self.data_path,
                                                  self.label_filenames[item].replace("norain", "rain")))
        label_image = Image.open(os.path.join(self.data_path, self.label_filenames[item]))
        noise_image = Image.open(os.path.join(self.data_path, self.label_filenames[item].replace("norain", "rainstreak")))
        sample = {'syn': input_image, 'bg': label_image, 'r': noise_image}
        if self.transform:
            sample['syn'] = self.transform(sample['syn'])
            sample['bg'] = self.transform(sample['bg'])
            sample['r'] = self.transform(sample['r'])
        return sample
    def __len__(self):
        # the total size of the dataset (number of "norain" label files)
        return self.num_files
def save2h5(save_path="temp.h5", image_dir="/dataset/cvpr2017_derain_dataset/training_data/RainTrainL",
            crop_size=224, blend_mode="linear", horizontal_flip=False):
    """Crop every sample produced by DataLoader and store the shuffled crops in an HDF5 file.

    :param save_path (str): output .h5 file; parent directories are created as needed
    :param image_dir (str): directory holding the norain/rain/rainstreak image triplets
    :param crop_size (int or tuple): five-crop patch size handed to DataLoader
    :param blend_mode (str): `screen` or `linear` image composition type
    :param horizontal_flip (bool): whether DataLoader applies horizontal flipping
    """
    dataloader = DataLoader(image_dir, crop_size, blend_mode, horizontal_flip)
    img_pair = []
    for samples in dataloader:
        # rain-streak crops are single-channel; add the channel axis so all three stack
        samples['r'] = np.expand_dims(samples['r'], -1)
        img_pair.append(np.concatenate([samples['syn'], samples['bg'], samples['r']], -1))
    img_pair_ndarray = np.concatenate(img_pair, 0)
    # shuffle crops so the stored dataset is not ordered by source image
    idx = np.arange(img_pair_ndarray.shape[0])
    shuffle(idx)
    img_pair_ndarray = np.take(img_pair_ndarray, idx, 0)
    # split the channels back apart: [0:3]=synthetic input, [3:6]=background, [6:]=rain streak
    input_ndarray, label_ndarray, noise_ndarray = np.split(img_pair_ndarray, [3, 6], -1)
    dirname = os.path.dirname(save_path)
    # Bug fix: the old `os.mkdir(dirname)` raised when save_path had no directory
    # component (dirname == "") and could not create nested directories;
    # makedirs with exist_ok handles both, guarded against the empty string.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    # context manager guarantees the file is closed even if a write fails
    with h5py.File(save_path, 'w') as f:
        f.create_dataset("syn", data=input_ndarray, compression="gzip")
        f.create_dataset("bg", data=label_ndarray, compression="gzip")
        f.create_dataset("r", data=noise_ndarray, compression="gzip")
if __name__ == '__main__':
img_dir = os.path.join(cfg.original_image_dir, cfg.sub_dir)
save2h5("{}.h5".format(os.path.join(cfg.test_dir, cfg.sub_dir)), img_dir, cfg.crop_size, "linear",
cfg.horizontal_flip)
| 4,172
| 42.926316
| 127
|
py
|
MENET
|
MENET-master/light/validation.py
|
# -*- coding: utf-8 -*-
# @File : derain_wgan_tf/validation.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/30
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
import platform
from datetime import datetime
from time import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from configuration import cfg
from data_helper import get_batch
from net import Model
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu_id
def main(_):
    """Evaluate the trained model on the test set and append metrics to metrics.txt.

    ``_`` is the argv list that ``tf.app.run()`` passes in; it is unused.
    """
    # build model in evaluation mode
    model = Model("eval")
    model.build()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # claim GPU memory on demand, not all at once
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)
        if os.path.exists(os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            model_file = tf.train.latest_checkpoint(os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            # no checkpoint to evaluate — nothing to do
            exit()
        # accumulate per-batch metrics and the wall-clock time of each forward pass
        ssim_list = list()
        psnr_list = list()
        mse_list = list()
        time_list = list()
        for batch_syn, batch_bg in tqdm(get_batch(os.path.join(cfg.test_dir, cfg.data_filename), cfg.batch_size)):
            batch_syn = np.asarray(batch_syn, "float32")
            batch_bg = np.asarray(batch_bg, "float32")
            feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}
            start = time()
            mse, ssim, psnr = sess.run([model.mse, model.ssim, model.psnr], feed_dict=feed_dict)
            end = time()
            ssim_list.append(ssim)
            psnr_list.append(psnr)
            mse_list.append(mse)
            time_list.append(end - start)
        avg_ssim = np.mean(ssim_list)
        avg_psnr = np.mean(psnr_list)
        avg_mse = np.mean(mse_list)
        # per-batch time divided by batch size -> seconds per image
        avg_time = np.mean(time_list) / cfg.batch_size
        if not os.path.exists(cfg.metric_dir):
            os.makedirs(cfg.metric_dir)
        # append (mode 'a') one evaluation record per run
        with open(os.path.join(cfg.metric_dir, 'metrics.txt'), 'a') as f:
            f.write("os:\t{}\t\t\tdate:\t{}\n".format(platform.system(), datetime.now()))
            f.write("model:\t{}\t\timage_size:\t{}\n".format(model.nickname, cfg.crop_size))
            f.write("data:\t{}\t\tgpu_id:\t{}\n".format(cfg.data_filename, cfg.gpu_id))
            f.write("speed:\t{:.8f} s/item\tmse:\t{:.8f}\n".format(avg_time, avg_mse))
            f.write("ssim:\t{:.8f}\t\tpsnr:\t{:.8f}\n\n".format(avg_ssim, avg_psnr))
        print(" ------ Arriving at the end of data ------ ")
if __name__ == '__main__':
    tf.app.run()
| 2,654
| 31.777778
| 114
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.